from django.db import models
from django.conf import settings
from django.urls import reverse
from django.contrib.auth import get_user_model
from autoslug import AutoSlugField
from django.utils import timezone
from phonenumber_field.modelfields import PhoneNumberField
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.contrib.auth.hashers import make_password
from rest_framework.authtoken.models import Token
from cities_light.models import City, Country, Region
# from lms_app.forms import RegisterForm
User = get_user_model()
login_choices = [
('0', 'Web'),
('1', 'App'),
]
ROLES = [
('0', 'Admin'),
('1', 'Teacher'),
('2', 'Student'),
]
material_choices = [
('0', 'Study Material'),
('1', 'Questionnaire'),
('2', 'Previous year Question'),
]
file_choices = [
('0', 'Video'),
('1', 'Image'),
('2', 'pdf'),
]
send_choices = [
('Teacher', 'Teachers'),
('Student', 'Students'),
]
scheme_choices = [
('0', 'Subject'),
('1', 'Chapter'),
]
state_choices = [
('KL', 'KERALA'),
('KA', 'KARNATAKA'),
('TN', 'TAMIL NADU'),
('GOA', 'GOA'),
]
gender_choices = [
('0', 'Male'),
('1', 'Female'),
]
DOUBT_STATUS = [
('0', 'New'),
('1', 'Solved'),
]
class Syllabus(models.Model):
name = models.CharField(max_length=255)
active = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Syllabus'
verbose_name_plural = 'Syllabus'
def __str__(self):
return self.name
class Standard(models.Model):
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
name = models.CharField(max_length=255)
active = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
# return str(self.syllabus)
# return f"{self.syllabus} - {self.name}"
return self.name
def get_absolute_url(self):
return reverse("standard_list", kwargs={"pk": self.pk})
class Subject(models.Model):
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
name = models.CharField(max_length=255)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
slug = AutoSlugField(populate_from='name')
active = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
# return f"{self.standard} - {self.name}"
@property
def get_standard(self):
return self.standard.name
# @property
# def subjects(self):
# subs = self.subject.all()
# _subs = []
# for sub in subs:
# _subs.append(str(sub))
# from django.utils.html import format_html
# return format_html(", ".join(_subs))
class Teacher(models.Model):
name = models.CharField(max_length=255)
email = models.EmailField(null=True, blank=True)
username = models.CharField(max_length=255, null=True, blank=True)
password = models.CharField(max_length=255, null=True, blank=True)
contact_no_1 = PhoneNumberField(
default=None, null=True, blank=True, unique=True)
whatsapp_no = PhoneNumberField(
default=None, null=True, blank=True, unique=True)
address = models.CharField(max_length=255)
subject = models.ManyToManyField(Subject, blank=True)
image = models.ImageField(
upload_to='staticfiles/image/',null=True, blank=True)
#hanin created gender
gender = models.CharField(max_length=6, choices=gender_choices, null=True)
user = models.OneToOneField(get_user_model(), null=True, blank=True, on_delete=models.CASCADE)
# user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
active = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
@receiver(post_save, sender=Teacher)
def create_teacher_user(sender, instance, created, **kwargs):
if created:
user = User.objects.create_user(username=instance.username, password=instance.password)
instance.user = user
instance.save()
@receiver(post_save, sender=Teacher)
def update_teacher_user(sender, instance, created, **kwargs):
if created == False:
instance.user.save()
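# Illustrative note (not part of the original app): with the two receivers
# above, saving a new Teacher also provisions a linked auth User, e.g. in a
# Django shell session (all values below are placeholders):
#
#   teacher = Teacher.objects.create(name='T. Example', username='t_example',
#                                    password='plain-text-pass', address='...')
#   teacher.user                                    # auto-created User instance
#   teacher.user.check_password('plain-text-pass')  # True, hashed by create_user()
#
# Note that Teacher.password itself stays in plain text; only the User created
# by create_user() stores a hashed password.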
# @receiver(post_save, sender=User)
# def create_auth_token(sender, instance=None, created=False, **kwargs):
# if created:
# Token.objects.create(user=instance)
#
class Chapter(models.Model):
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
name = models.CharField(max_length=255)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
slug = AutoSlugField(populate_from='name', null=True)
active = models.BooleanField(default=False)
free_tier = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Chapter'
verbose_name_plural = 'Chapters'
def __str__(self):
return self.name
class Video(models.Model):
name = models.CharField(max_length=255)
subtitle = models.CharField(max_length=255, null=True, blank=True)
# description = models.TextField()
description = models.TextField()
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, null=True, blank=True)
videofile = models.FileField(
upload_to='staticfiles/media_root/videos/', null=True)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
thumbnail_image = models.ImageField(
upload_to='staticfiles/thumbnail/', null=True, blank=True)
url_field = models.URLField(max_length=200, null=True, blank=True)
vimeo_video = models.CharField(max_length=200, null=True, blank=True)
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
# + ": " + str(self.videofile)
class Chat(models.Model):
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
pdf = models.FileField(upload_to='staticfiles/pdf')
remark = models.CharField(max_length=200)
uploaded_by = models.CharField(max_length=255, choices=ROLES)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Scheme(models.Model):
name = models.CharField(max_length=255)
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE)
subject = models.ManyToManyField(Subject)
slug = AutoSlugField(populate_from='name', null=True)
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
# subs = self.subject.all()
# _subs = []
# for sub in subs:
# _subs.append(str(sub))
# from django.utils.html import format_html
# return format_html(", ".join(_subs))
# def __str__(self):
# subject = self.subject_wise.all()
# for sub in subject:
# sub
# return str(sub)
# class UserProfile(models.Model):
# country = models.ForeignKey(Country ,on_delete=models.CASCADE)
# state = models.ForeignKey(Region ,on_delete=models.CASCADE)
#
# class Countrys(models.Model):
# country = models.ForeignKey(Country,on_delete=models.CASCADE)
# active = models.BooleanField(default=False)
# def __str__(self):
# return f'{self.country}'
# # return f'{self.country}'
# class State(models.Model):
# country = models.ForeignKey(Country, on_delete=models.CASCADE)
# name = models.ForeignKey(Region, on_delete=models.CASCADE, null=True, blank=True)
# def __str__(self):
# return f'{self.name}'
class Student(models.Model):
name = models.CharField(max_length=255)
username = models.CharField(max_length=255, null=True, blank=True)
password = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=6, choices=gender_choices)
# date_of_birth = models.DateField(null=True, blank=True)
address = models.CharField(max_length=255,null=True,blank=True)
country = models.ForeignKey(Country ,on_delete=models.CASCADE,null=True)
state = models.ForeignKey(Region ,on_delete=models.CASCADE,null=True)
city = models.CharField(null=True,blank=True,max_length=255)
district = models.CharField(null=True,blank=True,max_length=255)
present_country = models.ForeignKey(Country ,on_delete=models.CASCADE,null=True,related_name = 'pre_country')
email = models.EmailField(null=True, blank=True)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
guardian_name = models.CharField(null=True,blank=True,max_length=255)
guardian_relation = models.CharField(null=True,blank=True,max_length=50)
contact_no = PhoneNumberField(
default=None, null=True, blank=True, unique=True)
whatsapp_no = PhoneNumberField(
default=None, null=True, blank=True, unique=True)
syllabus = models.ForeignKey(Syllabus,null=True,blank=True, on_delete=models.CASCADE)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE)
course_type = models.CharField(null=True,blank=True,max_length=255)
user = models.OneToOneField(get_user_model(), null=True, blank=True, on_delete=models.CASCADE)
is_paid = models.BooleanField(default=False)
# subject = models.ManyToManyField(Subject)
scheme = models.ForeignKey(Scheme,null=True,blank=True, on_delete=models.CASCADE)
active = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
@receiver(post_save, sender=Student)
def create_student_user(sender, instance, created, **kwargs):
if created:
user = User.objects.create_user(username=instance.username, password=instance.password)
instance.user = user
instance.save()
@receiver(post_save, sender=Student)
def update_student_user(sender, instance, created, **kwargs):
if created == False:
instance.user.save()
class Comment(models.Model):
video = models.ForeignKey(Video, on_delete=models.CASCADE, null=True)
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, null=True)
student = models.ForeignKey(Student, on_delete=models.CASCADE, null=True)
text = models.TextField(null=True)
created = models.DateTimeField(auto_now_add=True)
approved_comment = models.BooleanField(default=False)
def approve(self):
self.approved_comment = True
self.save()
def __str__(self):
return str(self.text)
class Documents(models.Model):
name = models.CharField(max_length=255)
subtitle = models.CharField(max_length=255 )
description = models.TextField(null=True, blank=True)
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, null=True, blank=True)
# material_type = models.CharField(max_length=50, choices=material_choices, null=True, blank=True)
url_field = models.URLField(max_length=200, null=True, blank=True)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
thumbnail_image = models.ImageField(
upload_to='staticfiles/thumbnail/', null=True, blank=True)
pdf = models.FileField(upload_to='staticfiles/pdf')
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Document'
verbose_name_plural = 'Documents'
def __str__(self):
return self.name
class Study_Material(models.Model):
name = models.CharField(max_length=255)
subtitle = models.CharField(max_length=255 )
description = models.TextField(null=True, blank=True)
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, null=True, blank=True)
# chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE)
# material_type = models.CharField(max_length=50, choices=material_choices, null=True, blank=True)
url_field = models.URLField(max_length=200, null=True, blank=True)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
thumbnail_image = models.ImageField(
upload_to='staticfiles/thumbnail/', null=True, blank=True)
pdf = models.FileField(upload_to='staticfiles/pdf')
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Study_Material'
verbose_name_plural = 'Study_Materials'
def __str__(self):
return self.name
class Question_paper(models.Model):
name = models.CharField(max_length=255)
subtitle = models.CharField(max_length=255 )
description = models.TextField(null=True, blank=True)
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, null=True, blank=True)
# chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE)
# material_type = models.CharField(max_length=50, choices=material_choices, null=True, blank=True)
url_field = models.URLField(max_length=200, null=True, blank=True)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
thumbnail_image = models.ImageField(
upload_to='staticfiles/thumbnail/', null=True, blank=True)
pdf = models.FileField(upload_to='staticfiles/pdf')
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Question_paper'
verbose_name_plural = 'Question_papers'
def __str__(self):
return self.name
class File(models.Model):
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, null=True, blank=True)
# chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
pdf = models.FileField(upload_to='staticfiles/pdf')
remark = models.CharField(max_length=200)
uploaded_by = models.CharField(max_length=255, choices=ROLES)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Notification(models.Model):
title = models.CharField(max_length=255)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
description = models.TextField()
send_to = models.CharField(max_length=255, choices=send_choices)
active = models.BooleanField(default=False)
def __str__(self):
return self.title
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(
upload_to='staticfiles/image/', null=True, blank=True)
def __str__(self):
return f'{self.user.name} Profile'
# def save(self, *args, **kwargs):
# s = User.objects.get()
# # self.user.username = self.name
# # self.user.password = "<PASSWORD>"
# self.user=s
# super(Student, self).save(self, *args, **kwargs)
class Doubt(models.Model):
syllabus = models.ForeignKey(Syllabus, on_delete=models.CASCADE, null=True, blank=True)
standard = models.ForeignKey(Standard, on_delete=models.CASCADE, null=True, blank=True)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, null=True, blank=True)
import os
import json
from sets import Set
import time
from concurrent.futures import ThreadPoolExecutor
from parsers.medline_xml import MedlineFileParser
def calcPrecision(tp, fp, fn):
al = float(tp + fp)
return float(tp) / al if al != 0.0 else 1
def calcRecall(tp, fp, fn):
al = float(tp + fn)
return float(tp) / al if al != 0.0 else 1
def calcFBeta(tp, fp, fn, beta):
beta2 = float(beta)**2
bottom = float((1 + beta2)*tp + beta2*fn + fp)
return (1 + beta2) * tp / bottom if bottom != 0.0 else 1
def calcF1(tp, fp, fn):
bottom = float(2*tp + fp + fn)
return 2.0*tp / bottom if bottom != 0.0 else 1
def calcF05(tp, fp, fn):
return calcFBeta(tp, fp, fn, 0.5)
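# Sanity note on the closed forms above (illustrative, not part of the original
# pipeline): with precision P = tp / (tp + fp) and recall R = tp / (tp + fn),
#
#   F_beta = (1 + beta^2) * P * R / (beta^2 * P + R)
#          = (1 + beta^2) * tp / ((1 + beta^2) * tp + beta^2 * fn + fp)
#
# which is exactly what calcFBeta computes, with the convention that an empty
# denominator counts as a perfect score of 1. For example, tp=8, fp=2, fn=4
# gives P = 0.8, R = 2/3 and F1 = 2*8 / (2*8 + 2 + 4) = 16/22 ~= 0.727.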
class MedlineEvaluator:
def __init__(self, medline_dir_old, medline_dir_new, old_unannotated_fname, eval_articles_fname):
self._medline_dir_old = medline_dir_old
self._medline_dir_new = medline_dir_new
self._old_unannotated_fname = old_unannotated_fname
self._eval_articles_fname = eval_articles_fname
self._max_workers = 12
def extractCandidates(self, only_major):
# first find the articles that are not annotated in the old dataset
articles_old = self._readMedline(self._medline_dir_old, only_major)
unannotated_json = []
old_article_map = {}
for articleN, article in enumerate(articles_old):
if articleN % 10000 == 0:
print 'processing article ' + str(articleN+1)
if not article.hasMeshHeadings():
article_id = article.pmid
old_article_map[article_id] = article
unannotated_json.append(article.asDict())
if len(old_article_map) % 10000 == 1:
print 'found ' + str(len(old_article_map)) + ' unannotated articles'
print 'found ' + str(len(old_article_map)) + ' unannotated articles'
# dump the articles that are not annotated to a file
print 'dumping unannotated articles'
with open(self._old_unannotated_fname, 'w') as f:
json.dump(unannotated_json, f, indent=4, sort_keys=True)
# release memory
del unannotated_json
del articles_old
eval_candidates = []
# read the new articles
articles_new = self._readMedline(self._medline_dir_new, only_major)
for articleN, article in enumerate(articles_new):
if articleN % 10000 == 0:
print 'processing article ' + str(articleN+1)
article_id = article.pmid
if article_id in old_article_map and article.hasMeshHeadings():
eval_candidates.append(article.asDict())
if len(eval_candidates) % 10000 == 1:
print 'found ' + str(len(eval_candidates)) + ' candidates for evaluation'
# dump the evaluation candidates to a file
print 'dumping evaluation candidates'
with open(self._eval_articles_fname, 'w') as f:
json.dump(eval_candidates, f, indent=4, sort_keys=True)
def classify(self, classifier, classified_path):
'''
Classifying the articles
'''
annotated_path = self._eval_articles_fname
articles_json = None
print 'reading articles'
with open(annotated_path, 'r') as f:
articles_json = json.load(f)
start_secs = time.time()
failed_count = 0
for articleN, article_json in enumerate(articles_json):
if articleN % 1000 == 0:
print 'processing article ' + str(articleN+1)
abstract = article_json['abstract']
classified_headings = classifier.classify(abstract)
if classified_headings is not None:
article_json['classifiedMeshHeadings'] = classified_headings
else:
failed_count += 1
print 'failed classifications: ' + str(failed_count)
end_secs = time.time()
dur_secs = end_secs - start_secs
print 'writing json'
with open(classified_path, 'w') as f:
json.dump(articles_json, f, indent=4, sort_keys=True)
print 'classified ' + str(len(articles_json)) + ' articles in ' + str(dur_secs) + ' seconds'
print 'total failed classifications: ' + str(failed_count)
def appendMeshHeadings(self, articles, mesh, only_major=False):
descriptor_set = Set()
missing_descriptors = 0
total_descriptors = 0
for articleN, article in enumerate(articles):
if articleN % 10000 == 1:
print 'processing article ' + str(articleN)
descriptor_set.clear()
classified_descriptors = []
annotated_headings = []
categories = article['classifiedMeshHeadings']
meshHeadings = article['meshHeadings']
total_descriptors += len(categories)
for category in categories:
descriptor_ui = None
category_wgt = category['weight']
if 'descriptorUi' in category:
descriptor_ui = category['descriptorUi']
else:
category_name = category['category']
descriptor_ui = mesh.getCategoryDescriptor(category_name)
if descriptor_ui is None:
missing_descriptors += 1
continue
if articleN % 10000 == 1:
print 'category `' + str(category_name) + '` translated to `' + str(descriptor_ui) + '`'
tree_numbers = mesh.getTreeNumbers(descriptor_ui)
if not descriptor_ui in descriptor_set:
descriptor_set.add(descriptor_ui)
classified_descriptors.append({
'descriptorUi': descriptor_ui,
'treeNumbers': tree_numbers,
'weight': category_wgt
})
for headingN in xrange(len(meshHeadings)):
descriptor_ui = meshHeadings[headingN]
tree_numbers = mesh.getTreeNumbers(descriptor_ui)
annotated_headings.append({
'descriptorUi': descriptor_ui,
'treeNumbers': tree_numbers
})
article['classifiedMeshHeadings'] = classified_descriptors
article['annotatedMeshHeadings'] = annotated_headings
print 'missing descriptors ' + str(missing_descriptors) + ' of ' + str(total_descriptors)
print 'done!'
def evalExactMatches(self, articles, wgt_cutoff=0.0):
print 'evaluating exact matches'
n_articles = len(articles)
f1_dataset = 0
f05_dataset = 0
classified_uis_mean = 0
real_uis_mean = 0
f1_mean = 0
f05_mean = 0
precision_mean = 0
recall_mean = 0
n_bins = 101
f1_dist = [0 for _ in range(n_bins)]
f05_dist = [0 for _ in range(n_bins)]
precision_dist = [0 for _ in range(n_bins)]
recall_dist = [0 for _ in range(n_bins)]
for articleN, article in enumerate(articles):
if articleN % 10000 == 0:
print 'processing article ' + str(articleN+1)
classified_uis = Set([heading['descriptorUi'] for heading in article['classifiedMeshHeadings'] if heading['weight'] >= wgt_cutoff])
real_uis = Set(article['meshHeadings'])
tp = len(classified_uis.intersection(real_uis))
fp = len(classified_uis.difference(real_uis))
fn = len(real_uis.difference(classified_uis))
f1_article = calcF1(tp, fp, fn)
f05_article = calcF05(tp, fp, fn)
precision = calcPrecision(tp, fp, fn)
recall = calcRecall(tp, fp, fn)
f1_mean += f1_article
f05_mean += f05_article
precision_mean += precision
recall_mean += recall
f1_binN = int(100*f1_article)
f1_dist[f1_binN] += 1
f05_binN = int(100*f05_article)
f05_dist[f05_binN] += 1
precision_binN = int(100*precision)
precision_dist[precision_binN] += 1
recall_binN = int(100*recall)
recall_dist[recall_binN] += 1
f1_dataset += f1_article
f05_dataset += f05_article
classified_uis_mean += len(classified_uis)
real_uis_mean += len(real_uis)
classified_uis_mean /= float(len(articles))
real_uis_mean /= float(len(articles))
f1_dataset /= len(articles)
f05_dataset /= len(articles)
f1_mean /= float(n_articles)
f05_mean /= float(n_articles)
precision_mean /= float(n_articles)
recall_mean /= float(n_articles)
def median(dist):
prob_sum = 0
for valN, val in enumerate(dist):
prob_sum += val
if prob_sum >= 0.5:
return 0.005*(valN + valN+1)
return 1
f1_dist = [float(val) / n_articles for val in f1_dist]
f05_dist = [float(val) / n_articles for val in f05_dist]
precision_dist = [float(val) / n_articles for val in precision_dist]
recall_dist = [float(val) / n_articles for val in recall_dist]
f1_median = median(f1_dist)
f05_median = median(f05_dist)
precision_median = median(precision_dist)
recall_median = median(recall_dist)
return {
'avgHeadings': real_uis_mean,
'avgClassifiedHeadings': classified_uis_mean,
# 'precision': precision,
# 'recall': recall,
'f1_dataset': f1_dataset,
'f05_dataset': f05_dataset,
'f1_mean': f1_mean,
'f05_mean': f05_mean,
'precision_mean': precision_mean,
'recall_mean': recall_mean,
'f1_median': f1_median,
'f05_median': f05_median,
'precision_median': precision_median,
'recall_median': recall_median,
'f1_dist': f1_dist,
'f05_dist': f05_dist,
'precision_dist': precision_dist,
'recall_dist': recall_dist
}
def evalToDepth(self, articles, depth, wgt_cutoff=0.0):
f1_dataset = 0
f05_dataset = 0
classified_uis_mean = 0
real_uis_mean = 0
f1_mean = 0
f05_mean = 0
precision_mean = 0
recall_mean = 0
n_bins = 101
f1_dist = [0 for _ in range(n_bins)]
f05_dist = [0 for _ in range(n_bins)]
precision_dist = [0 for _ in range(n_bins)]
recall_dist = [0 for _ in range(n_bins)]
n_articles = 0
for articleN, article in enumerate(articles):
if articleN % 10000 == 1:
print 'processing article ' + str(articleN) + ', depth ' + str(depth) + ', wgt cutoff ' + str(wgt_cutoff)
classified_headings = article['classifiedMeshHeadings']
real_headings = article['annotatedMeshHeadings']
classified_headings = [heading for heading in classified_headings if heading['weight'] >= wgt_cutoff]
tp = 0
fp = 0
fn = 0
classified_undupl = TreeNumberHelper.removeDuplicates(classified_headings, depth)
real_undupl = TreeNumberHelper.removeDuplicates(real_headings, depth)
if len(real_undupl) == 0:
continue
# print '\n\n\n\nclassified: ' + str(classified_headings) + '\nundupl: ' + str(classified_undupl)
# print '\n\n\n\nreal: ' + str(real_headings) + '\nundupl: ' + str(real_undupl)
# true positives and false positives
for classified_tree_numbers in classified_undupl:
# classified_tree_numbers = classified_heading['treeNumbers']
# check if the heading appears in the real headings
is_tp = False
for real_tree_numbers in real_undupl:
# real_tree_numbers = real_heading['treeNumbers']
match = TreeNumberHelper.anyMatchesToDepth(
classified_tree_numbers,
real_tree_numbers,
depth
)
# print 'matches: ' + str(match) + '\nclassified: ' + str(classified_tree_numbers) + '\nreal: ' + str(real_tree_numbers)
if match:
is_tp = True
break
tp += 1 if is_tp else 0
fp += 1 if not is_tp else 0
# false negatives
for real_tree_numbers in real_undupl:
# real_tree_numbers = real_heading['treeNumbers']
if len(real_tree_numbers) == 0:
continue
# check that the heading does not appear in the classified headings
is_fn = True
for classified_tree_numbers in classified_undupl:
# classified_tree_numbers = classified_heading['treeNumbers']
match = TreeNumberHelper.anyMatchesToDepth(
classified_tree_numbers,
real_tree_numbers,
depth
)
if match:
is_fn = False
break
fn += 1 if is_fn else 0
# if articleN >= 10:
# print 'exiting'
# exit(0)
f1_article = calcF1(tp, fp, fn)
f05_article = calcF05(tp, fp, fn)
precision = calcPrecision(tp, fp, fn)
recall = calcRecall(tp, fp, fn)
f1_mean += f1_article
f05_mean += f05_article
precision_mean += precision
recall_mean += recall
f1_binN = int(100*f1_article)
f1_dist[f1_binN] += 1
f05_binN = int(100*f05_article)
f05_dist[f05_binN] += 1
precision_binN = int(100*precision)
precision_dist[precision_binN] += 1
recall_binN = int(100*recall)
recall_dist[recall_binN] += 1
f1_dataset += f1_article
f05_dataset += f05_article
classified_uis_mean += len(classified_undupl)
real_uis_mean += len(real_undupl)
n_articles += 1
classified_uis_mean /= float(len(articles))
real_uis_mean /= float(len(articles))
f1_dataset /= len(articles)
f05_dataset /= len(articles)
f1_mean /= float(n_articles)
method and after all required
data has been extracted and stored from the class to prepare the class for
the next image/target processing. You should use this method as an
opportunity to remove any old data that you don't want to potentially
carry over to a new image/target pair. Typically, you can leave this
method alone as it already handles the most likely source of issues, but
in some cases you may want to handle even more with it.
============================================= ==========================================================================
Once you have defined these things, simply wrap the class with the :meth:`.RelativeOpNav.register` decorator (or call
the decorator on the un-initialized class object) and then you will be good to go.
As an example, let's build a new RelNav technique for using moments to compute the center-of-figure of a target in an
image (note that a moment based algorithm already exists in :mod:`.moment_algorithm` and this will be a much simpler
technique). First, we need to import the :class:`.RelNavEstimator` and :class:`.RelNavObservablesType` classes. We also
need to import the :class:`.RelativeOpNav` class so that we can register our new technique.
.. code::
from giant.relative_opnav.estimators import RelNavEstimator, RelNavObservablesType
from giant.relative_opnav.relnav_class import RelativeOpNav
from giant.point_spread_functions import Moment
import cv2  # we need OpenCV for the connected-components statistics constants
import numpy as np  # used for the geometry and masking in the estimate method
Now, we can define our class and the class/instance attributes we will use
.. code::
@RelativeOpNav.register # use the register decorator to register this new technique
# subclass the relnav estimator to get some of the concrete implementations it provides
class MomentCenterFindingSimple(RelNavEstimator):
technique = "simple_moments" # the name that will be used to identify the technique in the RelativeOpNav class
observable_type = [RelNavObservablesType.CENTER_FINDING] # we only generate center finding observables
generates_templates = False # we don't generate templates for this technique.
def __init__(self, scene, camera, image_processing, use_apparent_area=True,
apparent_area_margin_of_safety=2, search_range=None):
# let the super class prep most of our instance attributes
super().__init__(scene, camera, image_processing)
# store and or apply any extra options here
# this flag tells us to use the apparent diameter to predict the size
self.use_apparent_area = use_apparent_area
# this fudge factor is used to account for the fact that things aren't spherical and don't project to
# circles in most cases even if they are.
self.apparent_area_margin_of_safety = apparent_area_margin_of_safety
# specify the search range for trying to pair the identified segments with the a priori location of the
# target
self.search_range = search_range
if self.search_range is None:
self.search_range = max(self.camera.model.n_rows, self.camera.model.n_cols)
Now, we need to continue our class definition by defining the estimate method. Since this generates center finding
observables we need to be sure to populate both the :attr:`~.RelNavEstimator.computed_bearings` and
:attr:`~.RelNavEstimator.observed_bearings` attributes, as well as the :attr:`~.RelNavEstimator.details` attribute.
The details of what exactly we're doing for the technique here are out of scope and are further addressed in the
:mod:`.moment_algorithm` documentation.
.. code::
def estimate(self, image, include_targets=None):
image_processing_original_segment_area = self.image_processing.minimum_segment_area
# use the phase angle to predict the minimum size of a blob to expect assuming a spherical target.
# because many targets aren't spherical we give a factor of safety setting the minimum size to half the
# predicted area for each target.
if self.use_apparent_area:
minimum_area = None
# do it for each target and take the minimum one
for target_ind, target in self.target_generator(include_targets):
# compute the phase angle
phase = self.scene.phase_angle(target_ind)
# predict the apparent diameter in pixels
apparent_diameter = target.get_apparent_diameter(self.camera.model, temperature=image.temperature)
apparent_radius = apparent_diameter/2
# compute the predicted area in pixels assuming a projected circle for the illuminated limb and an
# ellipse for the terminator
if phase <= np.pi/2:
predicted_area = np.pi*apparent_radius**2/2*(1+np.cos(phase))
else:
predicted_area = np.pi*apparent_radius**2/2*(1-np.cos(phase))
# apply the margin of safety
predicted_area /= self.apparent_area_margin_of_safety
# store it if it is smaller
if minimum_area is None:
minimum_area = predicted_area
else:
minimum_area = min(predicted_area, minimum_area)
# set the minimum segment area for image processing
self.image_processing.minimum_segment_area = minimum_area
# segment our image using Otsu/connected components
segments, foreground, segment_stats, segment_centroids = self.image_processing.segment_image(image)
# process each target using the concrete target_generator method from the super class
for target_ind, target in self.target_generator(include_targets):
# predict the location of the center of figure by projecting the target location onto the image plane
# we assume that the scene has been updated to reflect the image time correctly and everything is already
# in the camera frame.
self.computed_bearings[target_ind] = self.camera.model.project_onto_image(target.position.ravel(),
temperature=image.temperature)
# figure out which segment is closest
closest_ind = None
closest_distance = None
for segment_ind, centroid in enumerate(segment_centroids):
distance = np.linalg.norm(centroid - self.computed_bearings[target_ind])
if closest_ind is None:
if distance < self.search_range:
closest_ind = segment_ind
closest_distance = distance
else:
if distance < closest_distance:
closest_ind = segment_ind
closest_distance = distance
# if nothing met the tolerance throw an error
if closest_ind is None:
raise ValueError(f"No segments were found within the search range. for target {target_ind}"
f"Please try adjusting your parameters and try again")
# now, get the observed centroid
# extract the region around the blob from the found segment. Include some extra pixels to capture things
# like the terminator. Use a fudge factor of 1 tenth of the sqrt of the area with a minimum of 10
fudge_factor = max(np.sqrt(segment_stats[closest_ind, cv2.CC_STAT_AREA])*0.1, 10)
top_left = np.floor(segment_stats[closest_ind, [cv2.CC_STAT_TOP, cv2.CC_STAT_LEFT]] -
fudge_factor).astype(int)
bottom_right = np.ceil(top_left + segment_stats[closest_ind, [cv2.CC_STAT_HEIGHT, cv2.CC_STAT_WIDTH]] +
2*fudge_factor).astype(int)
use_image = np.zeros(image.shape, dtype=bool)
use_image[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]] = \
foreground[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
# get the x/y pixels where we need to include in the centroiding
y, x = np.where(use_image)
# do the moment fit using the Moment "PSF"
fit = Moment.fit(x.astype(np.float64), y.astype(np.float64), image[use_image].astype(np.float64))
# store the fit in case people want to inspect it more closely
self.details[target_ind] = {'fit object': fit,
'minimum segment area': self.image_processing.minimum_segment_area}
# store the location of the centroid (typically we would phase correct this but not in this example)
self.observed_bearings[target_ind] = fit.centroid
# reset the image processing minimum segment area in case we messed with it
self.image_processing.minimum_segment_area = image_processing_original_segment_area
and that's it, we've now implemented a basic moment algorithm for RelNav. This new technique could be accessed from the
:class:`.RelativeOpNav` class using attribute ``simple_moments`` and it can be applied to images using method
``simple_moments_estimate``. We can also initialize our new technique directly through :class:`.RelativeOpNav` by
supplying
``simple_moment_kwargs={'use_apparent_area': True, 'apparent_area_margin_of_safety': 1.5, 'search_range': 200}``
as a key word argument to the :class:`.RelativeOpNav` initialization. Finally, we could retrieve the details about our
moment algorithm fit through the ``simple_moment_details`` attribute.
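For reference, here is a usage sketch of the registered technique (the exact ``RelativeOpNav`` constructor arguments
shown here, and the order of ``camera`` and ``scene``, are assumptions for illustration rather than taken from this
example):

.. code::

    from giant.relative_opnav.relnav_class import RelativeOpNav

    # camera and scene are assumed to be already-built GIANT Camera and Scene
    # objects for the images being processed
    relnav = RelativeOpNav(camera, scene,
                           simple_moment_kwargs={'use_apparent_area': True,
                                                 'apparent_area_margin_of_safety': 1.5,
                                                 'search_range': 200})

    # apply the new technique to the loaded images and inspect the stored fits
    relnav.simple_moments_estimate()
    print(relnav.simple_moment_details)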
.. rubric:: Adding a New Technique With a Custom Handler
Occasionally, you may need to implement a new RelNav type that doesn't work like the others. Perhaps it doesn't
generate bearing, position, or constraint measurements but something else entirely. Or perhaps it needs more than just
the image, camera, and scene to extract the measurements from the image. If this is the case then you have 2 options
for proceeding.
The first option is to define a custom handler for the :class:`.RelativeOpNav` class to use in place of the
:meth:`.default_estimator`. This custom handler should be a function that accepts at minimum the RelNav instance as the
first argument (essentially it should be a method for the :class:`.RelativeOpNav` class but defined outside of the class
definition). In addition, it typically should have 2 optional arguments ``image_ind`` and ``include_targets`` which can
be used to control what image/target pairs the technique is applied to, although that is strictly a convention and not a
requirement. Inside the function, you should handle preparing the scene and calling the estimate method of your
technique for each image requested (note that you can use the methods/attributes that exist in the
:class:`.RelativeOpNav` class to help you do some common things). You should also handle storing the results for each
image/target pair that is processed. You may need to do some fancy work at the beginning to check if an instance
attribute already exists in the RelNav instance using ``getattr`` and ``setattr``. Finally, you should ensure that the
:attr:`.RelNavEstimator.observable_type` class attribute is set to only ``CUSTOM``. This should then allow you to
register the technique with the :class:`.RelativeOpNav` class and use it as you would with a regular registered
technique.
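As a rough sketch of this first option (every name and call below is illustrative and would need to be adapted to your
technique and to how your images and scene are managed):

.. code::

    def my_custom_handler(self, image_ind=None, include_targets=None):
        # 'self' is the RelativeOpNav instance this handler is registered with
        image_indices = [image_ind] if image_ind is not None else range(len(self.camera.images))

        # create storage for the results on the RelNav instance on first use
        if getattr(self, 'my_technique_results', None) is None:
            setattr(self, 'my_technique_results', [None] * len(self.camera.images))

        for ind in image_indices:
            image = self.camera.images[ind]
            # prepare the scene to reflect the image time (details depend on your setup)
            self.scene.update(image)
            # call the technique's estimate method and store whatever it produced
            self.my_technique.estimate(image, include_targets=include_targets)
            self.my_technique_results[ind] = self.my_technique.details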
While that is one option, it may not be the best in a special case like this. The primary
0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
]
])
# Declares the covariant Ricci curvature tensor class object.
self.ricci_tensor_dd = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the contravariant Ricci curvature tensor class object.
self.ricci_tensor_uu = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the mixed Ricci curvature tensor class object.
self.ricci_tensor_ud = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares Ricci curvature tensor class object.
self.ricci_scalar = 0
# Declares the covariant Einstein curvature tensor class object.
self.einstein_tensor_dd = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the contravariant Einstein curvature tensor class object.
self.einstein_tensor_uu = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the mixed Einstein curvature tensor class object.
self.einstein_tensor_ud = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the covariant stress-energy tensor class object.
self.stress_energy_tensor_dd = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the contravariant stress-energy tensor class object.
self.stress_energy_tensor_uu = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the mixed stress-energy tensor class object.
self.stress_energy_tensor_ud = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the contravariant Schouten tensor class object.
self.schouten_tensor_uu = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares the covariant Schouten tensor class object.
self.schouten_tensor_dd = Matrix([
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
])
# Declares cosmological constant class object.
self.cosmological_constant = 0
# Acceleration vectors.
self.proper_acceleration = [ 0, 0, 0, 0 ]
self.coordinate_acceleration = [ 0, 0, 0, 0 ]
self.geodesic_deviation_acceleration = [ 0, 0, 0, 0 ]
# Velocity vectors.
self.proper_velocity = [ 0, 0, 0, 0 ]
self.coordinate_velocity = [ 0, 0, 0, 0 ]
self.geodesic_velocity = [ 0, 0, 0, 0 ]
# Position vectors.
self.proper_position = [ 0, 0, 0, 0 ]
self.coordinate_position = [ 0, 0, 0, 0 ]
self.geodesic_deviation_position = [ 0, 0, 0, 0 ]
"""
Initializing object functions
=============================
"""
# TODO
# finish all of these functions.
self.set_all_metric_coefficients("dd")
#self.set_all_metric_coefficients("uu")
self.set_all_connection_coefficients("udd")
#self.set_all_connection_coefficients("ddd")
self.set_all_riemann_coefficients("uddd")
#self.set_all_riemann_coefficients("dddd")
self.set_all_ricci_coefficients("dd")
#self.set_all_weyl_coefficients("dddd")
#self.set_all_weyl_coefficients("uddd")
self.set_all_schouten_coefficients("dd")
#self.set_all_cotton_coefficients("ddd")
#self.set_all_ricci_coefficients("uu")
#self.set_all_ricci_coefficients("ud")
self.set_ricci_scalar()
self.set_all_einstein_coefficients("dd")
#self.set_all_einstein_coefficients("uu")
#self.set_all_einstein_coefficients("ud")
self.set_all_stress_energy_coefficients("dd")
#self.set_all_stress_energy_coefficients("uu")
#self.set_all_stress_energy_coefficients("ud")
#self.set_cosmological_constant(solution[3])
self.set_all_proper_time_geodesic_accelerations()
self.set_all_coordinate_time_geodesic_accelerations()
self.set_all_geodesic_deviation_accelerations()
"""
Metric coefficient functions
============================
"""
def get_metric_coefficient(self, index_config, mu, nu):
"""
Description
===========
Gets a single metric coefficient from class object for a given index configuration and index value pair.
Example
=======
>> newtonian = SpaceTime(Solution().weak_field_approximation(), True)
>> print(newtonian.get_metric_coefficient("uu",1,1))
>>
LaTeX representation
====================
g_{ij}
g^{ij}
URL Reference
=============
https://en.wikipedia.org/wiki/Metric_tensor
TODOs
=====
- Link example with test.
- Need higher quality tests.
"""
if (index_config == "uu"):
return self.metric_tensor_uu[mu, nu]
elif(index_config == "dd"):
return self.metric_tensor_dd[mu, nu]
else:
print("Invalid index_config string.")
def set_metric_coefficient(self, index_config, mu, nu, expression):
"""
Description
===========
Sets a single metric coefficient equal to a given expression.
WARNING: This function is used for memory management purposes and is not recommended for regular use, since it can easily create contradictions within a solution. This may have more uses in the future.
Example
=======
>> newtonian = SpaceTime(Solution().weak_field_approximation(), True)
>> newtonian.set_metric_coefficient("uu", 1, 1, 0)
LaTeX representation
====================
g_{23} = 0 # set_metric_coefficient("dd",2,3,0)
g^{03} = 0 # set_metric_coefficient("uu",0,3,0)
URL Reference
=============
https://en.wikipedia.org/wiki/Metric_tensor
TODOs
=====
- Link example with test.
- Need higher quality tests.
"""
if (index_config == "uu"):
self.metric_tensor_uu[mu,nu] = expression
elif(index_config == "dd"):
self.metric_tensor_dd[mu,nu] = expression
else:
print("Invalid index_config string.")
def set_all_metric_coefficients(self, index_config):
"""
Description
===========
Sets all metric coefficients for a given index configuration. It retrieves these values from the solution input.
* Effectively this function only is needed when the user specifies a print on object creation.
Example
=======
>> newtonian = SpaceTime(Solution().weak_field_approximation(), True)
>> newtonian.set_all_metric_coefficients("uu") # Redundant because this function is called at creation of the SpaceTime object.
LaTeX representation
====================
g_{ij}
g^{ij}
URL Reference
=============
https://en.wikipedia.org/wiki/Metric_tensor
TODOs
=====
- Link example with test.
- Need higher quality tests.
"""
if (index_config == "uu"):
if(self.suppress_printing == False):
print("")
print("")
print("Metric tensor coefficients (uu)")
print("===============================")
for mu in self.dimensions:
for nu in self.dimensions:
self.print_metric_coefficient(index_config, mu, nu)
elif(index_config == "dd"):
if(self.suppress_printing == False):
print("")
print("")
print("Metric tensor coefficients (dd)")
print("===============================")
for mu in self.dimensions:
for nu in self.dimensions:
self.print_metric_coefficient(index_config, mu, nu)
else:
print("Invalid index_config string.")
def print_metric_coefficient(self, index_config, mu, nu):
"""
Description
===========
Prints a single metric tensor coefficient.
Example
=======
>> newtonian = SpaceTime(Solution().weak_field_approximation(), True)
>> newtonian.print_metric_coefficient("uu",3,1)
0
LaTeX representation
====================
g_{ij}
g^{ij}
URL Reference
=============
https://en.wikipedia.org/wiki/Metric_tensor
TODOs
=====
- Link example with test.
- Need higher quality tests.
"""
if (index_config == "uu"):
print("")
pprint(Eq(Symbol('g^%s%s' % (mu, nu)), self.get_metric_coefficient(index_config, mu, nu)))
elif(index_config == "dd"):
print("")
pprint(Eq(Symbol('g_%s%s' % (mu, nu)), self.get_metric_coefficient(index_config, mu, nu)))
else:
print("Invalid index_config string.")
def print_all_metric_coefficients(self, index_config):
"""
Description
===========
Prints all metric tensor coefficients.
Example
=======
>> newtonian = SpaceTime(Solution().weak_field_approximation(), True)
>> newtonian.print_all_metric_coefficients("uu")
...
LaTeX representation
====================
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.db.models import Max, Q
from easy_thumbnails.files import get_thumbnailer
from jfu.http import upload_receive, UploadResponse, JFUResponse
import json
import datetime
import pytz
import uuid
import os
from sendfile import sendfile
import importlib
import copy
import inspect
import urllib
from wand.image import Image
from accounting_core.utils import CostCenterLinked
from generic.datatables import generic_list_json
from generic.forms import ContactForm
from app.utils import update_current_unit, get_current_unit, update_current_year, get_current_year, send_templated_mail, has_property, set_property
from rights.utils import BasicRightModel
def get_unit_data(model_class, request, allow_blank=True, allow_all_units=False):
from generic.models import GenericExternalUnitAllowed
unit_mode = hasattr(model_class.MetaData, 'has_unit') and model_class.MetaData.has_unit
unit_blank = allow_blank and unit_mode and issubclass(model_class, GenericExternalUnitAllowed)
current_unit = None
if unit_mode:
if request.GET.get('upk'):
update_current_unit(request, request.GET.get('upk'))
if request.POST.get('upk'):
update_current_unit(request, request.POST.get('upk'))
current_unit = get_current_unit(request, unit_blank, allow_all_units)
if current_unit and current_unit.is_hidden:
# Prevents would-be clever users from cheating by setting the IDs by hand
if not current_unit.check_if_can_use_hidden(request.user):
raise Http404
return unit_mode, current_unit, unit_blank
def get_year_data(model_class, request):
from accounting_core.utils import AccountingYearLinked
from accounting_core.models import AccountingYear
year_mode = issubclass(model_class, AccountingYearLinked)
current_year = None
if year_mode:
if request.GET.get('ypk'):
update_current_year(request, request.GET.get('ypk'))
if request.POST.get('ypk'):
update_current_year(request, request.POST.get('ypk'))
current_year = get_current_year(request)
return year_mode, current_year, AccountingYear
def generate_generic_list(module, base_name, model_class, json_view_suffix, right_to_check, right_to_check_edit, template_to_use, allow_blank, object_filter=False, bonus_args_transformator=None, tag_class=None, allow_all_units=False):
@login_required
def _generic_generic_list(request, **bonus_args):
json_view = '%s.views.%s%s' % (module.__name__, base_name, json_view_suffix)
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
deleted_view = '%s.views.%s_deleted' % (module.__name__, base_name)
status_view = '%s.views.%s_switch_status' % (module.__name__, base_name)
logs_view = '%s.views.%s_logs' % (module.__name__, base_name)
tag_search_view = '%s.views.%s_tag_search' % (module.__name__, base_name)
mayi_view = '%s.views.%s_mayi' % (module.__name__, base_name)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_blank=allow_blank, allow_all_units=allow_all_units)
main_unit = None
allow_all_units_ = allow_all_units # Need a local copy
if unit_mode:
# Remove upk in urls (unit has been changed)
if 'upk' in request.GET:
get_params = dict(request.GET.iterlists())
del get_params['upk']
return HttpResponseRedirect('{}?{}'.format(request.path, urllib.urlencode(get_params)))
from units.models import Unit
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
main_unit.set_rights_can_select(lambda unit: model_class.static_rights_can(right_to_check, request.user, unit, current_year))
main_unit.set_rights_can_edit(lambda unit: model_class.static_rights_can(right_to_check_edit, request.user, unit, current_year))
main_unit.check_if_can_use_hidden(request.user)
allow_all_units_ = allow_all_units and model_class.static_rights_can(right_to_check, request.user, main_unit, current_year)
else:
# The LIST right is not verified here if we're in unit mode. We
# need to test (in the view) whether another unit is available for
# LIST if the current unit isn't!
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can(right_to_check, request.user, current_unit, current_year):
raise Http404
if hasattr(model_class, 'moderable_object') and model_class.moderable_object: # If the object is moderable, list all moderable things by the current user
# List all moderables in the 'todo' state
moderables = model_class.objects.filter(status=model_class.moderable_state).exclude(deleted=True)
# Filter to check if user has rights
moderables = filter(lambda m: m.rights_can('VALIDATE', request.user), moderables)
else:
moderables = False
if object_filter and hasattr(model_class, 'get_linked_object_class'):
objects = model_class.get_linked_object_class().objects.filter(unit=current_unit)
else:
objects = []
if bonus_args_transformator:
extra_data = bonus_args_transformator(request, **bonus_args) or {}
else:
extra_data = {}
data = {
'Model': model_class, 'json_view': json_view, 'edit_view': edit_view, 'deleted_view': deleted_view, 'show_view': show_view, 'status_view': status_view, 'logs_view': logs_view, 'tag_search_view': tag_search_view, 'mayi_view': mayi_view,
'unit_mode': unit_mode, 'main_unit': main_unit, 'unit_blank': unit_blank, 'allow_all_units': allow_all_units_,
'year_mode': year_mode, 'years_available': AccountingYear.build_year_menu('LIST', request.user),
'moderables': moderables, 'object_filter': objects, 'tag_mode': tag_class is not None, 'tag': request.GET.get('tag', ''),
}
data.update(extra_data)
if hasattr(model_class.MetaData, 'extra_args_for_list'):
data.update(model_class.MetaData.extra_args_for_list(request, current_unit, current_year))
return render(request, ['%s/%s/%s.html' % (module.__name__, base_name, template_to_use,), 'generic/generic/%s.html' % (template_to_use,)], data)
return _generic_generic_list
def generate_list(module, base_name, model_class, tag_class):
return generate_generic_list(module, base_name, model_class, '_list_json', 'LIST', 'CREATE', 'list', True, tag_class=tag_class, allow_all_units=True)
def generate_list_json(module, base_name, model_class, tag_class):
@login_required
@csrf_exempt
def _generic_list_json(request):
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
delete_view = '%s.views.%s_delete' % (module.__name__, base_name)
logs_view = '%s.views.%s_logs' % (module.__name__, base_name)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_all_units=True)
if unit_mode:
from units.models import Unit
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
all_units_mode = unit_mode and current_unit and current_unit.pk == -2
if all_units_mode:
unit_to_check = main_unit
else:
unit_to_check = current_unit
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can('LIST', request.user, unit_to_check, current_year):
raise Http404
if unit_mode and not all_units_mode:
if not current_unit:
if request.user.is_superuser or model_class.static_rights_can('LIST', request.user, main_unit, current_year): # Never filter
filter_ = lambda x: x.filter(unit=None)
else:
filter_ = lambda x: x.filter(unit=None, unit_blank_user=request.user)
else:
if hasattr(model_class.MetaData, 'costcenterlinked') and model_class.MetaData.costcenterlinked:
filter_ = lambda x: x.filter(Q(costcenter__deleted=False) & (Q(costcenter__unit=current_unit) | (Q(costcenter__unit__parent_hierarchique=current_unit) & Q(costcenter__unit__is_commission=False))))
else:
filter_ = lambda x: x.filter(unit=current_unit)
else:
filter_ = lambda x: x
if year_mode:
filter__ = lambda x: filter_(x).filter(accounting_year=current_year)
else:
filter__ = filter_
if hasattr(model_class.MetaData, 'extra_filter_for_list'):
filter___ = model_class.MetaData.extra_filter_for_list(request, unit_to_check, current_year, filter__)
else:
filter___ = filter__
tag = request.GET.get('tag')
if tag_class and tag:
filter____ = lambda x: filter___(x).filter(tags__tag=tag).distinct()
else:
filter____ = filter___
return generic_list_json(request, model_class, [col for (col, disp) in model_class.MetaData.list_display] + ['pk'], [module.__name__ + '/' + base_name + '/list_json.html', 'generic/generic/list_json.html'],
{'Model': model_class,
'show_view': show_view,
'edit_view': edit_view,
'delete_view': delete_view,
'logs_view': logs_view,
'list_display': model_class.MetaData.list_display,
'all_units_mode': all_units_mode,
},
True, model_class.MetaData.filter_fields,
bonus_filter_function=filter____,
selector_column=True,
bonus_total_filter_function=filter___,
)
return _generic_list_json
def generate_list_related(module, base_name, model_class):
return generate_generic_list(module, base_name, model_class, '_list_related_json', 'VALIDATE', 'VALIDATE', 'list_related', False, True)
def generate_list_related_json(module, base_name, model_class):
@login_required
@csrf_exempt
def _generate_list_related_json(request):
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
delete_view = '%s.views.%s_delete' % (module.__name__, base_name)
logs_view = '%s.views.%s_logs' % (module.__name__, base_name)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_blank=False)
if unit_mode:
if hasattr(model_class.MetaState, 'filter_unit_field'):
filter_ = lambda x: x.filter(**{model_class.MetaState.filter_unit_field.replace('.', '__'): current_unit}).distinct()
else:
filter_ = lambda x: x.filter(**{model_class.MetaState.unit_field.replace('.', '__'): current_unit}).distinct()
else:
filter_ = lambda x: x.distinct()
if year_mode:
filter__ = lambda x: filter_(x).filter(accounting_year=current_year)
else:
filter__ = filter_
def filter_object(qs, request):
if request.POST.get('sSearch_0'):
if hasattr(model_class.MetaState, 'filter_unit_field'):
return qs.filter(**{'__'.join(model_class.MetaState.filter_unit_field.split('.')[:-1] + ['pk']): request.POST.get('sSearch_0'), model_class.MetaState.filter_unit_field.replace('.', '__'): current_unit})
else:
return qs.filter(**{'__'.join(model_class.MetaState.unit_field.split('.')[:-1] + ['pk']): request.POST.get('sSearch_0'), model_class.MetaState.unit_field.replace('.', '__'): current_unit})
else:
return qs
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can('VALIDATE', request.user, current_unit, current_year):
raise Http404
return generic_list_json(request, model_class, [col for (col, disp) in model_class.MetaData.list_display_related] + ['pk'], [module.__name__ + '/' + base_name + '/list_related_json.html', 'generic/generic/list_related_json.html'],
{'Model': model_class,
'show_view': show_view,
'edit_view': edit_view,
'delete_view': delete_view,
'logs_view': logs_view,
'list_display': model_class.MetaData.list_display_related,
'upk_noswitch': True, 'from_related': True,
},
True, model_class.MetaData.filter_fields,
bonus_filter_function=filter__,
bonus_filter_function_with_parameters=filter_object,
deca_one_status=True,
selector_column=True,
)
return _generate_list_related_json
def generate_edit(module, base_name, model_class, form_class, log_class, file_class, tag_class):
from accounting_tools.models import LinkedInfo
@login_required
def _generic_edit(request, pk):
list_view = '%s.views.%s_list' % (module.__name__, base_name)
list_related_view = '%s.views.%s_list_related' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
file_upload_view = '%s.views.%s_file_upload' % (module.__name__, base_name)
file_delete_view = '%s.views.%s_file_delete' % (module.__name__, base_name)
file_get_view = '%s.views.%s_file_get' % (module.__name__, base_name)
file_get_thumbnail_view = '%s.views.%s_file_get_thumbnail' % (module.__name__, base_name)
tag_search_view = '%s.views.%s_tag_search' % (module.__name__, base_name)
related_mode = request.GET.get('_fromrelated') == '_'
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
extra_args = {}
try:
obj = model_class.objects.get(pk=pk, deleted=False)
if unit_mode:
obj_unit = obj.costcenter.unit if isinstance(obj, CostCenterLinked) else obj.unit
update_current_unit(request, obj_unit.pk if obj_unit else -1)
current_unit = obj_unit
if year_mode:
update_current_year(request, obj.accounting_year.pk)
current_year = obj.accounting_year
if isinstance(obj, BasicRightModel) and not obj.rights_can('EDIT', request.user):
raise Http404
except (ValueError, model_class.DoesNotExist):
obj = model_class()
if hasattr(model_class, 'MetaEdit') and hasattr(model_class.MetaEdit, 'set_extra_defaults'):
model_class.MetaEdit.set_extra_defaults(obj, request)
if unit_mode:
if unit_blank and not current_unit:
obj.unit_blank_user = request.user
if has_property(obj, 'MetaData.costcenterlinked') and obj.MetaData.costcenterlinked and current_unit.costcenter_set.first():
obj.costcenter = current_unit.costcenter_set.first()
if has_property(obj, obj.MetaRights.linked_unit_property):
set_property(obj, obj.MetaRights.linked_unit_property, current_unit)
else:
obj.unit = current_unit
if year_mode:
                # Are we about to create something in an accounting year
                # where that is not allowed?
if current_year not in AccountingYear.build_year_menu('CREATE', request.user):
update_current_year(request, None)
___, current_year, ___ = get_year_data(model_class, request)
obj.accounting_year = current_year
if unit_mode and isinstance(obj, BasicRightModel) and not obj.rights_can('CREATE', request.user) and current_unit:
                # Try to find a suitable unit, since the user may reach this
                # page without using a create button (e.g. after switching
                # the current unit)
from units.models import Unit
for test_unit in Unit.objects.order_by('?'):
if has_property(obj, obj.MetaRights.linked_unit_property):
set_property(obj, obj.MetaRights.linked_unit_property, test_unit)
else:
obj.unit = test_unit
if obj.rights_can('CREATE', request.user):
current_unit = test_unit
break
# Set the original (or new) unit
if has_property(obj, obj.MetaRights.linked_unit_property):
set_property(obj, obj.MetaRights.linked_unit_property, current_unit)
else:
obj.unit = current_unit
if isinstance(obj, BasicRightModel) and not obj.rights_can('CREATE', request.user):
raise Http404
if unit_mode:
from units.models import Unit
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
main_unit.set_rights_can_select(lambda unit: model_class.static_rights_can('CREATE', request.user, unit, current_year))
main_unit.set_rights_can_edit(lambda unit: model_class.static_rights_can('CREATE', request.user, unit, current_year))
main_unit.check_if_can_use_hidden(request.user)
else:
main_unit = None
if obj.pk:
before_data = obj.build_state()
else:
before_data = None
file_mode = file_class is not None
file_key = None # Will be set later
from generic.models import GenericModelWithLines
        lines_objects
# File: signjoey/prediction.py
#!/usr/bin/env python
import torch
torch.backends.cudnn.deterministic = True
import logging
import numpy as np
import pickle as pickle
import time
import torch.nn as nn
from typing import List
from torchtext.data import Dataset
from signjoey.loss import XentLoss
from signjoey.helpers import (
bpe_postprocess,
load_config,
get_latest_checkpoint,
load_checkpoint,
)
from signjoey.metrics import bleu, chrf, rouge, wer_list
from signjoey.model import build_model, SignModel
from signjoey.batch import Batch
from signjoey.data import load_data, make_data_iter
from signjoey.vocabulary import PAD_TOKEN, SIL_TOKEN
from signjoey.phoenix_utils.phoenix_cleanup import (
clean_phoenix_2014,
clean_phoenix_2014_trans,
)
# pylint: disable=too-many-arguments,too-many-locals,no-member
def validate_on_data(
model: SignModel,
data: Dataset,
batch_size: int,
use_cuda: bool,
sgn_dim: int,
do_recognition: bool,
recognition_loss_function: torch.nn.Module,
recognition_loss_weight: int,
do_translation: bool,
translation_loss_function: torch.nn.Module,
translation_loss_weight: int,
translation_max_output_length: int,
level: str,
txt_pad_index: int,
    fusion_type: str,
recognition_beam_size: int = 1,
translation_beam_size: int = 1,
translation_beam_alpha: int = -1,
batch_type: str = "sentence",
dataset_version: str = "phoenix_2014_trans",
frame_subsampling_ratio: int = None,
) -> dict:
"""
Generate translations for the given data.
    If the loss functions are not None and references are given,
    also compute the losses.
:param model: model module
:param data: dataset for validation
:param batch_size: validation batch size
:param use_cuda: if True, use CUDA
:param translation_max_output_length: maximum length for generated hypotheses
:param level: segmentation level, one of "char", "bpe", "word"
:param translation_loss_function: translation loss function (XEntropy)
:param recognition_loss_function: recognition loss function (CTC)
:param recognition_loss_weight: CTC loss weight
:param translation_loss_weight: Translation loss weight
:param txt_pad_index: txt padding token index
:param sgn_dim: Feature dimension of sgn frames
:param recognition_beam_size: beam size for validation (recognition, i.e. CTC).
If 0 then greedy decoding (default).
:param translation_beam_size: beam size for validation (translation).
If 0 then greedy decoding (default).
:param translation_beam_alpha: beam search alpha for length penalty (translation),
disabled if set to -1 (default).
:param batch_type: validation batch type (sentence or token)
:param do_recognition: flag for predicting glosses
:param do_translation: flag for predicting text
:param dataset_version: phoenix_2014 or phoenix_2014_trans
:param frame_subsampling_ratio: frame subsampling ratio
    :return: results dict; the keys present depend on the enabled tasks:
        - "valid_scores": metric values (wer / wer_scores for recognition;
          bleu / bleu_scores / chrf / rouge for translation),
        - "all_attention_scores": attention scores for validation hypotheses,
        - "valid_recognition_loss", "decoded_gls", "gls_ref", "gls_hyp"
          (only if do_recognition),
        - "valid_translation_loss", "valid_ppl", "decoded_txt", "txt_ref",
          "txt_hyp" (only if do_translation)
"""
valid_iter = make_data_iter(
dataset=data,
batch_size=batch_size,
batch_type=batch_type,
shuffle=False,
train=False,
)
# disable dropout
model.eval()
# don't track gradients during validation
with torch.no_grad():
all_gls_outputs = []
all_txt_outputs = []
all_attention_scores = []
total_recognition_loss = 0
total_translation_loss = 0
total_num_txt_tokens = 0
total_num_gls_tokens = 0
total_num_seqs = 0
for valid_batch in iter(valid_iter):
batch = Batch(
is_train=False,
torch_batch=valid_batch,
txt_pad_index=txt_pad_index,
sgn_dim=sgn_dim,
fusion_type=fusion_type,
use_cuda=use_cuda,
frame_subsampling_ratio=frame_subsampling_ratio,
)
sort_reverse_index = batch.sort_by_sgn_lengths()
batch_recognition_loss, batch_translation_loss = model.get_loss_for_batch(
batch=batch,
fusion_type=fusion_type,
recognition_loss_function=recognition_loss_function
if do_recognition
else None,
translation_loss_function=translation_loss_function
if do_translation
else None,
recognition_loss_weight=recognition_loss_weight
if do_recognition
else None,
translation_loss_weight=translation_loss_weight
if do_translation
else None,
)
if do_recognition:
total_recognition_loss += batch_recognition_loss
total_num_gls_tokens += batch.num_gls_tokens
if do_translation:
total_translation_loss += batch_translation_loss
total_num_txt_tokens += batch.num_txt_tokens
total_num_seqs += batch.num_seqs
(
batch_gls_predictions,
batch_txt_predictions,
batch_attention_scores,
) = model.run_batch(
batch=batch,
recognition_beam_size=recognition_beam_size if do_recognition else None,
translation_beam_size=translation_beam_size if do_translation else None,
translation_beam_alpha=translation_beam_alpha
if do_translation
else None,
translation_max_output_length=translation_max_output_length
if do_translation
else None,
)
# sort outputs back to original order
if do_recognition:
all_gls_outputs.extend(
[batch_gls_predictions[sri] for sri in sort_reverse_index]
)
if do_translation:
all_txt_outputs.extend(batch_txt_predictions[sort_reverse_index])
all_attention_scores.extend(
batch_attention_scores[sort_reverse_index]
if batch_attention_scores is not None
else []
)
if do_recognition:
assert len(all_gls_outputs) == len(data)
if (
recognition_loss_function is not None
and recognition_loss_weight != 0
and total_num_gls_tokens > 0
):
valid_recognition_loss = total_recognition_loss
else:
valid_recognition_loss = -1
# decode back to symbols
decoded_gls = model.gls_vocab.arrays_to_sentences(arrays=all_gls_outputs)
# Gloss clean-up function
if dataset_version == "phoenix_2014_trans":
gls_cln_fn = clean_phoenix_2014_trans
elif dataset_version == "phoenix_2014":
gls_cln_fn = clean_phoenix_2014
else:
raise ValueError("Unknown Dataset Version: " + dataset_version)
# Construct gloss sequences for metrics
gls_ref = [gls_cln_fn(" ".join(t)) for t in data.gls]
gls_hyp = [gls_cln_fn(" ".join(t)) for t in decoded_gls]
assert len(gls_ref) == len(gls_hyp)
# GLS Metrics
gls_wer_score = wer_list(hypotheses=gls_hyp, references=gls_ref)
if do_translation:
assert len(all_txt_outputs) == len(data)
if (
translation_loss_function is not None
and translation_loss_weight != 0
and total_num_txt_tokens > 0
):
# total validation translation loss
valid_translation_loss = total_translation_loss
# exponent of token-level negative log prob
valid_ppl = torch.exp(total_translation_loss / total_num_txt_tokens)
else:
valid_translation_loss = -1
valid_ppl = -1
# decode back to symbols
decoded_txt = model.txt_vocab.arrays_to_sentences(arrays=all_txt_outputs)
# evaluate with metric on full dataset
join_char = " " if level in ["word", "bpe"] else ""
# Construct text sequences for metrics
txt_ref = [join_char.join(t) for t in data.txt]
txt_hyp = [join_char.join(t) for t in decoded_txt]
# post-process
if level == "bpe":
txt_ref = [bpe_postprocess(v) for v in txt_ref]
txt_hyp = [bpe_postprocess(v) for v in txt_hyp]
assert len(txt_ref) == len(txt_hyp)
# TXT Metrics
txt_bleu = bleu(references=txt_ref, hypotheses=txt_hyp)
txt_chrf = chrf(references=txt_ref, hypotheses=txt_hyp)
txt_rouge = rouge(references=txt_ref, hypotheses=txt_hyp)
valid_scores = {}
if do_recognition:
valid_scores["wer"] = gls_wer_score["wer"]
valid_scores["wer_scores"] = gls_wer_score
if do_translation:
valid_scores["bleu"] = txt_bleu["bleu4"]
valid_scores["bleu_scores"] = txt_bleu
valid_scores["chrf"] = txt_chrf
valid_scores["rouge"] = txt_rouge
results = {
"valid_scores": valid_scores,
"all_attention_scores": all_attention_scores,
}
if do_recognition:
results["valid_recognition_loss"] = valid_recognition_loss
results["decoded_gls"] = decoded_gls
results["gls_ref"] = gls_ref
results["gls_hyp"] = gls_hyp
if do_translation:
results["valid_translation_loss"] = valid_translation_loss
results["valid_ppl"] = valid_ppl
results["decoded_txt"] = decoded_txt
results["txt_ref"] = txt_ref
results["txt_hyp"] = txt_hyp
return results
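# A rough sketch of how the returned dict is typically consumed (see test() below for a
# complete call); which keys are present depends on do_recognition / do_translation:
#     results = validate_on_data(model=model, data=dev_data, batch_size=32, ...)
#     wer = results["valid_scores"]["wer"]        # recognition only
#     bleu4 = results["valid_scores"]["bleu"]     # translation only
#     hypotheses = results.get("txt_hyp", [])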
# pylint: disable-msg=logging-too-many-args
def test(
cfg_file, ckpt: str, output_path: str = None, logger: logging.Logger = None
) -> None:
"""
Main test function. Handles loading a model from checkpoint, generating
translations and storing them and attention plots.
:param cfg_file: path to configuration file
:param ckpt: path to checkpoint to load
:param output_path: path to output
:param logger: log output to this logger (creates new logger if not set)
"""
if logger is None:
logger = logging.getLogger(__name__)
if not logger.handlers:
FORMAT = "%(asctime)-15s - %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(level=logging.DEBUG)
cfg = load_config(cfg_file)
if "test" not in cfg["data"].keys():
raise ValueError("Test data must be specified in config.")
# when checkpoint is not specified, take latest (best) from model dir
if ckpt is None:
model_dir = cfg["training"]["model_dir"]
ckpt = get_latest_checkpoint(model_dir)
if ckpt is None:
raise FileNotFoundError(
"No checkpoint found in directory {}.".format(model_dir)
)
batch_size = cfg["training"]["batch_size"]
batch_type = cfg["training"].get("batch_type", "sentence")
use_cuda = cfg["training"].get("use_cuda", False)
level = cfg["data"]["level"]
dataset_version = cfg["data"].get("version", "phoenix_2014_trans")
translation_max_output_length = cfg["training"].get(
"translation_max_output_length", None
)
# load the data
_, dev_data, test_data, gls_vocab, txt_vocab = load_data(data_cfg=cfg["data"])
# load model state from disk
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
do_recognition = cfg["training"].get("recognition_loss_weight", 1.0) > 0.0
do_translation = cfg["training"].get("translation_loss_weight", 1.0) > 0.0
if cfg["fusion_type"] == 'early_fusion':
add_dim = 2*84 + 2*21 + 2*13
    else:
add_dim = 0
model = build_model(
cfg=cfg["model"],
gls_vocab=gls_vocab,
txt_vocab=txt_vocab,
sgn_dim=sum(cfg["data"]["feature_size"]) + add_dim
if isinstance(cfg["data"]["feature_size"], list)
else cfg["data"]["feature_size"] + add_dim,
do_recognition=do_recognition,
do_translation=do_translation,
)
model.load_state_dict(model_checkpoint["model_state"])
if use_cuda:
model.cuda()
# Data Augmentation Parameters
frame_subsampling_ratio = cfg["data"].get("frame_subsampling_ratio", None)
# Note (Cihan): we are not using 'random_frame_subsampling' and
# 'random_frame_masking_ratio' in testing as they are just for training.
# whether to use beam search for decoding, 0: greedy decoding
if "testing" in cfg.keys():
recognition_beam_sizes = cfg["testing"].get("recognition_beam_sizes", [1])
translation_beam_sizes = cfg["testing"].get("translation_beam_sizes", [1])
translation_beam_alphas = cfg["testing"].get("translation_beam_alphas", [-1])
else:
recognition_beam_sizes = [1]
translation_beam_sizes = [1]
translation_beam_alphas = [-1]
if "testing" in cfg.keys():
max_recognition_beam_size = cfg["testing"].get(
"max_recognition_beam_size", None
)
if max_recognition_beam_size is not None:
recognition_beam_sizes = list(range(1, max_recognition_beam_size + 1))
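    # The "testing" section of the config roughly looks like this (keys as read above,
    # values illustrative only):
    #     testing:
    #         recognition_beam_sizes: [1, 2, 3]
    #         translation_beam_sizes: [1, 5]
    #         translation_beam_alphas: [-1, 1]
    #         max_recognition_beam_size: 5   # if set, overrides recognition_beam_sizes with range(1, N+1)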
if do_recognition:
recognition_loss_function = torch.nn.CTCLoss(
blank=model.gls_vocab.stoi[SIL_TOKEN], zero_infinity=True
)
if use_cuda:
recognition_loss_function.cuda()
if do_translation:
translation_loss_function = XentLoss(
pad_index=txt_vocab.stoi[PAD_TOKEN], smoothing=0.0
)
if use_cuda:
translation_loss_function.cuda()
# NOTE (Cihan): Currently Hardcoded to be 0 for TensorFlow decoding
assert model.gls_vocab.stoi[SIL_TOKEN] == 0
if do_recognition:
# Dev Recognition CTC Beam Search Results
dev_recognition_results = {}
dev_best_wer_score = float("inf")
dev_best_recognition_beam_size = 1
for rbw in recognition_beam_sizes:
logger.info("-" * 60)
valid_start_time = time.time()
logger.info("[DEV] partition [RECOGNITION] experiment [BW]: %d", rbw)
dev_recognition_results[rbw] = validate_on_data(
model=model,
data=dev_data,
batch_size=batch_size,
use_cuda=use_cuda,
batch_type=batch_type,
dataset_version=dataset_version,
sgn_dim=sum(cfg["data"]["feature_size"])
if isinstance(cfg["data"]["feature_size"], list)
else cfg["data"]["feature_size"],
txt_pad_index=txt_vocab.stoi[PAD_TOKEN],
fusion_type=cfg["model"]["fusion_type"],
# Recognition Parameters
do_recognition=do_recognition,
recognition_loss_function=recognition_loss_function,
recognition_loss_weight=1,
recognition_beam_size=rbw,
# Translation Parameters
do_translation=do_translation,
translation_loss_function=translation_loss_function
if do_translation
else None,
translation_loss_weight=1 if do_translation else None,
translation_max_output_length=translation_max_output_length
if do_translation
else None,
level=level if do_translation else None,
translation_beam_size=1 if do_translation else None,
translation_beam_alpha=-1 if do_translation else None,
frame_subsampling_ratio=frame_subsampling_ratio,
)
logger.info("finished in %.4fs ", time.time() - valid_start_time)
if dev_recognition_results[rbw]["valid_scores"]["wer"] < dev_best_wer_score:
dev_best_wer_score = dev_recognition_results[rbw]["valid_scores"]["wer"]
dev_best_recognition_beam_size = rbw
dev_best_recognition_result = dev_recognition_results[rbw]
logger.info("*" * 60)
logger.info(
"[DEV] partition | |
import os
import sys
import argparse
import json
import random
import soundfile as sf
from itertools import permutations, combinations
from math import factorial, inf
sys.path.append('..')
from wham_scripts.utils import read_scaled_wav
import matplotlib.pyplot as plt
from pprint import pprint
"""
this script randomly chooses target utterances used to build mixture audio;
it finally outputs a JSON metafile that tells the next step how to generate the mixtures
"""
# Path Information
P_SRC = "./THCHS30/data_thchs30"
P_SRC_TRN = P_SRC + "/train"
P_SRC_DEV = P_SRC + "/dev"
P_SRC_TST = P_SRC + "/test"
P_NOISY = "./high_res_wham/audio"
P_LOCAL = "./local"
P_META = P_LOCAL + "/metafile"
P_TMP = P_LOCAL + '/tmp'
# generation parameters, overridden by command-line args
N_SRC = 4
N_PREMIX = 5
N_NOISY_USE = 2000 # number of noise clips used in synthesis
N_MAX_DB = 2.5
N_USE_SP = 2 # max number of times a specific audio clip may be used
"""
decode geometry information
geoInfo: list of coordinate strings ["x0,y0", "x1,y1", ..., "xn,yn"] (optionally "x,y,z")
"""
def decodeGeo(geoInfo):
arrayGeo = []
for cords in geoInfo:
cord = cords.split(',')
if len(cord) not in [2, 3]:
raise Exception('only support 2/3-d coord')
x, y = float(cord[0]), float(cord[1])
z = 1 if len(cord) == 2 else float(cord[2])
arrayGeo.append([x, y, z])
return arrayGeo
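# Example of the accepted strings and the produced array (a sketch):
#     decodeGeo(["0,0", "1.5,2,0.8"]) -> [[0.0, 0.0, 1], [1.5, 2.0, 0.8]]
# 2-d coordinates get a default z of 1.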
"""
group wav files by speaker
"""
def catalize(data_list, path):
    spk_id = None # current speaker id for spk_wav
    spk_wav = [] # temporarily holds wav info for the current speaker
    spk_cat = [] # accumulated per-speaker category info
idx = 0
for k in data_list:
spk_id_new = k[:k.find("_")]
if spk_id != spk_id_new:
if len(spk_wav) > 0:
spk_cat.append({
'spk_id': spk_id,
'wav' : spk_wav
})
spk_wav = []
spk_id = spk_id_new
spk_wav.append({
'fname' : k,
'id' : idx,
'path' : r"{}/{}".format(path, k)
})
idx += 1
return spk_cat
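# The returned category list roughly looks like this (values illustrative):
#     [{'spk_id': 'A11',
#       'wav': [{'fname': 'A11_0.wav', 'id': 0,
#                'path': './THCHS30/data_thchs30/train/A11_0.wav'}, ...]},
#      ...]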
"""
choose samples from the sound sources (for 2 speakers)
"""
def chooseSample_2src(category):
n_speaker = len(category)
premix = []
for s1_idx in range(n_speaker - 1):
ut1Idx = [v for v in range(len(category[s1_idx]['wav']))]
random.shuffle(ut1Idx)
for s2_idx in range(s1_idx + 1, n_speaker):
ut2Idx = [v for v in range(len(category[s2_idx]['wav']))]
random.shuffle(ut2Idx)
if N_PREMIX > len(ut1Idx) or N_PREMIX > len(ut2Idx):
continue
for ut1, ut2 in zip(ut1Idx[:N_PREMIX], ut2Idx[:N_PREMIX]):
db = random.random() * N_MAX_DB
premix.append({
's1' : category[s1_idx]['wav'][ut1],
's2' : category[s2_idx]['wav'][ut2],
'permua' : random.randint(0, 1),
'db' : [db, -db],
'summery' : {
's1_spk' : category[s1_idx]['spk_id'],
's2_spk' : category[s2_idx]['spk_id'],
}
})
random.shuffle(premix)
return premix
"""
(for 3 speakers)
"""
def chooseSample_3src(category):
n_speaker = len(category)
premix = []
for s1_idx in range(n_speaker - 2):
ut1Idx = [v for v in range(len(category[s1_idx]['wav']))]
random.shuffle(ut1Idx)
for s2_idx in range(s1_idx + 1, n_speaker - 1):
ut2Idx = [v for v in range(len(category[s2_idx]['wav']))]
random.shuffle(ut2Idx)
for s3_idx in range(s2_idx + 1, n_speaker):
ut3Idx = [v for v in range(len(category[s3_idx]['wav']))]
random.shuffle(ut3Idx)
if N_PREMIX > len(ut1Idx) or N_PREMIX > len(ut2Idx) or N_PREMIX > len(ut3Idx):
continue
for ut1, ut2, ut3 in zip(ut1Idx[:N_PREMIX], ut2Idx[:N_PREMIX], ut3Idx[:N_PREMIX]):
db = random.random() * N_MAX_DB
premix.append({
's1' : category[s1_idx]['wav'][ut1],
's2' : category[s2_idx]['wav'][ut2],
's3' : category[s3_idx]['wav'][ut3],
'permua' : random.randint(0, 5),
'db' : [db, 0, -db],
'summery' : {
's1_spk' : category[s1_idx]['spk_id'],
's2_spk' : category[s2_idx]['spk_id'],
's3_spk' : category[s3_idx]['spk_id'],
}
})
random.shuffle(premix)
return premix
"""
(for more than 3 speakers)
"""
def chooseSample_nsrc(category):
n_speaker = len(category)
combList = combinations(range(n_speaker), N_SRC)
permutNum = factorial(N_SRC)
premix = []
for spks in combList:
utIdx = [[v for v in range(len(category[id]['wav']))] for id in spks]
minUtSize = inf
validComb = True
for segId in utIdx:
if N_PREMIX > len(segId):
validComb = False
break
random.shuffle(segId)
minUtSize = min(minUtSize, len(segId))
if not validComb:
continue
minUtSize = N_PREMIX if N_PREMIX != -1 else minUtSize
for _utId in range(minUtSize):
db = [(random.random() * 2 - 1) * N_MAX_DB for _ in range(N_SRC)]
_mix = {
'permua' : random.randint(0, permutNum - 1),
'db' : db,
'summery' : {},
}
for _id in range(N_SRC):
_mix[f's{_id + 1}'] = category[spks[_id]]['wav'][utIdx[_id][_utId]]
_mix['summery'][f's{_id + 1}_spk'] = category[spks[_id]]['spk_id']
premix.append(_mix)
if len(premix) % 10000 == 0:
print('.', end='')
random.shuffle(premix)
return premix
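# Each premix entry produced above holds one wav record per source ('s1' .. 'sN'), a random
# permutation index, per-source gain offsets in dB and a 'summery' dict with the speaker
# ids - the same shape as the 2/3-speaker variants above. A sketch:
#     {'s1': {...}, 's2': {...}, 'permua': 3, 'db': [1.2, -0.4], 'summery': {'s1_spk': 'A11', ...}}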
"""
generate details of the recipe, including mixture length, noise start point, etc.
"""
def genDetailOfRecipe(recipe, noisy_info, room, arrayGeo, minSSL, noisyCfg='rand'):
mixture = []
used = {}
lst_permut = permutations(range(1))
noisy, noisy_idx = noisy_info
n_noisy = len(noisy_idx)
l2dist = lambda x,y: (x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 + (x[2] - y[2]) ** 2
for r in recipe:
spkValid = True
for spk in range(N_SRC):
spkStr = f's{spk + 1}'
spkId = r[spkStr]['id']
if spkId in used and used[spkId] >= N_USE_SP:
spkValid = False
break
if not spkValid:
continue
mix = {
'db' : r['db'],
'permutation' : r['permua'],
'summery' : r['summery'],
}
s_len = []
out_name = ''
for spk in range(N_SRC):
spkStr = f's{spk + 1}'
spkId = r[spkStr]['id']
used[spkId] = 1 if spkId not in used else used[spkId] + 1
out_name = out_name + r[spkStr]['fname'][:-4] + '-'
s_len.append(len(read_scaled_wav(r[spkStr]['path'], 1, True)))
mix[spkStr + '_path'] = r[spkStr]['path']
mix['name'] = out_name + '.wav'
"""
        read wav files from disk, get the length of each wav file,
        decide the mixture length, the noise start point, etc.
"""
# TODO: only min mode now
mix_len = min(s_len)
idx_n = noisy_idx[random.randint(0, n_noisy - 1)]
while noisy[idx_n]['len'] < mix_len:
idx_n = noisy_idx[random.randint(0, n_noisy - 1)]
noisy_start = random.randint(0, noisy[idx_n]['len'] - mix_len)
# location of sound source
if room is not None:
            # N_SRC sound sources + 1 noise source; only a single noise source is supported for now.
mix['ssl'] = []
for _ in range(N_SRC + 1):
ssl = arrayGeo[0]
d2 = minSSL ** 2
while d2 > l2dist(ssl, arrayGeo[0]):
ssl = [random.random() * room[0], random.random() * room[1], random.random() * room[2]]
mix['ssl'].append(ssl)
mix['noisy_path'] = noisy[idx_n]['path']
mix['noisy_start'] = noisy_start
mix['len'] = mix_len
mixture.append(mix)
if len(mixture) % 100 == 0:
print('.', end='')
return mixture
"""
get noise audio information such as audio length
info      : per-clip information, keyed by clip index
noisy_idx : indices of the clips actually used in generation
"""
def loadNoisyInfo():
_t = os.listdir(P_NOISY)
_t.sort()
lst_noisy = [(k, v) for k, v in zip(range(len(_t)),_t)] # set of tuple (idx, path)
random.shuffle(lst_noisy)
print('Loading noisy info from json...', end='')
info = {}
noisy_idx = []
os.makedirs(P_TMP, exist_ok=True)
if os.path.exists(P_TMP + '/noisy_info.json'):
with open(P_TMP + '/noisy_info.json', 'r') as f:
_t = json.load(f)
info = {int(v[0]):v[1] for v in _t.items()}
print('Complete!\nLoading noisy info from wav', end='')
for idx, path in lst_noisy[:N_NOISY_USE]:
noisy_idx.append(idx)
if len(noisy_idx) % 100 == 0:
print('.', end='')
if idx in info.keys():
continue
info[idx] = {
'path': P_NOISY + '/' + path,
'len' : len(read_scaled_wav(r"{}/{}".format(P_NOISY, path), 1, True))
}
with open(P_TMP + '/noisy_info.json', 'w') as f:
json.dump(info, f)
print('Complete!')
return info, noisy_idx
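# The cached ./local/tmp/noisy_info.json maps clip index -> path/length so wav lengths only
# have to be read from disk once. Roughly:
#     {"0": {"path": "./high_res_wham/audio/<clip>.wav", "len": 480000}}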
"""
Do some summary bookkeeping
"""
def summeryRecipe(mixture):
    n_spkSample = {} # counts how many times each speaker's audio is used in the mixtures
for r in mixture:
s1_spk = r['summery']['s1_spk']
s2_spk = r['summery']['s2_spk']
n_spkSample[s1_spk] = 1 if s1_spk not in n_spkSample else n_spkSample[s1_spk] + 1
n_spkSample[s2_spk] = 1 if s2_spk not in n_spkSample else n_spkSample[s2_spk] + 1
return n_spkSample
"""
plot a bar chart of per-speaker usage counts in the mixtures
"""
def plotBarOfNum(s):
p_bar_spk_x = {}
p_bar_color = ['r', 'g', 'b']
datasetName = ['tr', 'cv', 'tt']
for idx, c in zip(range(3), p_bar_color):
s_Num = s[datasetName[idx] + 'NumUsed']
for spk, num in s_Num.items():
if spk not in p_bar_spk_x:
p_bar_spk_x[spk] = len(p_bar_spk_x)
plt.bar(p_bar_spk_x[spk] + idx * 0.3, num, width=0.3, color=c)
pprint(p_bar_spk_x)
plt.show()
"""
generate the mixture metafile
"""
def genMetafile(args):
global N_SRC, N_PREMIX, N_USE_SP
N_SRC = args.src
N_PREMIX = args.premix
N_USE_SP = args.dupli
if args.arrayGeo is not None:
arrayGeometry = decodeGeo(args.arrayGeo)
# only support one room setting
roomInfo = decodeGeo(args.room)[0]
else:
arrayGeometry = None
roomInfo = None
PLOT_STAT_FLG = args.static
"""
    file:
        list of file names under P_SRC_*
    path:
        the directory containing those files
    category:
        list of [
            spk_id (like 'A11'),
            info for each of spk_id's wav files [
                filename,
                idx (index within tr, tt or cv),
                path]]
    recipe:
        list of [
            recipe of a clean mix, with info for each s? [
                path of the original speech,
                and its idx],
            permutation, the index of the permutation used during generation,
            db applied to the 1st, 2nd or 3rd sound of the recipe]
"""
Dataset = [
{'name':'tr', 'path': P_SRC_TRN},
{'name':'cv', 'path': P_SRC_DEV},
{'name':'tt', 'path': P_SRC_TST},
]
noisy = loadNoisyInfo()
summery = {}
P_JSON = P_META + f'/{N_SRC}speakers'
os.makedirs(P_META, exist_ok=True)
os.makedirs(P_JSON, exist_ok=True)
for ds in Dataset:
print(r"Gen {} speech samples...".format(ds['name']), end='')
ds['file'] = [k for k in os.listdir(ds['path']) if not k.endswith('.trn')]
ds['file'].sort()
ds['category'] = catalize(ds['file'], ds['path'])
# Repository: grst/diffxpy
import abc
try:
import anndata
except ImportError:
anndata = None
import batchglm.api as glm
import logging
import numpy as np
import patsy
import pandas as pd
from random import sample
import scipy.sparse
from typing import Union, Dict, Tuple, List, Set
from .utils import split_x, dmat_unique
from ..stats import stats
from . import correction
from diffxpy import pkg_constants
logger = logging.getLogger("diffxpy")
class _DifferentialExpressionTest(metaclass=abc.ABCMeta):
"""
Dummy class specifying all needed methods / parameters necessary for DifferentialExpressionTest.
Useful for type hinting. Structure:
Methods which are called by constructor and which compute (corrected) p-values:
_test()
_correction()
Accessor methods for important metrics which have to be extracted from estimated models:
log_fold_change()
reduced_model_gradient()
full_model_gradient()
Interface method which provides summary of results:
results()
plot()
"""
def __init__(self):
self._pval = None
self._qval = None
self._mean = None
self._log_likelihood = None
@property
@abc.abstractmethod
def gene_ids(self) -> np.ndarray:
pass
@property
@abc.abstractmethod
def x(self):
pass
@abc.abstractmethod
def log_fold_change(self, base=np.e, **kwargs):
pass
def log2_fold_change(self, **kwargs):
"""
Calculates the pairwise log_2 fold change(s) for this DifferentialExpressionTest.
"""
return self.log_fold_change(base=2, **kwargs)
def log10_fold_change(self, **kwargs):
"""
Calculates the log_10 fold change(s) for this DifferentialExpressionTest.
"""
return self.log_fold_change(base=10, **kwargs)
def _test(self, **kwargs) -> np.ndarray:
pass
def _correction(self, method) -> np.ndarray:
"""
Performs multiple testing corrections available in statsmodels.stats.multitest.multipletests()
on self.pval.
:param method: Multiple testing correction method.
Browse available methods in the annotation of statsmodels.stats.multitest.multipletests().
"""
if np.all(np.isnan(self.pval)):
return self.pval
else:
return correction.correct(pvals=self.pval, method=method)
def _ave(self):
"""
Returns the mean expression by gene.
:return: np.ndarray
"""
pass
@property
def log_likelihood(self):
if self._log_likelihood is None:
self._log_likelihood = self._ll()
return self._log_likelihood
@property
def mean(self):
if self._mean is None:
self._mean = self._ave()
return self._mean
@property
def pval(self):
if self._pval is None:
self._pval = self._test().copy()
return self._pval
@property
def qval(self, method="fdr_bh"):
if self._qval is None:
self._qval = self._correction(method=method).copy()
return self._qval
def log10_pval_clean(self, log10_threshold=-30):
"""
Return log10 transformed and cleaned p-values.
NaN p-values are set to one and p-values below log10_threshold
in log10 space are set to log10_threshold.
:param log10_threshold: minimal log10 p-value to return.
:return: Cleaned log10 transformed p-values.
"""
        pvals = np.reshape(self.pval, -1).astype(dtype=float)
pvals = np.clip(
pvals,
np.nextafter(0, 1),
np.inf
)
log10_pval_clean = np.log(pvals) / np.log(10)
log10_pval_clean[np.isnan(log10_pval_clean)] = 1
log10_pval_clean = np.clip(log10_pval_clean, log10_threshold, 0, log10_pval_clean)
return log10_pval_clean
def log10_qval_clean(self, log10_threshold=-30):
"""
Return log10 transformed and cleaned q-values.
NaN p-values are set to one and q-values below log10_threshold
in log10 space are set to log10_threshold.
:param log10_threshold: minimal log10 q-value to return.
:return: Cleaned log10 transformed q-values.
"""
        qvals = np.reshape(self.qval, -1).astype(dtype=float)
qvals = np.clip(
qvals,
np.nextafter(0, 1),
np.inf
)
log10_qval_clean = np.log(qvals) / np.log(10)
log10_qval_clean[np.isnan(log10_qval_clean)] = 1
log10_qval_clean = np.clip(log10_qval_clean, log10_threshold, 0, log10_qval_clean)
return log10_qval_clean
@abc.abstractmethod
def summary(self, **kwargs) -> pd.DataFrame:
pass
def _threshold_summary(
self,
res: pd.DataFrame,
qval_thres=None,
fc_upper_thres=None,
fc_lower_thres=None,
mean_thres=None
) -> pd.DataFrame:
"""
Reduce differential expression results into an output table with desired thresholds.
:param res: Unfiltered summary table.
:param qval_thres: Upper bound of corrected p-values for gene to be included.
:param fc_upper_thres: Upper bound of fold-change for gene to be included.
:param fc_lower_thres: Lower bound of fold-change p-values for gene to be included.
:param mean_thres: Lower bound of average expression for gene to be included.
:return: Filtered summary table.
"""
assert fc_lower_thres > 0 if fc_lower_thres is not None else True, "supply positive fc_lower_thres"
assert fc_upper_thres > 0 if fc_upper_thres is not None else True, "supply positive fc_upper_thres"
if qval_thres is not None:
qvals = res['qval'].values
qval_include = np.logical_not(np.isnan(qvals))
qval_include[qval_include] = qvals[qval_include] <= qval_thres
res = res.iloc[qval_include, :]
if fc_upper_thres is not None and fc_lower_thres is None:
res = res.iloc[res['log2fc'].values >= np.log(fc_upper_thres) / np.log(2), :]
elif fc_upper_thres is None and fc_lower_thres is not None:
res = res.iloc[res['log2fc'].values <= np.log(fc_lower_thres) / np.log(2), :]
elif fc_upper_thres is not None and fc_lower_thres is not None:
res = res.iloc[np.logical_or(
res['log2fc'].values <= np.log(fc_lower_thres) / np.log(2),
res['log2fc'].values >= np.log(fc_upper_thres) / np.log(2)), :]
if mean_thres is not None:
res = res.iloc[res['mean'].values >= mean_thres, :]
return res
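    # e.g. _threshold_summary(res, qval_thres=0.05, fc_upper_thres=2., fc_lower_thres=0.5)
    # keeps genes with q-value <= 0.05 and |log2 fold change| >= 1 (illustrative thresholds).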
def plot_volcano(
self,
corrected_pval=True,
log10_p_threshold=-30,
log2_fc_threshold=10,
alpha=0.05,
min_fc=1,
size=20,
highlight_ids: Union[List, Tuple] = (),
highlight_size: float = 30,
highlight_col: str = "red",
show: bool = True,
save: Union[str, None] = None,
suffix: str = "_volcano.png",
return_axs: bool = False
):
"""
Returns a volcano plot of p-value vs. log fold change
:param corrected_pval: Whether to use multiple testing corrected
or raw p-values.
:param log10_p_threshold: lower bound of log10 p-values displayed in plot.
:param log2_fc_threshold: Negative lower and upper bound of
log2 fold change displayed in plot.
:param alpha: p/q-value lower bound at which a test is considered
non-significant. The corresponding points are colored in grey.
:param min_fc: Fold-change lower bound for visualization,
the points below the threshold are colored in grey.
:param size: Size of points.
:param highlight_ids: Genes to highlight in volcano plot.
:param highlight_size: Size of points of genes to highlight in volcano plot.
:param highlight_col: Color of points of genes to highlight in volcano plot.
:param show: Whether (if save is not None) and where (save indicates dir and file stem) to display plot.
:param save: Path+file name stem to save plots to.
File will be save+suffix. Does not save if save is None.
:param suffix: Suffix for file name to save plot to. Also use this to set the file type.
:param return_axs: Whether to return axis objects.
:return: Tuple of matplotlib (figure, axis)
"""
import seaborn as sns
import matplotlib.pyplot as plt
plt.ioff()
if corrected_pval:
neg_log_pvals = - self.log10_qval_clean(log10_threshold=log10_p_threshold)
else:
neg_log_pvals = - self.log10_pval_clean(log10_threshold=log10_p_threshold)
logfc = np.reshape(self.log2_fold_change(), -1)
# Clipping throws errors if not performed in actual data format (ndarray or DataArray):
logfc = np.clip(logfc, -log2_fc_threshold, log2_fc_threshold, logfc)
fig, ax = plt.subplots()
is_significant = np.logical_and(
neg_log_pvals >= - np.log(alpha) / np.log(10),
np.abs(logfc) >= np.log(min_fc) / np.log(2)
)
sns.scatterplot(y=neg_log_pvals, x=logfc, hue=is_significant, ax=ax,
legend=False, s=size,
palette={True: "orange", False: "black"})
highlight_ids_found = np.array([x in self.gene_ids for x in highlight_ids])
highlight_ids_clean = [highlight_ids[i] for i in np.where(highlight_ids_found)[0]]
highlight_ids_not_found = [highlight_ids[i] for i in np.where(np.logical_not(highlight_ids_found))[0]]
if len(highlight_ids_not_found) > 0:
            logger.warning("not all highlight_ids were found in data set: %s", ", ".join(highlight_ids_not_found))
if len(highlight_ids_clean) > 0:
neg_log_pvals_highlights = np.zeros([len(highlight_ids_clean)])
logfc_highlights = np.zeros([len(highlight_ids_clean)])
is_highlight = np.zeros([len(highlight_ids_clean)])
for i, id_i in enumerate(highlight_ids_clean):
idx = np.where(self.gene_ids == id_i)[0]
neg_log_pvals_highlights[i] = neg_log_pvals[idx]
logfc_highlights[i] = logfc[idx]
sns.scatterplot(y=neg_log_pvals_highlights, x=logfc_highlights,
hue=is_highlight, ax=ax,
legend=False, s=highlight_size,
palette={0: highlight_col})
if corrected_pval:
ax.set(xlabel="log2FC", ylabel='-log10(corrected p-value)')
else:
ax.set(xlabel="log2FC", ylabel='-log10(p-value)')
# Save, show and return figure.
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return
def plot_ma(
self,
corrected_pval=True,
log2_fc_threshold=10,
min_mean=1e-4,
alpha=0.05,
size=20,
highlight_ids: Union[List, Tuple] = (),
highlight_size: float = 30,
highlight_col: str = "red",
show: bool = True,
save: Union[str, None] = None,
suffix: str = "_ma_plot.png",
return_axs: bool = False
):
"""
Returns an MA plot of mean expression vs. log fold change with significance
super-imposed.
:param corrected_pval: Whether to use multiple testing corrected
or raw p-values.
:param log2_fc_threshold: Negative lower and upper bound of
log2 fold change displayed in plot.
:param min_mean:
Lower bound for mean expression of plot. All values below this threshold
are updated to this threshold.
:param alpha: p/q-value lower bound at which a test is considered
non-significant. The corresponding points are colored in grey.
:param size: Size of points.
:param highlight_ids: Genes to highlight in volcano plot.
:param highlight_size: Size of points of genes to highlight in volcano plot.
:param highlight_col: Color of points of genes to highlight in volcano plot.
:param show: Whether (if save is not None) and where (save indicates dir and file stem) to display plot.
:param save: Path+file name stem to save plots to.
File will be save+suffix. Does not save if save is None.
:param suffix: Suffix for file name to save plot to. Also use this to set the file type.
:param return_axs: Whether to return axis objects.
:return: Tuple of matplotlib (figure, axis)
"""
import seaborn as sns
import matplotlib.pyplot as plt
assert min_mean >= 0, "min_mean must be positive"
plt.ioff()
ave = np.log(np.clip(
            self.mean.astype(dtype=float),
np.max(np.array([np.nextafter(0, 1), min_mean])),
np.inf
))
        logfc = np.reshape(self.log2_fold_change(), -1)
self.helper_bones[cat].values():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
armature.pose.bones[prefix + bone].bone_group_index = 5
armature.data.bones[prefix + bone].layers[5] = True
armature.data.bones[prefix + bone].layers[0] = False
for container, bone in self.other_bones.items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
if container == 'attachment':
armature.pose.bones[prefix + bone].bone_group_index = 6
armature.data.bones[prefix + bone].layers[6] = True
armature.data.bones[prefix + bone].layers[0] = False
elif container == 'weapon':
armature.pose.bones[prefix + bone].bone_group_index = 7
armature.data.bones[prefix + bone].layers[7] = True
armature.data.bones[prefix + bone].layers[0] = False
else:
armature.pose.bones[prefix + bone].bone_group_index = 8
armature.data.bones[prefix + bone].layers[8] = True
armature.data.bones[prefix + bone].layers[0] = False
#Custom bones
for bone in self.custom_bones.values():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
armature.pose.bones[prefix + bone].bone_group_index = 9
armature.data.bones[prefix + bone].layers[9] = True
armature.data.bones[prefix + bone].layers[0] = False
#Reveals used layers
for i in [0,1,2,3,4,5,6,7,8, 9]:
armature.data.layers[i] = True
print("Bone groups set!")
def set_helper_bones(self):
vatproperties = bpy.context.scene.vatproperties
vatinfo = bpy.context.scene.vatinfo
armature = self.armature
new = False
for cat in self.helper_bones.keys():
for container, bone in self.helper_bones[cat].items():
if container == 'wrist' or container == 'ulna' or container == 'elbow' or container == 'knee' or container == 'quadricep' or container == 'shoulder' or container == 'thumbroot' or container == 'forearm_driven':
for index, bone in enumerate(bone):
if bone:
if index > 1:
break
prefix, bone = bone_convert(bone)
#Adds transforms to only these helper bones unless already existing
try:
armature.pose.bones[prefix + bone].constraints['Procedural Bone']
except:
transform = armature.pose.bones[prefix + bone].constraints.new('TRANSFORM')
new = True
#Initial parameters
transform.name = "Procedural Bone"
transform.target = self.armature
transform.map_from = 'ROTATION'
transform.map_to = 'ROTATION'
transform.target_space = 'LOCAL'
transform.owner_space = 'LOCAL'
#Hand rotation
if container == 'wrist' or container == 'ulna' or container == 'forearm_driven':
if vatinfo.special_viewmodel:
transform.from_min_y_rot = radians(-90)
transform.from_max_y_rot = radians(90)
else:
transform.from_min_x_rot = radians(-90)
transform.from_max_x_rot = radians(90)
prefix, bone = bone_convert(self.symmetrical_bones['arms']['hand'][index])
transform.subtarget = prefix + bone
if container == 'wrist':
transform.to_min_x_rot = radians(-75)
transform.to_max_x_rot = radians(75)
elif container == 'ulna':
if vatinfo.special_viewmodel:
transform.to_min_y_rot = radians(-50)
transform.to_max_y_rot = radians(50)
else:
transform.to_min_x_rot = radians(-50)
transform.to_max_x_rot = radians(50)
elif container == 'forearm_driven':
transform.to_min_x_rot = radians(-25)
transform.to_max_x_rot = radians(20)
#Forearm and thigh rotation
elif container == 'elbow' or container == 'knee' or container == 'quadricep':
if vatinfo.titanfall and container == 'elbow':
transform.from_min_y_rot = radians(-90)
transform.from_max_y_rot = radians(90)
transform.to_min_y_rot = radians(-45)
transform.to_max_y_rot = radians(45)
else:
transform.from_min_z_rot = radians(-90)
transform.from_max_z_rot = radians(90)
transform.to_min_z_rot = radians(-45)
transform.to_max_z_rot = radians(45)
if container == 'elbow':
prefix, bone = bone_convert(self.symmetrical_bones['arms']['forearm'][index])
transform.subtarget = prefix + bone
elif container == 'knee':
prefix, bone = bone_convert(self.symmetrical_bones['legs']['calf'][index])
transform.subtarget = prefix + bone
elif container == 'quadricep':
if not vatinfo.sbox:
prefix, bone = bone_convert(self.symmetrical_bones['legs']['thigh'][index])
transform.subtarget = prefix + bone
elif container == 'shoulder':
#Not for Titanfall characters
if not vatinfo.titanfall:
transform.from_min_y_rot = radians(-45)
transform.from_max_y_rot = radians(45)
#Nick exclusive
if self.helper_bones['arms']['wrist'] and self.helper_bones['arms']['wrist'][0] == 'h2.wrist':
transform.to_min_y_rot = radians(45)
transform.to_max_y_rot = radians(-45)
else:
transform.to_min_y_rot = radians(5)
transform.to_max_y_rot = radians(-5)
prefix, bone = bone_convert(self.symmetrical_bones['arms']['upperarm'][index])
transform.subtarget = prefix + bone
elif container == 'thumbroot':
transform.from_min_y_rot = radians(-45)
transform.from_max_y_rot = radians(45)
transform.from_min_z_rot = radians(-75)
transform.from_max_z_rot = radians(75)
if index == 0:
transform.to_min_y_rot = radians(30)
transform.to_max_y_rot = radians(-30)
else:
transform.to_min_y_rot = radians(-30)
transform.to_max_y_rot = radians(30)
transform.to_min_z_rot = radians(-45)
transform.to_max_z_rot = radians(45)
prefix, bone = bone_convert(self.symmetrical_bones['fingers']['finger0'][index])
transform.subtarget = prefix + bone
if new:
print("Procedural bones configured!")
if vatinfo.viewmodel:
vatproperties.bake_helper_bones = True
else:
vatproperties.bake_helper_bones = False
#Some functions (Namely creating new bones) do not add the newly created info to the object data until a mode change occurs at least once
def update(type, object=None):
if type == 0: #Simple update, used for making new bones show up in data
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')
elif type == 1 and object: #Used to work with edit_bones, since it's not possible to use in anything other than edit mode
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #You're required to be in edit mode to use 'data.edit_bones', else there will be no bone info given.
object.select_set(True)
bpy.context.view_layer.objects.active = object
bpy.ops.object.mode_set(mode='EDIT')
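# Usage sketch: update(0) just toggles OBJECT/EDIT mode so newly created bones show up in
# the object data, while update(1, arm.weight_armature) additionally selects and activates
# that armature before entering edit mode (as done in generate_armature below).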
def convert_armature_to_source():
vatproperties = bpy.context.scene.vatproperties
pass
def generate_armature(type, action): #Creates or deletes the weight armature
vatinfo = bpy.context.scene.vatinfo
real_armature = bpy.data.armatures[arm.armature_real.name]
unit = vatinfo.unit
#Creation
if action == 0:
#Weight armature datablock
if type == 'weight':
arm.weight_armature_real = real_armature.copy()
arm.weight_armature_real.name = arm.armature_real.name + '.weight'
#Creation and link to current scene
arm.weight_armature = bpy.data.objects.new(arm.armature.name + '.weight', arm.weight_armature_real)
vatinfo.weight_armature = True
collection = arm.armature.users_collection[0]
collection.objects.link(arm.weight_armature)
armature = arm.weight_armature
#Animation armature datablock
elif type == 'anim':
arm.animation_armature_real = real_armature.copy()
arm.animation_armature_real.name = arm.armature_real.name + '.anim_setup'
#Creation and link to current scene
arm.animation_armature = bpy.data.objects.new(arm.armature.name + '.anim_setup', arm.animation_armature_real)
vatinfo.animation_armature = True
collection = arm.armature.users_collection[0]
collection.objects.link(arm.animation_armature)
armature = arm.animation_armature
#Focuses on newly created armature
update(1, armature)
##Unimportant bone removal##
#Removes bones such as weapon or attachment bones
if arm.other_bones:
for container, bone in arm.other_bones.items():
for bone in bone:
if bone:
if container == 'forward' or container == 'root' or container == 'ik' or bone == 'p2.ValveBiped':
prefix, bone = bone_convert(bone)
bone = armature.data.edit_bones[prefix + bone]
armature.data.edit_bones.remove(bone)
elif type == 'weight':
prefix, bone = bone_convert(bone)
bone = armature.data.edit_bones[prefix + bone]
armature.data.edit_bones.remove(bone)
#Keeps only the bare minimum bones for Rigify
if type == 'anim':
for cat in arm.helper_bones.keys():
for container, bone in arm.helper_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
armature.data.edit_bones.remove(ebone)
elif type == 'weight':
#Removes wrist helpers for viewmodels since i've never seen them used for anything and they mess with weight generation
for container, bone in arm.helper_bones['viewmodel'].items():
if container != 'thumbroot' and container != 'forearm_driven':
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
armature.data.edit_bones.remove(ebone)
##Setup for armatures, tweaking bone positions and the like##
arm.chainless_bones = []
arm.chain_start = []
        #Temporary list with prefixes taken out
custom_bones = []
for cat in arm.custom_bones.keys():
for bone in arm.custom_bones[cat]:
if bone:
prefix, bone = bone_convert(bone)
custom_bones.append(bone)
#Custom bones, placed first so changes to the standard bones by them are overwritten later
for cat in arm.custom_bones.keys():
for bone in arm.custom_bones[cat]:
if bone:
prefix, bone2 = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone2]
pbone = armature.pose.bones[prefix + bone2]
marked = False
if ebone.parent:
parent = ebone.parent.name
if custom_bones.count(parent.replace(prefix, '')):
marked = True
parent = ebone.parent
#If bone's parent is not any of the default ones
if marked:
#Avoids Blender deleting the bone if the connection causes the child bone to have virtually 0 length
if ebone.tail != parent.tail and ebone.head != parent.head:
parent.tail = pbone.head
#Straightens the first bone of a line
if not ebone.children:
length = parent.length
parent.length = parent.length*2
ebone.tail = parent.tail
parent.length = length
if len(parent.children) < 2:
ebone.use_connect = True
if not ebone.use_connect and ebone.children:
arm.chain_start.append(bone)
else:
if not ebone.children:
arm.chainless_bones.append(bone)
if ebone.length < 0.3*unit:
pbone.rotation_quaternion[3] = -1
pbone.scale = 5,5,5
if not ebone.use_connect and ebone.children:
if type == 'anim':
pbone.rigify_type = 'basic.super_copy'
pbone.rigify_parameters.super_copy_widget_type = 'bone'
#arm.chain_start.append(bone)
#Isolated bones for the custom bones
if type == 'anim':
for cat in arm.custom_bones.keys():
for bone in arm.custom_bones[cat]:
if bone:
prefix, bone2 = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone2]
pbone = armature.pose.bones[prefix + bone2]
#Creates copy of bone that retains the original rotation for the retarget empties
isolatedbone = armature.data.edit_bones.new(prefix + bone2 + ".isolated")
isolatedbone.head = armature.pose.bones[prefix + bone2].head
isolatedbone.tail = armature.pose.bones[prefix + bone2].tail
isolatedbone.roll = armature.data.edit_bones[prefix + bone2].roll
isolatedbone.parent = armature.data.edit_bones[prefix + bone2]
isolatedbone.use_deform = False
isolatedbone.layers[28] = True
for i in range(0, 11):
isolatedbone.layers[i] = False
#Symmetrical bones
for cat in arm.symmetrical_bones.keys():
for container, bone in arm.symmetrical_bones[cat].items():
for index, bone in enumerate(bone):
if bone:
prefix, bone = bone_convert(bone)
if type == 'anim':
#Creates copy of bone that retains the original rotation for the retarget empties
if vatinfo.scheme == 0 and not vatinfo.sbox:
bone2 = armature_rename.bone_rename(1, bone, index)
isolatedbone = armature.data.edit_bones.new(prefix + bone2 + ".isolated")
else:
isolatedbone = armature.data.edit_bones.new(prefix + bone + ".isolated")
isolatedbone.head = armature.pose.bones[prefix + bone].head
isolatedbone.tail = armature.pose.bones[prefix + bone].tail
isolatedbone.roll = armature.data.edit_bones[prefix + bone].roll
isolatedbone.use_deform = False
isolatedbone.layers[28] = True
for i in range(0, 11):
                                isolatedbone.layers[i] = False
import sys
import pytest
import numpy as np
from msl.io.dataset import Dataset
def test_instantiate():
dset = Dataset(name='/data', parent=None, is_read_only=True, shape=(10, 10))
assert dset.name == '/data'
assert len(dset) == 10
assert dset.size == 100
assert dset.dtype == float
assert dset.dtype.names is None
dset = Dataset(name='dataset 1', parent=None, is_read_only=True, shape=(100,), dtype=int)
assert dset.name == 'dataset 1'
assert len(dset) == 100
assert dset.size == 100
assert dset.dtype == int
assert dset.dtype.names is None
dset = Dataset(name='mixed', parent=None, is_read_only=True, shape=(100,), dtype=[('x', float), ('y', int), ('z', str)])
assert dset.name == 'mixed'
assert len(dset) == 100
assert dset.size == 100
assert len(dset['x']) == 100
assert dset.dtype[0] == float
assert len(dset['y']) == 100
assert dset.dtype[1] == int
assert len(dset['z']) == 100
assert dset.dtype[2] == str
assert dset.dtype.names == ('x', 'y', 'z')
dset = Dataset(name='xxx', parent=None, is_read_only=True, data=[1, 2, 3])
assert len(dset) == 3
assert dset[0] == 1
assert dset[1] == 2
assert dset[2] == 3
d = dset[:]
assert len(d) == 3
assert d[0] == 1
assert d[1] == 2
assert d[2] == 3
d = dset[::2]
assert len(d) == 2
assert d[0] == 1
assert d[1] == 3
def test_metadata():
dset = Dataset(name='d', parent=None, is_read_only=False, shape=(100,),
dtype=int, order='F', temperature=21.3, lab='msl', x=-1)
# 'name' is absorbed by Vertex
# 'shape', 'dtype' and 'order' are kwargs that are absorbed by numpy
assert len(dset.metadata) == 3
assert dset.metadata['temperature'] == 21.3
assert dset.metadata['lab'] == 'msl'
assert dset.metadata['x'] == -1
assert not dset.metadata.is_read_only
dset.add_metadata(one=1, two=2, three=3)
assert len(dset.metadata) == 6
assert dset.metadata['one'] == 1
assert dset.metadata['two'] == 2
assert dset.metadata['three'] == 3
def test_field_access_as_attribute():
# no names defined in the dtype
dset = Dataset(name='data', parent=None, is_read_only=False, shape=(3, 3))
assert len(dset) == 3
assert dset.shape == (3, 3)
assert dset.dtype == float
assert dset.dtype.names is None
with pytest.raises(AttributeError):
_ = dset.there_are_no_field_names
# names are defined in the dtype
dset = Dataset(name='data', parent=None, is_read_only=False, shape=(100,),
dtype=[('x', float), ('y', int), ('z', str)])
assert len(dset['x']) == 100
assert len(dset.x) == 100
assert len(dset['y']) == 100
assert len(dset.y) == 100
dset.y[:] = 1
for val in dset.y:
assert val == 1
dset.x = np.arange(100, 200)
assert np.array_equal(dset.x + dset.y, np.arange(101, 201))
assert len(dset['z']) == 100
assert len(dset.z) == 100
assert dset['z'][0] == ''
assert dset.z[0] == ''
assert dset.dtype.names == ('x', 'y', 'z')
def test_read_only():
dset = Dataset(name='<NAME>', parent=None, is_read_only=True, shape=(100,), dtype=int)
assert dset.name == '<NAME>'
assert len(dset) == 100
assert dset.is_read_only
assert dset.metadata.is_read_only
# cannot modify data
with pytest.raises(ValueError):
dset[:] = 1
# cannot modify data
with pytest.raises(ValueError):
dset[0] = 1
# make writable
dset.is_read_only = False
assert not dset.is_read_only
assert not dset.metadata.is_read_only
# can modify data
dset[:] = 1
assert dset[0] == 1
# make read only again
dset.is_read_only = True
assert dset.is_read_only
assert dset.metadata.is_read_only
# cannot modify data
with pytest.raises(ValueError):
dset[:] = 1
# can make a dataset writeable but the metadata read-only
dset.is_read_only = False
assert not dset.is_read_only
assert not dset.metadata.is_read_only
dset.metadata.is_read_only = True
assert not dset.is_read_only
assert dset.metadata.is_read_only
dset[:] = 1
with pytest.raises(ValueError):
dset.add_metadata(some_more_info=1)
def test_copy():
orig = Dataset(name='abcdefg', parent=None, is_read_only=True, shape=(10,), dtype=int, voltage=1.2, current=5.3)
assert orig.is_read_only
assert orig.name == 'abcdefg'
copy = orig.copy()
assert isinstance(copy, Dataset)
assert copy.is_read_only
assert copy.metadata.is_read_only
assert copy.name == 'abcdefg'
for i in range(10):
assert orig[i] == copy[i]
assert orig.metadata['voltage'] == copy.metadata['voltage']
assert orig.metadata['current'] == copy.metadata['current']
copy.is_read_only = False
assert not copy.is_read_only
assert not copy.metadata.is_read_only
assert orig.is_read_only
assert orig.metadata.is_read_only
val = 7 if orig[1] != 7 else 8
copy[1] = val
assert copy[1] == val
assert orig[1] != copy[1]
def test_string_representation():
dset = Dataset(name='abcd', parent=None, data=[[1, 2], [3, 4]], is_read_only=True, foo='bar')
assert repr(dset) in ["<Dataset 'abcd' shape=(2, 2) dtype='<f8' (1 metadata)>",
"<Dataset 'abcd' shape=(2L, 2L) dtype='<f8' (1 metadata)>"]
assert str(dset) == ('array([[1., 2.],\n'
' [3., 4.]])')
assert str(dset.metadata) == "<Metadata 'abcd' {'foo': 'bar'}>"
# just for fun, test more index access
assert dset[0, 0] + dset[0, 1] == 3
assert all(dset[:, 0] + dset[:, 1] == [3, 7])
assert all(dset[0, :] + dset[1, :] == [4, 6])
def test_ndarray_attribute():
dset = Dataset(name='abcd', parent=None, data=[[1, 2], [3, 4]], is_read_only=True)
as_list = dset.tolist()
assert isinstance(as_list, list)
assert dset.tolist() == [[1, 2], [3, 4]]
assert dset.flatten().tolist() == [1, 2, 3, 4]
assert dset.max() == 4
assert dset.min() == 1
assert all(dset.max(axis=1) == [2, 4])
assert all(dset.max(axis=0) == [3, 4])
def test_scalar():
dset = Dataset(name='abcd', parent=None, data=5, is_read_only=True)
assert len(dset) == 1
assert dset.shape == ()
assert dset.size == 1
assert dset.data == 5.0
def test_add():
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[1, 2, 3])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=[4, 5, 6])
for rhs in ([4, 5, 6], d2):
result = d1 + rhs
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([5., 7., 9.]))
for lhs in ([4, 5, 6], d2):
result = lhs + d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([5., 7., 9.]))
def test_sub():
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[1, 2, 3])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=[4, 5, 6])
for rhs in ([4, 5, 6], d2):
result = d1 - rhs
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([-3., -3., -3.]))
for lhs in ([4, 5, 6], d2):
result = lhs - d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([3., 3., 3.]))
def test_mul():
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[1, 2, 3])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=[4, 5, 6])
for rhs in ([4, 5, 6], d2):
result = d1 * rhs
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([4., 10., 18.]))
for lhs in ([4, 5, 6], d2):
result = lhs * d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([4., 10., 18.]))
def test_truediv():
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[1, 2, 1])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=[4, 4, 10])
for rhs in ([4., 4., 10.], d2):
result = d1 / rhs
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([0.25, 0.5, 0.1]))
for lhs in ([4., 4., 10.], d2):
result = lhs / d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([4., 2., 10.]))
def test_floordiv():
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[1e3, 1e4, 1e5])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=[1e2, 1e3, 1e4])
for rhs in ([1e2, 1e3, 1e4], d2):
result = d1 // rhs
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([10., 10., 10.]))
for lhs in ([1e2, 1e3, 1e4], d2):
result = lhs // d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([0., 0., 0.]))
def test_pow():
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[1, 2, 3])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=[4, 5, 6])
result = d1 ** 3
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([1., 8., 27.]))
result = pow(d1, 3)
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([1., 8., 27.]))
result = 3 ** d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([3., 9., 27.]))
result = pow(3, d1)
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([3., 9., 27.]))
for rhs in ([4., 5., 6.], d2):
result = d1 ** rhs
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([1., 32., 729.]))
result = pow(d1, rhs)
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([1., 32., 729.]))
for lhs in ([4., 5., 6.], d2):
result = lhs ** d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([4., 25., 216.]))
result = pow(lhs, d1)
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([4., 25., 216.]))
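# Note (added): the @ matmul operator is a SyntaxError on Python < 3.5, so the
# body of test_matmul lives in a separate module that is only imported when the
# test actually runs; the skipif guard keeps collection working on old Pythons.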
@pytest.mark.skipif(sys.version_info[:2] < (3, 5), reason='the @ operator requires Python 3.5+')
def test_matmul():
import dataset_matmul
dataset_matmul.run()
def test_mod():
d = Dataset(name='/d', parent=None, is_read_only=True, data=list(range(7)))
result = d % 5
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([0, 1, 2, 3, 4, 0, 1]))
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[4, 7])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=[2, 3])
for rhs in ([2, 3], d2):
result = d1 % rhs
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([0, 1]))
for lhs in ([2, 3], d2):
result = lhs % d1
assert isinstance(result, np.ndarray)
assert np.array_equal(result, np.array([2, 3]))
def test_divmod():
d1 = Dataset(name='/d1', parent=None, is_read_only=True, data=[3, 7, 12, 52, 62])
d2 = Dataset(name='/d2', parent=None, is_read_only=True, data=np.arange(1, 6))
for rhs in ([1, 2, 3, 4, 5], d2):
div, mod = divmod(d1, rhs)
assert isinstance(div, np.ndarray)
assert np.array_equal(div, np.array([3, 3, 4, 13, 12]))
assert isinstance(mod, np.ndarray)
assert np.array_equal(mod, np.array([0, 1, 0, 0, 2]))
for lhs in ([1, 2, 3, 4, 5], d2):
div, mod = divmod(lhs, d1)
assert isinstance(div, np.ndarray)
        assert np.array_equal(div, np.array([0, 0, 0, 0, 0]))
        assert isinstance(mod, np.ndarray)
        assert np.array_equal(mod, np.array([1, 2, 3, 4, 5]))
"""
Some commonly used functions for defining a task.
"""
from __future__ import division
import numpy as np
import pdb
from sklearn.metrics import r2_score
#-----------------------------------------------------------------------------------------
# Define E/I populations
#-----------------------------------------------------------------------------------------
def generate_ei(N, pE=0.8):
"""
E/I signature.
Parameters
----------
N : int
Number of recurrent units.
pE : float, optional
Fraction of units that are excitatory. Default is the usual value for cortex.
"""
assert 0 <= pE <= 1
Nexc = int(pE*N)
Ninh = N - Nexc
idx = range(N)
EXC = idx[:Nexc]
INH = idx[Nexc:]
ei = np.ones(N, dtype=int)
ei[INH] *= -1
return ei, EXC, INH
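# Illustrative usage (added sketch, not part of the original module): split a
# small network into excitatory and inhibitory populations.
def _example_generate_ei():
    ei, EXC, INH = generate_ei(10, pE=0.8)
    assert list(EXC) == list(range(8)) and list(INH) == [8, 9]
    assert (ei[EXC] == 1).all() and (ei[INH] == -1).all()
    return ei, EXC, INH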
#-----------------------------------------------------------------------------------------
# Functions for defining task epochs
#-----------------------------------------------------------------------------------------
def get_idx(t, interval):
start, end = interval
return list(np.where((start < t) & (t <= end))[0])
def get_epochs_idx(dt, epochs):
t = np.linspace(dt, epochs['T'], int(epochs['T']/dt))
#assert t[1] - t[0] == dt, "[ tasktools.get_epochs_idx ] dt doesn't fit into T."
return t, {k: get_idx(t, v) for k, v in epochs.items() if k != 'T'}
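# Illustrative usage (added sketch): epoch boundaries given in ms on a 10 ms
# time step; the returned dict maps each epoch name to its time-step indices.
def _example_epochs_idx():
    dt = 10
    epochs = {'fixation': (0, 200), 'stimulus': (200, 700), 'T': 1000}
    t, e = get_epochs_idx(dt, epochs)
    # e['fixation'] holds the indices of t with 0 < t <= 200
    return t, e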
#-----------------------------------------------------------------------------------------
# Functions for generating epoch durations that are multiples of the time step
#-----------------------------------------------------------------------------------------
def uniform(rng, dt, xmin, xmax):
return (rng.uniform(xmin, xmax)//dt)*dt
def truncated_exponential(rng, dt, mean, xmin=0, xmax=np.inf):
while True:
x = rng.exponential(mean)
if xmin <= x < xmax:
return (x//dt)*dt
def truncated_normal(rng, dt, mean, sigma, xmin=-np.inf, xmax=np.inf):
while True:
x = rng.normal(mean, sigma)
if xmin <= x < xmax:
return (x//dt)*dt
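# Illustrative usage (added sketch): draw a 600-800 ms delay on a 10 ms grid.
def _example_delay_duration(seed=0):
    rng = np.random.RandomState(seed)
    return truncated_normal(rng, dt=10, mean=700, sigma=50, xmin=600, xmax=800)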
#-----------------------------------------------------------------------------------------
# Functions for generating orientation tuning curves
#-----------------------------------------------------------------------------------------
def deg2rad(s):
return s*np.pi/180
def vonMises(s, spref, g=1, kappa=5, b=0, convert=True):
arg = s - spref
if convert:
arg = deg2rad(arg)
return g*np.exp(kappa*(np.cos(arg)-1)) + b
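# Illustrative usage (added sketch): a tuning curve sampled every 45 degrees;
# with the defaults it peaks at 1 when the stimulus equals spref.
def _example_tuning_curve():
    orientations = np.arange(0, 360, 45)
    return vonMises(orientations, spref=90)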
#-----------------------------------------------------------------------------------------
# Convert batch index to condition
#-----------------------------------------------------------------------------------------
def unravel_index(b, dims):
return np.unravel_index(b, dims, order='F')
#-----------------------------------------------------------------------------------------
# Functions for generating connection matrices
#-----------------------------------------------------------------------------------------
def generate_Crec(ei, p_exc=1, p_inh=1, rng=None, seed=1, allow_self=False):
if rng is None:
rng = np.random.RandomState(seed)
N = len(ei)
exc, = np.where(ei > 0)
inh, = np.where(ei < 0)
C = np.zeros((N, N))
for i in exc:
C[i,exc] = 1*(rng.uniform(size=len(exc)) < p_exc)
if not allow_self:
C[i,i] = 0
C[i,inh] = 1*(rng.uniform(size=len(inh)) < p_inh)
C[i,inh] *= np.sum(C[i,exc])/np.sum(C[i,inh])
for i in inh:
C[i,exc] = 1*(rng.uniform(size=len(exc)) < p_exc)
C[i,inh] = 1*(rng.uniform(size=len(inh)) < p_inh)
if not allow_self:
C[i,i] = 0
C[i,inh] *= np.sum(C[i,exc])/np.sum(C[i,inh])
C /= np.linalg.norm(C, axis=1)[:,np.newaxis]
return C
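# Illustrative usage (added sketch): a row-normalized E/I connectivity mask for
# a 100-unit network, built from the helpers above.
def _example_connectivity(seed=1):
    ei, EXC, INH = generate_ei(100)
    Crec = generate_Crec(ei, seed=seed)
    assert Crec.shape == (100, 100)
    return Crec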
#-----------------------------------------------------------------------------------------
# Callbacks
#-----------------------------------------------------------------------------------------
def correct_2afc_bias(trials, z, rmin=0.45, rmax=0.55):
"""
Use to correct bias in the psychometric curve.
"""
ends = [len(trial['t'])-1 for trial in trials]
choices = [np.argmax(z[ends[i],i]) for i, end in enumerate(ends)]
r = choices.count(0)/choices.count(1)
x = max(min(1/(1 + r), rmax), rmin)
print(r, [x, 1-x])
#return None
return [x, 1-x]
#-----------------------------------------------------------------------------------------
# Performance measure
#-----------------------------------------------------------------------------------------
def performance_2afc(trials, z):
ends = [len(trial['t'])-1 for trial in trials]
choices = [np.argmax(z[ends[i],i]) for i, end in enumerate(ends)]
correct = [choice == trial['info']['choice']
for choice, trial in zip(choices, trials) if trial['info']]
return 100*sum(correct)/len(correct)
def performance_2afc_min_condition(trials, z):
ends = [len(trial['t'])-1 for trial in trials]
choices = [np.argmax(z[ends[i],i]) for i, end in enumerate(ends)]
correct = {}
for choice, trial in zip(choices, trials):
if not trial['info']:
continue
cond = tuple(trial['info'].values())
correct.setdefault(cond, []).append(choice == trial['info']['choice'])
correct = [sum(c)/len(c) for c in correct.values()]
return 100*min(correct)
def performance_cb(trials, z):
avg_vel = np.mean(z, axis=0)
endPoints = [np.cumsum(z[:,i])[-1] for i in np.arange(len(trials))]
num_correct = 0
num_trials = 0
for (i, trial) in enumerate(trials):
if not trial['info']:
continue
choice = trial['info']['choice']
        if choice:
            inWindow = 80 < endPoints[i] / 100 * trials[i]['info']['dt'] < 120
        else:
            inWindow = -120 < endPoints[i] / 100 * trials[i]['info']['dt'] < -80
        num_correct += inWindow
num_trials += 1
return 100*num_correct / num_trials
def performance_cb_simple(trials, z):
post_delay = trials[0]['info']['post_delay']
dt = trials[0]['info']['dt']
ends = [len(trial['t'])-1 for trial in trials]
# The 50 here checks 50 ms before the post_delay period.
choices = [z[ends[i] - (50 + post_delay) // dt - 1, i][0] for i, end in enumerate(ends)]
num_correct = float(0)
num_trials = 0
for (i, trial) in enumerate(trials):
if trial['info']['catch']:
continue
choice = trial['info']['choice']
#if np.abs(choices[i] - choice) < 0.3:
# num_correct += 1
if np.sign(choices[i]) == choice:
num_correct += 1
num_trials += 1
return 100 * num_correct / num_trials
def performance_cb_simple_threshold(trials, z):
threshold = 0.5
post_delay = trials[0]['info']['post_delay']
dt = trials[0]['info']['dt']
ends = [len(trial['t'])-1 for trial in trials]
# The 50 here checks 50 ms before the post_delay period.
choices = [z[ends[i] - (50 + post_delay) // dt - 1, i][0] for i, end in enumerate(ends)]
num_correct = float(0)
num_trials = 0
for (i, trial) in enumerate(trials):
if trial['info']['catch']:
continue
choice = trial['info']['choice']
#if np.abs(choices[i] - choice) < 0.3:
# num_correct += 1
        if np.sign(choices[i]) == choice and np.abs(choices[i]) > threshold:
num_correct += 1
num_trials += 1
return 100 * num_correct / num_trials
def performance_cb_simple_racers(trials, z):
post_delay = trials[0]['info']['post_delay']
dt = trials[0]['info']['dt']
ends = [len(trial['t'])-1 for trial in trials]
# The 50 here checks 50 ms before the post_delay period.
choices = [np.argmax(z[ends[i] - (50 + post_delay) // dt - 1, i]) for i, end in enumerate(ends)]
choices = np.array(choices) * 2 - 1
correct_choices = [trial['info']['choice'] for trial in trials if not trial['info']['catch']]
correct = [choice == trial['info']['choice'] for choice, trial in zip(choices, trials) if not trial['info']['catch']]
return 100 * sum(correct) / len(correct)
def performance_cb_simple_racers_cond(trials, z):
post_delay = trials[0]['info']['post_delay']
dt = trials[0]['info']['dt']
ends = [len(trial['t'])-1 for trial in trials]
# The 50 here checks 50 ms before the post_delay period.
choices = [np.argmax(z[ends[i] - (50 + post_delay) // dt - 1, i]) for i, end in enumerate(ends)]
choices = np.array(choices) * 2 - 1
correct_choices = [trial['info']['choice'] for trial in trials if not trial['info']['catch']]
correct = [choice == trial['info']['choice'] for choice, trial in zip(choices, trials) if not trial['info']['catch']]
num_left = np.sum(np.array(correct_choices) == -1)
num_right = np.sum(np.array(correct_choices) == 1)
correct_left = [choice == trial['info']['choice'] and trial['info']['choice'] == -1 for choice, trial in zip(choices, trials) if not trial['info']['catch']]
correct_right = [choice == trial['info']['choice'] and trial['info']['choice'] == 1 for choice, trial in zip(choices, trials) if not trial['info']['catch']]
return np.min((100 * np.sum(correct_left) / num_left, 100 * np.sum(correct_right) / num_right))
def performance_cb_simple_racers_cond_thresh(trials, z):
thresh = 0.6
post_delay = trials[0]['info']['post_delay']
dt = trials[0]['info']['dt']
ends = [len(trial['t'])-1 for trial in trials]
# The 50 here checks 50 ms before the post_delay period.
values = [z[ends[i] - (50 + post_delay) // dt - 1, i, np.argmax(z[ends[i] - (50 + post_delay) // dt - 1, i])] for i, end in enumerate(ends)]
choices = [np.argmax(z[ends[i] - (50 + post_delay) // dt - 1, i]) for i, end in enumerate(ends)]
choices = np.array(choices) * 2 - 1
correct_choices = [trial['info']['choice'] for trial in trials if not trial['info']['catch']]
correct = [choice == trial['info']['choice'] for choice, trial in zip(choices, trials) if not trial['info']['catch']]
num_left = np.sum(np.array(correct_choices) == -1)
num_right = np.sum(np.array(correct_choices) == 1)
correct_left = [choice == trial['info']['choice'] and trial['info']['choice'] == -1 and value > thresh for choice, trial, value in zip(choices, trials, values) if not trial['info']['catch']]
correct_right = [choice == trial['info']['choice'] and trial['info']['choice'] == 1 and value > thresh for choice, trial, value in zip(choices, trials, values) if not trial['info']['catch']]
return np.min((100 * np.sum(correct_left) / num_left, 100 * np.sum(correct_right) / num_right))
def get_targets(cond):
x = np.cos(cond * np.pi / 180)
y = np.sin(cond * np.pi / 180)
return (x,y)
def get_outputs(cond, movement_time, dt):
targ_x, targ_y = get_targets(cond)
out_x = np.zeros((movement_time))
out_y = np.zeros((movement_time))
# generate now the velocity profile -- this is a normal distribution centered around reach_time // 2
rt = 150
reach_time = 500
peak_vel = reach_time // 2
vel_var = (reach_time // 7)**2 # 3 stds on either side
t = np.arange(reach_time)
vel_profile = 1/np.sqrt(2*np.pi*vel_var) * np.exp(-(t - peak_vel)** 2 / (2*vel_var))
pos_profile = np.cumsum(vel_profile)
# normalize vel_profile now
vel_profile = vel_profile * 1 / np.max(vel_profile)
# this is the reach part of the trace
out_x[rt:rt+reach_time] = targ_x * pos_profile
out_y[rt:rt+reach_time] = targ_y * pos_profile
out_x[rt+reach_time:] = targ_x
out_y[rt+reach_time:] = targ_y
# this is the velocity part of the trace
vout_x = np.zeros((movement_time))
vout_y = np.zeros((movement_time))
vout_x[rt:rt+reach_time] = np.cos(cond * np.pi / 180) * vel_profile
vout_y[rt:rt+reach_time] = np.sin(cond * np.pi / 180) * vel_profile
return (vout_x[::dt], vout_y[::dt], out_x[::dt], out_y[::dt])
def performance_cora_r2(trials, z):
    # z.shape
import random
from sys import exit
game_state = {
'discarded': [],
'active': {},
'colors': ['blue', 'green', 'red', 'white', 'yellow'],
'hints': 8,
'fuses': 3,
'game_over': False,
'current_player': 0,
'deck': [],
'hand_size': 5,
'recent_draw_index': -1
}
for c in game_state['colors']:
game_state['active'][c] = [] # List of cards at that index
def flatten(l):
return [item for sublist in l for item in sublist]
class Game:
def __init__(self, players):
self.last_turn = False
self.players = players
self.max_playable = 25
# Possible moves during a turn
def give_hint(self, value, color): # Pass None for one since only one piece may be given
if game_state['hints'] > 0:
game_state['hints'] -= 1
            if (value is None and color is not None) or (value is not None and color is None):
if value is None:
self.add_information(color, None, self.players[self.other_player_number()])
if color is None:
self.add_information(None, value, self.players[self.other_player_number()])
else:
print("Too much or not enough hint information")
self.change_player()
return True
else:
print("No tokens available to give hint")
return False
@staticmethod
def add_information(color, value, player):
for i in range(len(player.cards_known)):
if color is not None and player.hand[i].color == color:
player.cards_known[i].color = color
if value is not None and player.hand[i].value == value:
player.cards_known[i].value = value
def discard(self, player, card_index):
if game_state['hints'] < 8 and card_index in range(len(player.hand)):
game_state['hints'] += 1
game_state['discarded'].append(player.hand[card_index])
player.cards_known[card_index] = None
player.hand[card_index] = None
game_state['recent_draw_index'] = card_index
self.draw(player)
self.change_player()
return True
else:
print("Error on discard with " + str(game_state['hints']) + " tokens or " + str(card_index) + " not a valid index")
return False
# player = one playing the card
# card_index = which card in player hand
# pile = where to play that card, is a color
def play(self, player, card_index, pile):
if card_index in range(game_state['hand_size']):
# if the card being played is one greater than the last card on that pile,
# AND they're the same color, we play it
            if player.hand[card_index].value == 1 or not game_state['active'][pile] \
                    or (game_state['active'][pile][-1].value == (player.hand[card_index].value - 1) and pile == player.hand[card_index].color):
game_state['active'][pile].append(player.hand[card_index])
player.cards_known[card_index] = None
player.hand[card_index] = None
game_state['recent_draw_index'] = card_index
self.draw(player)
else:
game_state['fuses'] -= 1
cur_fuses = 3 - game_state['fuses']
print("Play invalid: either value or color does not follow\nIgniting fuse number " + str(cur_fuses) + "...")
self.change_player()
return True
else:
print("card not in player's hand")
return False
@staticmethod
def draw(player):
if len(game_state['deck']) > 0:
new_card = game_state['deck'].pop()
index_changed = game_state['recent_draw_index']
player.hand[index_changed] = new_card
player.cards_known[index_changed] = Card(None, None)
return True
else:
print("Game should have already ended")
return False
@staticmethod
def is_over():
answer = False
        if game_state['fuses'] == 0:
game_state['game_over'] = True
answer = True
        if len(game_state['deck']) == 0:
answer = True
if len(sum(game_state['active'].values(), [])) == len(game_state['colors']) * 5:
game_state['game_over'] = True
answer = True
# Calculate final score if the game is over
if answer:
score_sum = 0
for color in game_state['colors']:
score_sum += max(get_values(game_state['active'][color]) if game_state['active'][color] else [0])
print("GAME OVER\nFinal score is " + str(score_sum))
return answer
def change_player(self):
game_state['current_player'] = ((game_state['current_player'] + 1) % len(self.players))
def other_player_number(self):
return (game_state['current_player'] + 1) % len(self.players)
# We can specialize our decisions based on how far along we are in the game
def is_early(self):
return 25 >= 25 - self.get_active_card_count() >= 17
def is_mid(self):
return 16 >= 25 - self.get_active_card_count() >= 10
def is_late(self):
return 9 >= 25 - self.get_active_card_count() >= 0
# Returns how many cards are in the piles
@staticmethod
def get_active_card_count():
return len(flatten(game_state['active'].values()))
# Returns the number of cards needed of a given value in the current active cards across all piles
@staticmethod
def n_value_needed(v):
if v < 1 or v > 5:
print("Value Error: Card value does not exist.")
return None
# For example, if we need a 4, that means there are 3's on top of a pile, so we subtract 1
v -= 1
n = 0
for color in game_state['colors']:
            n = n + (1 if game_state['active'][color] and game_state['active'][color][-1].value == v else 0)
return n
class Card:
def __init__(self, color, value):
self.color = color
self.value = value
def __str__(self):
return self.color + " - " + str(self.value)
def __eq__(self, other):
if self is None or other is None:
return False
        return self.color == other.color and self.value == other.value
def get_values(cards):
ret = []
for card in cards:
ret.append(card.value)
return ret
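# Illustrative helper (added sketch, not part of the original file): a standard
# Hanabi deck holds, per color, three 1s, two each of 2 through 4 and one 5.
def build_example_deck():
    counts = {1: 3, 2: 2, 3: 2, 4: 2, 5: 1}
    deck = [Card(color, value)
            for color in game_state['colors']
            for value, n in counts.items()
            for _ in range(n)]
    random.shuffle(deck)
    return deck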
class Player:
def __init__(self, number):
self.hand = []
self.cards_known = []
self.number = number
self.initial_draw()
def num_cards(self, color, value):
if color is not None and value is not None:
count = 0
for card in self.hand:
if card == Card(color, value):
count += 1
return count
if color is None and value is not None:
count = 0
for card in self.hand:
if card.value == value:
count += 1
return count
if color is not None and value is None:
count = 0
for card in self.hand:
                if card.color == color:
count += 1
return count
return -1
def num_known_cards(self, color, value):
if color is not None and value is not None:
count = 0
for card in self.cards_known:
if card == Card(color, value):
count += 1
return count
if color is None and value is not None:
count = 0
for card in self.cards_known:
if card.value == value:
count += 1
return count
if color is not None and value is None:
count = 0
for card in self.cards_known:
                if card.color == color:
count += 1
return count
return -1
def print_hand(self):
i = 0
for card in self.cards_known:
print("Card at index " + str(i) + ": " +
card.color if card.color is not None else "Unknown color - " + str(card.value) if card.value is not None else "Unknown value")
i += 1
def print_full_hand(self):
for card in self.hand:
            print(str(card) + "\n")
# Draw 5 at the start of the game
def initial_draw(self):
for _ in range(game_state['hand_size']):
self.hand.append(game_state['deck'].pop())
self.cards_known.append(Card(None, None))
class AIPlayer(Player):
# NEVER LET THE AI SEE THEIR OWN HAND
def __init__(self, number):
super().__init__(number)
self.actions = ['p', 'h', 'd']
def ai_decide_initial_action(self):
potential_play = self.have_playable_card()
decision = -1
# Play if we have full information on a valid card. This is always the optimal play and so it has priority
if potential_play is not None:
return self.actions[0]
if game_state['hints'] >= 4 and not self.is_cards_known_complete():
return self.actions[1]
# If we have no hint tokens and we have no plays, we are practically forced to discard
# Improvements would take into account late game and fuse count to guess a likely (say, 50% + 15%*fuse count)
# This makes it more careful the closer we are to losing by igniting all the fuses. For example, It will not
# Guess here if there is only one fuse remaining unless it is 90% certain that it would be a successful play
        if game_state['hints'] == 0 and potential_play is None:
decision = 2
        decision = 1 if decision == -1 and game_state['hints'] > 0 else 2
return self.actions[decision]
def ai_decide_action_play_card(self):
play = self.have_playable_card()
index_of_play = self.hand.index(play)
return index_of_play, play.color
@staticmethod
def ai_decide_action_give_hint(game):
random.seed()
# Randomly pick if we should give a color hint or a value hint
if random.random() > 0.6:
# Give color hint
rand_color = game_state['colors'][random.randint(0, len(game_state['colors']) - 1)]
while game.players[game.other_player_number()].num_cards(rand_color, None) <= 0 < game.players[game.other_player_number()].num_known_cards(rand_color, None):
rand_color = game_state['colors'][random.randint(0, len(game_state['colors']) - 1)]
return None, rand_color
else:
weighted_list = [1, 1, 1, 2, 2, 3, 3, 4, 4, 5]
rand_value = weighted_list[random.randint(0, 9)]
while game.players[game.other_player_number()].num_cards(None, rand_value) <= 0 < game.players[game.other_player_number()].num_known_cards(None, rand_value):
rand_value = weighted_list[random.randint(0, 9)]
return rand_value, None
# Give value hint
def ai_decide_action_discard_card(self):
if self.get_first_useless() is not None:
index_to_discard = self.hand.index(self.get_first_useless())
return index_to_discard
else:
return random.randint(0, 4)
def have_playable_card(self):
for card in self.cards_known:
# If we have full info on a card
if card.color is not None and card.value is not None:
                if card.value == 1 or not game_state['active'][card.color]:
return card
else:
# and if that card has a valid position to play on
active_card = game_state['active'][card.color][-1]
                    if active_card.value == card.value - 1:
# return it to play on
return card
        # Also need to perform process of elimination
the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@return: A deferred that fires with a C{list} of affiliations as
C{tuple}s of (node identifier as C{unicode}, affiliation state
as C{str}). The affiliation can be C{'owner'}, C{'publisher'},
or C{'outcast'}.
@rtype: L{defer.Deferred}
"""
def create(requestor, service, nodeIdentifier):
"""
Called when a node creation request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@param nodeIdentifier: The suggestion for the identifier of the node to
be created. If the request did not include a
suggestion for the node identifier, the value
is C{None}.
@type nodeIdentifier: C{unicode} or C{NoneType}
@return: A deferred that fires with a C{unicode} that represents
the identifier of the new node.
@rtype: L{defer.Deferred}
"""
def getConfigurationOptions():
"""
Retrieve all known node configuration options.
The returned dictionary holds the possible node configuration options
by option name. The value of each entry represents the specifics for
that option in a dictionary:
- C{'type'} (C{str}): The option's type (see
L{Field<wokkel.data_form.Field>}'s doc string for possible values).
- C{'label'} (C{unicode}): A human readable label for this option.
- C{'options'} (C{dict}): Optional list of possible values for this
option.
Example::
{
"pubsub#persist_items":
{"type": "boolean",
"label": "Persist items to storage"},
"pubsub#deliver_payloads":
{"type": "boolean",
"label": "Deliver payloads with event notifications"},
"pubsub#send_last_published_item":
{"type": "list-single",
"label": "When to send the last published item",
"options": {
"never": "Never",
"on_sub": "When a new subscription is processed"}
}
}
@rtype: C{dict}.
"""
def getDefaultConfiguration(requestor, service, nodeType):
"""
Called when a default node configuration request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@param nodeType: The type of node for which the configuration is
retrieved, C{'leaf'} or C{'collection'}.
@type nodeType: C{str}
@return: A deferred that fires with a C{dict} representing the default
node configuration. Keys are C{str}s that represent the
field name. Values can be of types C{unicode}, C{int} or
C{bool}.
@rtype: L{defer.Deferred}
"""
def getConfiguration(requestor, service, nodeIdentifier):
"""
Called when a node configuration retrieval request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@param nodeIdentifier: The identifier of the node to retrieve the
configuration from.
@type nodeIdentifier: C{unicode}
@return: A deferred that fires with a C{dict} representing the node
configuration. Keys are C{str}s that represent the field name.
Values can be of types C{unicode}, C{int} or C{bool}.
@rtype: L{defer.Deferred}
"""
def setConfiguration(requestor, service, nodeIdentifier, options):
"""
Called when a node configuration change request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@param nodeIdentifier: The identifier of the node to change the
configuration of.
@type nodeIdentifier: C{unicode}
@return: A deferred that fires with C{None} when the node's
configuration has been changed.
@rtype: L{defer.Deferred}
"""
def items(requestor, service, nodeIdentifier, maxItems, itemIdentifiers):
"""
        Called when an items retrieval request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@param nodeIdentifier: The identifier of the node to retrieve items
from.
@type nodeIdentifier: C{unicode}
"""
def retract(requestor, service, nodeIdentifier, itemIdentifiers):
"""
        Called when an item retraction request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@param nodeIdentifier: The identifier of the node to retract items
from.
@type nodeIdentifier: C{unicode}
"""
def purge(requestor, service, nodeIdentifier):
"""
Called when a node purge request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
@param nodeIdentifier: The identifier of the node to be purged.
@type nodeIdentifier: C{unicode}
"""
def delete(requestor, service, nodeIdentifier):
"""
Called when a node deletion request has been received.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The entity the request was addressed to.
@type service: L{jid.JID}
        @param nodeIdentifier: The identifier of the node to be deleted.
@type nodeIdentifier: C{unicode}
"""
class IPubSubResource(Interface):
def locateResource(request):
"""
Locate a resource that will handle the request.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
"""
def getInfo(requestor, service, nodeIdentifier):
"""
Get node type and meta data.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The publish-subscribe service entity.
@type service: L{jid.JID}
@param nodeIdentifier: Identifier of the node to request the info for.
@type nodeIdentifier: L{unicode}
@return: A deferred that fires with a dictionary. If not empty,
it must have the keys C{'type'} and C{'meta-data'} to keep
respectively the node type and a dictionary with the meta
data for that node.
@rtype: L{defer.Deferred}
"""
def getNodes(requestor, service, nodeIdentifier):
"""
Get all nodes contained by this node.
@param requestor: The entity the request originated from.
@type requestor: L{jid.JID}
@param service: The publish-subscribe service entity.
@type service: L{jid.JID}
        @param nodeIdentifier: Identifier of the node to request the child nodes for.
@type nodeIdentifier: L{unicode}
@return: A deferred that fires with a list of child node identifiers.
@rtype: L{defer.Deferred}
"""
def getConfigurationOptions():
"""
Retrieve all known node configuration options.
The returned dictionary holds the possible node configuration options
by option name. The value of each entry represents the specifics for
that option in a dictionary:
- C{'type'} (C{str}): The option's type (see
L{Field<wokkel.data_form.Field>}'s doc string for possible values).
- C{'label'} (C{unicode}): A human readable label for this option.
- C{'options'} (C{dict}): Optional list of possible values for this
option.
Example::
{
"pubsub#persist_items":
{"type": "boolean",
"label": "Persist items to storage"},
"pubsub#deliver_payloads":
{"type": "boolean",
"label": "Deliver payloads with event notifications"},
"pubsub#send_last_published_item":
{"type": "list-single",
"label": "When to send the last published item",
"options": {
"never": "Never",
"on_sub": "When a new subscription is processed"}
}
}
@rtype: C{dict}.
"""
def publish(request):
"""
Called when a publish request has been received.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
@return: deferred that fires on success.
@rtype: L{defer.Deferred}
"""
def subscribe(request):
"""
Called when a subscribe request has been received.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
@return: A deferred that fires with a
L{Subscription<wokkel.pubsub.Subscription>}.
@rtype: L{defer.Deferred}
"""
def unsubscribe(request):
"""
        Called when an unsubscribe request has been received.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
@return: A deferred that fires with C{None} when unsubscription has
succeeded.
@rtype: L{defer.Deferred}
"""
def subscriptions(request):
"""
Called when a subscriptions retrieval request has been received.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
@return: A deferred that fires with a C{list} of subscriptions as
L{Subscription<wokkel.pubsub.Subscription>}.
@rtype: L{defer.Deferred}
"""
def affiliations(request):
"""
        Called when an affiliations retrieval request has been received.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
@return: A deferred that fires with a C{list} of affiliations as
C{tuple}s of (node identifier as C{unicode}, affiliation state
as C{str}). The affiliation can be C{'owner'}, C{'publisher'},
or C{'outcast'}.
@rtype: L{defer.Deferred}
"""
def create(request):
"""
Called when a node creation request has been received.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
@return: A deferred that fires with a C{unicode} that represents
the identifier of the new node.
@rtype: L{defer.Deferred}
"""
def default(request):
"""
Called when a default node configuration request has been received.
@param request: The publish-subscribe request.
@type request: L{wokkel.pubsub.PubSubRequest}
@return: A deferred that fires with a C{dict} representing the default
node configuration. Keys are C{str}s that represent the
field name. Values can be of types C{unicode}, C{int} or
C{bool}.
@rtype: L{defer.Deferred}
"""
def configureGet(request):
"""
Called when a node configuration retrieval request has been received.
@param request: The publish-subscribe request.
        @type request: L{wokkel.pubsub.PubSubRequest}
* x,
),
x,
)
def replacement6302(A, B, C, a, b, c, d, n, p, x):
return Dist(
S(1) / d,
Subst(
Int(
(a + b * acosh(x)) ** n
* (C * x ** S(2) / d ** S(2) - C / d ** S(2)) ** p,
x,
),
x,
c + d * x,
),
x,
)
def replacement6303(A, B, C, a, b, c, d, e, f, m, n, p, x):
return Dist(
S(1) / d,
Subst(
Int(
(a + b * asinh(x)) ** n
* (C * x ** S(2) / d ** S(2) + C / d ** S(2)) ** p
* (f * x / d + (-c * f + d * e) / d) ** m,
x,
),
x,
c + d * x,
),
x,
)
def replacement6304(A, B, C, a, b, c, d, e, f, m, n, p, x):
return Dist(
S(1) / d,
Subst(
Int(
(a + b * acosh(x)) ** n
* (C * x ** S(2) / d ** S(2) - C / d ** S(2)) ** p
* (f * x / d + (-c * f + d * e) / d) ** m,
x,
),
x,
c + d * x,
),
x,
)
def replacement6305(a, b, c, d, x):
return (
Simp(x * sqrt(a + b * asinh(c + d * x ** S(2))), x)
- Simp(
sqrt(Pi)
* x
* (-c * sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* FresnelC(sqrt(-c / (Pi * b)) * sqrt(a + b * asinh(c + d * x ** S(2))))
/ (
sqrt(-c / b)
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
)
+ Simp(
sqrt(Pi)
* x
* (c * sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* FresnelS(sqrt(-c / (Pi * b)) * sqrt(a + b * asinh(c + d * x ** S(2))))
/ (
sqrt(-c / b)
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
)
)
def replacement6306(a, b, c, d, n, x):
return (
Dist(
S(4) * b ** S(2) * n * (n + S(-1)),
Int((a + b * asinh(c + d * x ** S(2))) ** (n + S(-2)), x),
x,
)
+ Simp(x * (a + b * asinh(c + d * x ** S(2))) ** n, x)
- Simp(
S(2)
* b
* n
* (a + b * asinh(c + d * x ** S(2))) ** (n + S(-1))
* sqrt(S(2) * c * d * x ** S(2) + d ** S(2) * x ** S(4))
/ (d * x),
x,
)
)
def replacement6307(a, b, c, d, x):
return Simp(
x
* (-c * sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* SinhIntegral((a + b * asinh(c + d * x ** S(2))) / (S(2) * b))
/ (
S(2)
* b
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
) + Simp(
x
* (c * cosh(a / (S(2) * b)) - sinh(a / (S(2) * b)))
* CoshIntegral((a + b * asinh(c + d * x ** S(2))) / (S(2) * b))
/ (
S(2)
* b
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
)
def replacement6308(a, b, c, d, x):
return Simp(
sqrt(S(2))
* sqrt(Pi)
* x
* (c + S(-1))
* (sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* Erf(sqrt(S(2)) * sqrt(a + b * asinh(c + d * x ** S(2))) / (S(2) * sqrt(b)))
/ (
S(4)
* sqrt(b)
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
) + Simp(
sqrt(S(2))
* sqrt(Pi)
* x
* (c + S(1))
* (-sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* Erfi(sqrt(S(2)) * sqrt(a + b * asinh(c + d * x ** S(2))) / (S(2) * sqrt(b)))
/ (
S(4)
* sqrt(b)
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
)
def replacement6309(a, b, c, d, x):
return (
-Simp(
sqrt(S(2) * c * d * x ** S(2) + d ** S(2) * x ** S(4))
/ (b * d * x * sqrt(a + b * asinh(c + d * x ** S(2)))),
x,
)
- Simp(
sqrt(Pi)
* x
* (-c / b) ** (S(3) / 2)
* (-c * sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* FresnelC(sqrt(-c / (Pi * b)) * sqrt(a + b * asinh(c + d * x ** S(2))))
/ (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
),
x,
)
+ Simp(
sqrt(Pi)
* x
* (-c / b) ** (S(3) / 2)
* (c * sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* FresnelS(sqrt(-c / (Pi * b)) * sqrt(a + b * asinh(c + d * x ** S(2))))
/ (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
),
x,
)
)
def replacement6310(a, b, c, d, x):
return (
Simp(
x
* (-c * sinh(a / (S(2) * b)) + cosh(a / (S(2) * b)))
* CoshIntegral((a + b * asinh(c + d * x ** S(2))) / (S(2) * b))
/ (
S(4)
* b ** S(2)
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
)
+ Simp(
x
* (c * cosh(a / (S(2) * b)) - sinh(a / (S(2) * b)))
* SinhIntegral((a + b * asinh(c + d * x ** S(2))) / (S(2) * b))
/ (
S(4)
* b ** S(2)
* (
c * sinh(asinh(c + d * x ** S(2)) / S(2))
+ cosh(asinh(c + d * x ** S(2)) / S(2))
)
),
x,
)
- Simp(
sqrt(S(2) * c * d * x ** S(2) + d ** S(2) * x ** S(4))
/ (S(2) * b * d * x * (a + b * asinh(c + d * x ** S(2)))),
x,
)
)
def replacement6311(a, b, c, d, n, x):
return (
Dist(
S(1) / (S(4) * b ** S(2) * (n + S(1)) * (n + S(2))),
Int((a + b * asinh(c + d * x ** S(2))) ** (n + S(2)), x),
x,
)
- Simp(
x
* (a + b * asinh(c + d * x ** S(2))) ** (n + S(2))
/ (S(4) * b ** S(2) * (n + S(1)) * (n + S(2))),
x,
)
+ Simp(
(a + b * asinh(c + d * x ** S(2))) ** (n + S(1))
* sqrt(S(2) * c * d * x ** S(2) + d ** S(2) * x ** S(4))
/ (S(2) * b * d * x * (n + S(1))),
x,
)
)
def replacement6312(a, b, d, x):
return (
Simp(
S(2)
* sqrt(a + b * acosh(d * x ** S(2) + S(1)))
            * sinh(acosh(d * x ** S(2) + S(1))
from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold
from sklearn.metrics import *
from tqdm import tqdm
import sklearn
import optuna
import pandas as pd
import numpy as np
import time
import sys
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style(style="darkgrid")
# disable chained assignments
pd.options.mode.chained_assignment = None
from automl_alex.databunch import DataBunch
from automl_alex.encoders import *
predict_proba_metrics = ['roc_auc_score', 'log_loss', 'brier_score_loss']
class ModelBase(object):
"""
Base class for a specific ML algorithm implementation factory,
i.e. it defines algorithm-specific hyperparameter space and generic methods for model training & inference
"""
pbar = 0
model = None
study = None
history_trials = []
history_trials_dataframe = pd.DataFrame()
def __init__(self,
X_train=None,
y_train=None,
X_test=None,
y_test=None,
cat_features=None,
clean_and_encod_data=True,
cat_encoder_names=['OneHotEncoder', 'HelmertEncoder', 'HashingEncoder', 'FrequencyEncoder'],
clean_nan=True,
num_generator_features=True,
group_generator_features=False,
frequency_enc_num_features=True,
normalization=True,
databunch=None,
model_param=None,
wrapper_params=None,
auto_parameters=True,
cv=10,
score_cv_folds=5, # how many folds are actually used
opt_lvl=3,
metric=None,
direction=None,
combined_score_opt=True,
metric_round=4,
cold_start=100,
gpu=False,
type_of_estimator=None, # classifier or regression
verbose=0,
random_state=42):
if type_of_estimator is not None:
self.type_of_estimator = type_of_estimator
if metric is not None:
self.metric = metric
else:
if self.type_of_estimator == 'classifier':
self.metric = sklearn.metrics.roc_auc_score
self.direction = 'maximize'
elif self.type_of_estimator == 'regression':
self.metric = sklearn.metrics.mean_squared_error
self.direction = 'minimize'
if direction is not None:
self.direction = direction
self._auto_parameters = auto_parameters
self._metric_round = metric_round
self._cv = cv
self._cold_start = cold_start
self._gpu = gpu
self._random_state = random_state
self._score_cv_folds = score_cv_folds
self._opt_lvl = opt_lvl
self._combined_score_opt = combined_score_opt
self.wrapper_params = wrapper_params
if wrapper_params is None:
self.wrapper_params = self._init_default_wrapper_params()
self.model_param = model_param
if model_param is None:
self.model_param = self._init_default_model_param()
self.history_trials = []
self.history_trials_dataframe = pd.DataFrame()
# dataset
if databunch:
self._data = databunch
else:
if X_train is not None:
self._data = DataBunch(X_train=X_train,
y_train=y_train,
X_test=X_test,
y_test=y_test,
cat_features=cat_features,
clean_and_encod_data=clean_and_encod_data,
cat_encoder_names=cat_encoder_names,
clean_nan=clean_nan,
num_generator_features=num_generator_features,
group_generator_features=group_generator_features,
frequency_enc_num_features=frequency_enc_num_features,
verbose=verbose,
random_state=random_state,)
else:
raise Exception("no Data?")
#self._init_dataset()
def _init_default_wrapper_params(self,):
"""
Default wrapper_params
"""
wrapper_params = {}
return(wrapper_params)
def _init_default_model_param(self,):
"""
Default model_param
"""
model_param = {}
return(model_param)
def _fit(self, dataset, weights=None):
"""
Args:
X (np.array, shape (n_samples, n_features)): the input data
y (np.array, shape (n_samples, ) or (n_samples, n_outputs)): the target data
Return:
self
"""
raise NotImplementedError("Pure virtual class.")
def save_snapshot(self, filename):
"""
Return:
serializable internal model state snapshot.
"""
raise NotImplementedError("Pure virtual class.")
@staticmethod
def load_from_snapshot(self, filename):
"""
:snapshot serializable internal model state
loads from serializable internal model state snapshot.
"""
raise NotImplementedError("Pure virtual class.")
def _predict(self, dataset):
"""
Args:
dataset : the input data,
dataset.y may be None
Return:
np.array, shape (n_samples, ): predictions
"""
raise NotImplementedError("Pure virtual class.")
def is_possible_predict_proba(self):
"""
Return:
bool, whether model can predict proba
"""
raise NotImplementedError("Pure virtual class.")
def _predict_proba(self, X):
"""
Args:
dataset (np.array, shape (n_samples, n_features)): the input data
Return:
np.array, shape (n_samples, n_classes): predicted probabilities
"""
raise NotImplementedError("Pure virtual class.")
#@staticmethod
def get_model_opt_params(self, ):
"""
Return:
dict from parameter name to hyperopt distribution: default
parameter space
"""
raise NotImplementedError("Pure virtual class.")
def __calc_combined_score_opt__(self, direction, score, score_std):
"""
Args:
direction (str): 'minimize' or 'maximize'
score (float): the input score
score_std (float): the input score_std
Return:
score_opt (float): combined score
"""
if direction == 'maximize':
score_opt = score - score_std
else:
score_opt = score + score_std
return(score_opt)
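    # Example (illustrative): with direction='maximize', score=0.83 and
    # score_std=0.02 the optimization target becomes 0.81, while 'minimize'
    # would give 0.85; penalizing by the fold-to-fold std favors parameter
    # sets that are both strong and stable across CV folds.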
def __auto_parameters_calc(self, possible_iters, verbose=1):
"""
Automatic determination of optimization parameters depending on the number of possible iterations
Args:
possible_iters (int): possible_iters
verbose (int): print status
Return:
early_stoping (int)
cv (int)
score_cv_folds (int)
opt_lvl (int)
cold_start (int)
"""
if verbose > 0:
print('> Start Auto calibration parameters')
if possible_iters > 100:
cv = 5
score_cv_folds = 2
opt_lvl = 1
cold_start = possible_iters // 2
early_stoping = 100
if possible_iters > 200:
score_cv_folds = 3
opt_lvl = 2
cold_start = (possible_iters / score_cv_folds) // 3
if possible_iters > 300:
cv = 10
score_cv_folds = 3
cold_start = (possible_iters / score_cv_folds) // 5
if possible_iters > 900:
score_cv_folds = 5
opt_lvl = 3
early_stoping = cold_start * 2
if possible_iters > 10000:
opt_lvl = 4
score_cv_folds = 10
cold_start = (possible_iters / score_cv_folds) // 10
early_stoping = cold_start * 2
if possible_iters > 25000:
opt_lvl = 5
score_cv_folds = 15
cold_start = (possible_iters / score_cv_folds) // 30
early_stoping = cold_start * 2
return(early_stoping, cv, score_cv_folds, opt_lvl, cold_start,)
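    # Example (illustrative trace of the rules above): possible_iters=1000
    # yields cv=10, score_cv_folds=5, opt_lvl=3, cold_start=66 and
    # early_stoping=132 (twice the cold start).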
def _tqdm_opt_print(self, pbar):
"""
Printing information in tqdm. Use pbar.
See the documentation for tqdm: https://github.com/tqdm/tqdm
"""
if pbar is not None:
if self.direction == 'maximize':
self.history_trials_dataframe = pd.DataFrame(self.history_trials).sort_values('score_opt', ascending=False)
else:
self.history_trials_dataframe = pd.DataFrame(self.history_trials).sort_values('score_opt', ascending=True)
best_trail = self.history_trials_dataframe.head(1)
best_model_name = best_trail['model_name'].iloc[0]
self.best_score = best_trail['score_opt'].iloc[0]
self.best_score_std = best_trail['score_std'].iloc[0]
best_metric_score = best_trail['model_score'].iloc[0]
message = f' | Model: {best_model_name} | OptScore: {self.best_score} | Best {self.metric.__name__}: {best_metric_score} '
if self._score_cv_folds > 1:
message+=f'+- {self.best_score_std}'
pbar.set_postfix_str(message)
pbar.update(1)
def _print_opt_parameters(self, early_stoping, feature_selection):
print('CV_Folds = ', self._cv)
print('Score_CV_Folds = ', self._score_cv_folds)
print('Feature_Selection = ', feature_selection)
print('Opt_lvl = ', self._opt_lvl)
print('Cold_start = ', self._cold_start)
print('Early_stoping = ', early_stoping)
print('Metric = ', self.metric.__name__)
print('Direction = ', self.direction)
def _opt_model(self, trial, model=None):
"""
Description of _opt_model:
        Build a model instance with the new trial parameters.
        Kept as a separate method so subclasses can change how the model is
        constructed during optimization.
Args:
trial (undefined):
model=None (None or class):
"""
if model is None:
model = self
model.model_param = model.get_model_opt_params(
trial=trial,
model=model,
opt_lvl=model._opt_lvl,
metric_name=model.metric.__name__,
)
return(model)
def _opt_feature_selector(self, columns, trial):
"""
Description of _opt_feature_selector
Args:
columns (list):
trial (undefined):
Returns:
selected columns (list)
"""
select_columns = {}
for colum in columns:
select_columns[colum] = trial.suggest_categorical(colum, [True, False])
select_columns_ = {k: v for k, v in select_columns.items() if v is True}
return(select_columns_.keys())
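    # Example (illustrative): for columns ['age', 'fare', 'cabin'] each Optuna
    # trial samples an independent True/False flag per column and only the
    # columns flagged True are used when scoring that trial.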
def _opt_core(self, timeout, early_stoping, feature_selection, verbose=1):
"""
Description of _opt_core:
in progress...
Args:
timeout (int):
early_stoping (int):
feature_selection (bool):
verbose=1 (int):
Returns:
history_trials_dataframe (pd.DataFrame)
"""
# X
X=self._data.X_train
# time model
start_time = time.time()
score, score_std = self.cross_val_score(X=X, folds=self._cv, score_folds=2, print_metric=False,)
iter_time = (time.time() - start_time)
if verbose > 0:
print(f'One iteration takes ~ {round(iter_time,1)} sec')
possible_iters = timeout // (iter_time)
if possible_iters < 100:
print("Not enough time to find the optimal parameters. \n \
Possible iters < 100. \n \
Please, Increase the 'timeout' parameter for normal optimization.")
raise Exception('Not enough time to find the optimal parameters')
# Auto_parameters
if self._auto_parameters:
early_stoping, self._cv, self._score_cv_folds, self._opt_lvl, self._cold_start = \
self.__auto_parameters_calc(possible_iters, verbose)
config = self.fit(print_metric=False,)
self.best_score = config['score_opt'].iloc[0]
if verbose > 0:
print('> Start optimization with the parameters:')
self._print_opt_parameters(early_stoping, feature_selection)
print('#'*50)
print(f'Default model OptScore = {round(self.best_score,4)}')
# OPTUNA objective
def objective(trial, fast_check=True):
# generate model
opt_model = self._opt_model(trial=trial)
            # feature selector; without it, fall back to the full feature set
            # so X is always defined inside the objective
            if feature_selection:
                select_columns = self._opt_feature_selector(
                    opt_model._data.X_train.columns,
                    trial=trial)
                X = opt_model._data.X_train[select_columns]
            else:
                X = opt_model._data.X_train
# score
score, score_std = opt_model.cross_val_score(
X=X,
folds=opt_model._cv,
score_folds=opt_model._score_cv_folds,
print_metric=False,
)
# _combined_score_opt
if self._combined_score_opt:
score_opt = self.__calc_combined_score_opt__(self.direction, score, score_std)
else:
score_opt = score
score_opt = round(score_opt, self._metric_round)
# History trials
self.history_trials.append({
'score_opt': score_opt,
'model_score': score,
'score_std': score_std,
'model_name': opt_model.__name__,
'model_param': opt_model.model_param,
'wrapper_params': opt_model.wrapper_params,
'cat_encoders': opt_model._data.cat_encoder_names,
'columns': X.columns.values,
'cv_folds': opt_model._cv,
})
# verbose
if verbose >= 1:
self._tqdm_opt_print(pbar)
return score_opt
sampler=optuna.samplers.TPESampler(consider_prior=True,
prior_weight=1.0,
consider_magic_clip=True,
consider_endpoints=False,
n_startup_trials=self._cold_start,
n_ei_candidates=50,
seed=self._random_state)
if self.study is None:
self.study = optuna.create_study(direction=self.direction, sampler=sampler,)
if verbose < 2:
optuna.logging.disable_default_handler()
es = EarlyStoppingExceeded()
es.early_stop = early_stoping
es.early_stop_count = 0
es.best_score = None
es_callback = es.early_stopping_opt_minimize
if self.direction == 'maximize':
es_callback = es.early_stopping_opt_maximize
if verbose > 0:
disable_tqdm = False
else:
disable_tqdm = True
with tqdm(
file=sys.stdout,
desc="Optimize: ",
disable=disable_tqdm,
) as pbar:
try:
self.study.optimize(
objective,
timeout=timeout,
callbacks=[es_callback],
show_progress_bar=False,
)
except EarlyStoppingExceeded:
if verbose == 1:
print(f'\n EarlyStopping Exceeded: Best Score: {self.study.best_value}',
self.metric.__name__)
self.history_trials_dataframe = pd.DataFrame(self.history_trials).sort_values('score_opt', ascending=True)
if self.direction == 'maximize':
self.history_trials_dataframe = pd.DataFrame(self.history_trials).sort_values('score_opt', ascending=False)
return(self.history_trials_dataframe)
def opt(self,
timeout=100, # optimization time in seconds
auto_parameters=None,
cv_folds=None,
cold_start=None,
score_cv_folds=None,
opt_lvl=None,
direction=None,
early_stoping=100,
feature_selection=True,
verbose=1,):
"""
Description of opt:
in progress...
Args:
timeout=100 (int):
cv_folds=None (None or int):
cold_start=None (None or int):
score_cv_folds=None (None or int):
opt_lvl=None (None or int):
direction=None (None or str):
early_stoping=100 (int):
feature_selection=True (bool):
verbose=1 (int):
Returns:
history_trials (pd.DataFrame)
"""
if cv_folds is not None:
self._cv = cv_folds
if score_cv_folds is not None:
self._score_cv_folds = score_cv_folds
        if cold_start is not None:
            self._cold_start = cold_start
# If the player chooses to go up, decrease the net vertical velocity
if self.player_moving_up and self.player.y + self.player.radius < self.height:
net_vely += increment
# Apply the net vertical velocity to the player
self.player.vely = net_vely
# Update the player's position
self.player.update(dt)
self.player_label.x = self.player.x
self.player_label.y = self.player.y
# Update the window and the objects
def update(self, dt):
# Add to the set indices of blobs that have been killed
dead_blobs = set()
# If the player exists, check for collisions between the player and the other blobs
if self.player:
for blob, label in self.blobs.items():
if self.player and self.distance(self.player.x, self.player.y, blob.x, blob.y) < self.player.radius + blob.radius:
# The largest of the two will consume the other.
# If they're the same size, delete both of them
if self.player.mass > blob.mass:
# Update the player's mass and label
self.player.mass += blob.mass // 2
self.player_label.text = f"{self.player.mass}"
self.player_label.font_size = self.player.radius * 0.6
# Update the score
self.score = self.player.mass
self.score_label.text = f"Score: {self.score}"
# Make sure the player remains completely on the screen
self.adjust_blob_position(self.player)
# Add the blob to the set of dead blobs
dead_blobs.add(blob)
elif self.player.mass < blob.mass:
# Update the player's mass and label
blob.mass += self.player.mass // 2
label.text = f"{blob.mass}"
label.font_size = blob.radius * 0.6
blob.genome.kill_timer = 10 # Reset the kill timer
# Make sure the blob remains completely on the screen
self.adjust_blob_position(blob)
# Delete the player and the associated label
# Python GC will handle the objects once the reference is removed
self.player = None
self.player_label = None
else:
# Delete the blob
dead_blobs.add(blob)
# Delete the player and the associated label
# Python GC will handle the objects once the reference is removed
self.player = None
self.player_label = None
# Check for collisions among the blobs
for (blob_1, label_1), (blob_2, label_2) in combinations(self.blobs.items(), 2):
if self.distance(blob_1.x, blob_1.y, blob_2.x, blob_2.y) < blob_1.radius + blob_2.radius:
# The largest of the two will consume the other.
# If they're the same size, delete both of them
if blob_1.mass > blob_2.mass:
# Update the larger blob's mass and its associated label
blob_1.mass += blob_2.mass // 2
label_1.text = f"{blob_1.mass}"
label_1.font_size = blob_1.radius * 0.6
blob_1.genome.kill_timer = 10 # Reset the kill timer
# Make sure the larger blob remains completely on the screen
self.adjust_blob_position(blob_1)
# Add the smaller blob's index to the set of dead blobs
dead_blobs.add(blob_2)
elif blob_1.mass < blob_2.mass:
# Update the blob's mass and its associated label
blob_2.mass += blob_1.mass // 2
label_2.text = f"{blob_2.mass}"
label_2.font_size = blob_2.radius * 0.6
blob_2.genome.kill_timer = 10 # Reset the kill timer
# Make sure the larger blob remains completely on the screen
self.adjust_blob_position(blob_2)
# Add the smaller blob's index to the set of dead blobs
dead_blobs.add(blob_1)
else:
# Add both blobs' indices to the set of dead blobs
dead_blobs.add(blob_1)
dead_blobs.add(blob_2)
# If NEAT is enabled, decrement the kill timer. If any reach 0, delete the blob genome.
# Also check for blobs that collided with the window border
if self.enable_neat:
for blob in self.blobs.keys():
if blob.genome.kill_timer <= 0 or (
blob.x - blob.radius <= 0 or blob.x + blob.radius >= self.width - 1
or blob.y - blob.radius <= 0 or blob.y + blob.radius >= self.height - 1):
# Penalize the genome heavily
blob.genome.fitness -= 200
# Add the blob genome to the list of dead blobs
dead_blobs.add(blob)
else:
blob.genome.kill_timer -= dt
# Remove the dead objects from the game
for blob in dead_blobs:
# Penalize the genome for dying
blob.genome.fitness -= 100
# Eliminate the genome
del self.blobs[blob]
# Update the number of blobs remaining if NEAT is enabled
if self.enable_neat:
self.number_of_blobs_label.text = f"Blobs: {len(self.blobs)}"
# If no blobs remain, terminate the current generation
if not self.blobs:
self.reset()
pyglet.app.exit()
# Check for collisions between the player and the food if the player is alive
if self.player:
for food in self.foods:
if self.distance(self.player.x, self.player.y, food.x, food.y) < self.player.radius + food.width / 2:
# Update the player and the associated label
self.player.mass += 1
self.player_label.text = f"{self.player.mass}"
self.player_label.font_size = self.player.radius * 0.6
# Update the score
self.score = self.player.mass
self.score_label.text = f"Score: {self.score}"
# Make sure the player remains completely on the screen
self.adjust_blob_position(self.player)
# Delete the current food item and generate a new one
self.foods.remove(food)
self.generate_food()
# Check for collisions between the blob NPCs and the food
        for blob, label in self.blobs.items():
            # Each blob can eat at most one food item per update; initialize the
            # flag once per blob, before the food loop, so it actually takes effect
            food_consumed = False
            for food in self.foods:
                if not food_consumed and self.distance(blob.x, blob.y, food.x, food.y) < blob.radius + food.width:
# Update the blob's mass and its associated label
blob.mass += 1
label.text = f"{blob.mass}"
label.font_size = blob.radius * 0.6
# Reset the kill timer
blob.genome.kill_timer = 10
# Make sure the blob remains completely on the screen
self.adjust_blob_position(blob)
# Delete the current food item and generate a new one
self.foods.remove(food)
self.generate_food()
# Reward the genome for finding food
blob.genome.fitness += 1
# Terminate the inner loop
food_consumed = True
# Let the neural networks make decisions for their respective blobs
for blob in self.blobs.keys():
# Get the closest blob
closest_blob = None
min_blob_distance = float("Inf")
for other_blob in self.blobs.keys():
distance = self.distance(blob.x, blob.y, other_blob.x, other_blob.y)
if blob is not other_blob and distance < min_blob_distance:
closest_blob = other_blob
min_blob_distance = distance
# If the player exists and is the closest blob, save it as the closest blob.
            # Otherwise, save the current blob as the closest blob if no other blob exists
if self.player: # The player exists
if closest_blob: # At least one other blob exists
distance = self.distance(self.player.x, self.player.y, closest_blob.x, closest_blob.y)
if distance < min_blob_distance:
closest_blob = self.player
min_blob_distance = distance
else: # There is only one blob NPC alive
closest_blob = self.player
min_blob_distance = distance
else:
if not closest_blob: # No other blob NPC exists
closest_blob = blob
            # If there is a closest blob, extract the attributes the network needs.
            # Otherwise, fall back to the window centre and a mass of zero.
closest_blob_x = closest_blob.x if closest_blob else self.width / 2
closest_blob_y = closest_blob.y if closest_blob else self.height / 2
closest_blob_mass = closest_blob.mass if closest_blob else 0
# Get the closest food item
closest_food = None
min_food_distance = float("Inf")
for food in self.foods:
distance = self.distance(blob.x, blob.y, food.x, food.y)
if distance < min_food_distance:
closest_food = food
min_food_distance = distance
# Activate the genome's neural network which will determine the blob's next move
output = blob.neural_net.activate((
blob.x, # X-coordinate of the blob
blob.y, # Y-coordinate of the blob
#blob.x - blob.radius, # Distance from the left window border
#self.width - blob.x - blob.radius, # Distance from the right window border
#blob.y - blob.radius, # Distance from the top window border
#self.height - blob.y - blob.radius, # Distance from the bottom window border
blob.x - closest_food.x, # Distance to the food item on the x-axis
blob.y - closest_food.y, # Distance to the food item on the y-axis
blob.x - closest_blob_x if closest_blob else self.width / 2, # Distance to the blob on the x-axis
blob.y - closest_blob_y if closest_blob else self.height / 2, # Distance to the blob on the y-axis
blob.mass - closest_blob_mass if closest_blob else 0 # Difference between the masses of the two blobs
))
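            # The network maps the seven inputs above to two outputs; each output is
            # thresholded at +/-0.5 below to choose horizontal and vertical movement.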
# Change the velocity by a specified amount
increment = 100
# Horizontal movement of the blob
if output[0] < -0.5 and blob.x - blob.radius >= 0: # Left
blob.velx = -increment
elif output[0] > 0.5 and blob.x + blob.radius < self.width: # Right
blob.velx = increment
else: # No movement
blob.velx = 0
# Vertical movement of the blob
if output[1] < -0.5 and blob.y - blob.radius >= 0: # Down
blob.vely = -increment
            elif output[1] > 0.5 and blob.y + blob.radius < self.height:  # Up
                blob.vely = increment
            else:  # No movement
                blob.vely = 0
        cstyle = NONE
if len(self._styles) and self.caret.index <= len(self._styles):
cstyle = self._styles[self.caret.index-1]
self._styles.insert(self.caret.index, cstyle)
self.caret.index += 1
self._text = txt
self._layout()
def deleteCaretLeft(self):
if self.caret.index > 0:
txt = self._text
ci = self.caret.index
txt = txt[:ci-1] + txt[ci:]
self._styles = self._styles[:ci-1]+self._styles[ci:]
self.caret.index -= 1
self._text = txt
self._layout()
def deleteCaretRight(self):
ci = self.caret.index
if ci < len(self._text):
txt = self._text
txt = txt[:ci] + txt[ci+1:]
self._styles = self._styles[:ci]+self._styles[ci+1:]
self._text = txt
self._layout()
def _layout(self):
"""Layout the text, calculating the vertex locations
"""
def getLineWidthFromPix(pixVal):
return pixVal / self._pixelScaling + self.padding * 2
rgb = self._foreColor.render('rgba1')
font = self.glFont
# the vertices are initially pix (natural for freetype)
# then we convert them to the requested units for self._vertices
# then they are converted back during rendering using standard BaseStim
visible_text = self._text
vertices = np.zeros((len(visible_text) * 4, 2), dtype=np.float32)
self._charIndices = np.zeros((len(visible_text)), dtype=int)
self._colors = np.zeros((len(visible_text) * 4, 4), dtype=np.double)
self._texcoords = np.zeros((len(visible_text) * 4, 2), dtype=np.double)
self._glIndices = np.zeros((len(visible_text) * 4), dtype=int)
# the following are used internally for layout
self._lineNs = np.zeros(len(visible_text), dtype=int)
        self._lineTops = []  # y position of the top of each line (one entry per line)
        self._lineBottoms = []  # y position of the bottom of each line
        self._lineLenChars = []  # number of characters in each line
self._lineWidths = [] # width in stim units of each line
self._lineHeight = font.height * self.lineSpacing
if np.isnan(self._requestedSize[0]):
lineMax = float('inf')
else:
lineMax = (self._requestedSize[0] - self.padding) * self._pixelScaling
current = [0, 0]
fakeItalic = 0.0
fakeBold = 0.0
# for some reason glyphs too wide when using alpha channel only
if font.atlas.format == 'alpha':
alphaCorrection = 1 / 3.0
else:
alphaCorrection = 1
if self._lineBreaking == 'default':
wordLen = 0
charsThisLine = 0
wordsThisLine = 0
lineN = 0
for i, charcode in enumerate(self._text):
printable = True # unless we decide otherwise
# handle formatting codes
if self._styles[i] == NONE:
fakeItalic = 0.0
fakeBold = 0.0
elif self._styles[i] == ITALIC:
fakeItalic = 0.1 * font.size
                elif self._styles[i] == BOLD:
fakeBold = 0.3 * font.size
# handle newline
if charcode == '\n':
printable = False
# handle printable characters
if printable:
glyph = font[charcode]
if showWhiteSpace and charcode == " ":
glyph = font[u"·"]
elif charcode == " ":
# glyph size of space is smaller than actual size, so use size of dot instead
glyph.size = font[u"·"].size
xBotL = current[0] + glyph.offset[0] - fakeItalic - fakeBold / 2
xTopL = current[0] + glyph.offset[0] - fakeBold / 2
yTop = current[1] + glyph.offset[1]
xBotR = xBotL + glyph.size[0] * alphaCorrection + fakeBold
xTopR = xTopL + glyph.size[0] * alphaCorrection + fakeBold
yBot = yTop - glyph.size[1]
# Adjust for norm
if self.units == 'norm':
ratio = self.win.size[1]/self.win.size[0]
xBotL *= ratio
xTopL *= ratio
xBotR *= ratio
xTopR *= ratio
u0 = glyph.texcoords[0]
v0 = glyph.texcoords[1]
u1 = glyph.texcoords[2]
v1 = glyph.texcoords[3]
else:
glyph = font[u"·"]
x = current[0] + glyph.offset[0]
yTop = current[1] + glyph.offset[1]
yBot = yTop - glyph.size[1]
xBotL = x
xTopL = x
xBotR = x
xTopR = x
u0 = glyph.texcoords[0]
v0 = glyph.texcoords[1]
u1 = glyph.texcoords[2]
v1 = glyph.texcoords[3]
theseVertices = [[xTopL, yTop], [xBotL, yBot],
[xBotR, yBot], [xTopR, yTop]]
texcoords = [[u0, v0], [u0, v1],
[u1, v1], [u1, v0]]
vertices[i * 4:i * 4 + 4] = theseVertices
self._texcoords[i * 4:i * 4 + 4] = texcoords
self._colors[i*4 : i*4+4, :4] = rgb
self._lineNs[i] = lineN
current[0] = current[0] + glyph.advance[0] + fakeBold / 2
current[1] = current[1] + glyph.advance[1]
# are we wrapping the line?
if charcode == "\n":
lineWPix = current[0]
current[0] = 0
current[1] -= self._lineHeight
lineN += 1
charsThisLine += 1
self._lineLenChars.append(charsThisLine)
self._lineWidths.append(getLineWidthFromPix(lineWPix))
charsThisLine = 0
wordsThisLine = 0
elif charcode in wordBreaks:
wordLen = 0
charsThisLine += 1
wordsThisLine += 1
elif printable:
wordLen += 1
charsThisLine += 1
# end line with auto-wrap on space
if current[0] >= lineMax and wordLen > 0 and wordsThisLine > 1:
# move the current word to next line
lineBreakPt = vertices[(i - wordLen + 1) * 4, 0]
wordWidth = current[0] - lineBreakPt
# shift all chars of the word left by wordStartX
vertices[(i - wordLen + 1) * 4: (i + 1) * 4, 0] -= lineBreakPt
vertices[(i - wordLen + 1) * 4: (i + 1) * 4, 1] -= self._lineHeight
# update line values
self._lineNs[i - wordLen + 1: i + 1] += 1
self._lineLenChars.append(charsThisLine - wordLen)
self._lineWidths.append(getLineWidthFromPix(lineBreakPt))
lineN += 1
# and set current to correct location
current[0] = wordWidth
current[1] -= self._lineHeight
charsThisLine = wordLen
wordsThisLine = 1
# have we stored the top/bottom of this line yet
if lineN + 1 > len(self._lineTops):
self._lineBottoms.append(current[1] + font.descender)
self._lineTops.append(current[1] + self._lineHeight
+ font.descender/2)
# finally add length of this (unfinished) line
self._lineWidths.append(getLineWidthFromPix(current[0]))
self._lineLenChars.append(charsThisLine)
elif self._lineBreaking == 'uax14':
# get a list of line-breakable points according to UAX#14
breakable_points = list(get_breakable_points(self._text))
text_seg = list(break_units(self._text, breakable_points))
styles_seg = list(break_units(self._styles, breakable_points))
lineN = 0
charwidth_list = []
segwidth_list = []
y_advance_list = []
vertices_list = []
texcoords_list = []
            # calculate the width of each segment
for this_seg in range(len(text_seg)):
thisSegWidth = 0 # width of this segment
for i, charcode in enumerate(text_seg[this_seg]):
printable = True # unless we decide otherwise
# handle formatting codes
if styles_seg[this_seg][i] == NONE:
fakeItalic = 0.0
fakeBold = 0.0
elif styles_seg[this_seg][i] == ITALIC:
fakeItalic = 0.1 * font.size
                    elif styles_seg[this_seg][i] == BOLD:
fakeBold = 0.3 * font.size
# handle newline
if charcode == '\n':
printable = False
# handle printable characters
if printable:
if showWhiteSpace and charcode == " ":
glyph = font[u"·"]
else:
glyph = font[charcode]
xBotL = glyph.offset[0] - fakeItalic - fakeBold / 2
xTopL = glyph.offset[0] - fakeBold / 2
yTop = glyph.offset[1]
xBotR = xBotL + glyph.size[0] * alphaCorrection + fakeBold
xTopR = xTopL + glyph.size[0] * alphaCorrection + fakeBold
yBot = yTop - glyph.size[1]
u0 = glyph.texcoords[0]
v0 = glyph.texcoords[1]
u1 = glyph.texcoords[2]
v1 = glyph.texcoords[3]
else:
glyph = font[u"·"]
x = glyph.offset[0]
yTop = glyph.offset[1]
yBot = yTop - glyph.size[1]
xBotL = x
xTopL = x
xBotR = x
xTopR = x
u0 = glyph.texcoords[0]
v0 = glyph.texcoords[1]
u1 = glyph.texcoords[2]
v1 = glyph.texcoords[3]
# calculate width and update segment width
w = glyph.advance[0] + fakeBold / 2
thisSegWidth += w
# keep vertices, texcoords, width and y_advance of this character
vertices_list.append([[xTopL, yTop], [xBotL, yBot],
[xBotR, yBot], [xTopR, yTop]])
texcoords_list.append([[u0, v0], [u0, v1],
[u1, v1], [u1, v0]])
charwidth_list.append(w)
y_advance_list.append(glyph.advance[1])
# append width of this segment to the list
segwidth_list.append(thisSegWidth)
# concatenate segments to build line
lines = []
while text_seg:
line_width = 0
for i in range(len(text_seg)):
# if this segment is \n, break line here.
if text_seg[i][-1] == '\n':
i+=1 # increment index to include \n to current line
break
# concatenate next segment
line_width += segwidth_list[i]
# break if line_width is greater than lineMax
if lineMax < line_width:
break
else:
                    # if the for loop finished without a break, all segments
                    # should be concatenated
i = len(text_seg)
p = max(1, i)
# concatenate segments and remove from segment list
lines.append("".join(text_seg[:p]))
del text_seg[:p], segwidth_list[:p] #, avoid[:p]
# build lines
i = 0 # index of the current character
if lines:
for line in lines:
for c in line:
theseVertices = vertices_list[i]
#update vertices
for j in range(4):
theseVertices[j][0] += current[0]
theseVertices[j][1] += current[1]
texcoords = texcoords_list[i]
vertices[i * 4:i * 4 + 4] = theseVertices
self._texcoords[i * 4:i * 4 + 4] = texcoords
self._colors[i*4 : i*4+4, :4] = rgb
self._lineNs[i] = lineN
current[0] = current[0] + charwidth_list[i]
current[1] = current[1] + y_advance_list[i]
# have we stored the top/bottom of this line yet
if lineN + 1 > len(self._lineTops):
self._lineBottoms.append(current[1] + font.descender)
self._lineTops.append(current[1] + self._lineHeight
+ font.descender/2)
                        # next character
i += 1
# prepare for next line
current[0] = 0
current[1] -= self._lineHeight
lineBreakPt = vertices[(i-1) * 4, 0]
self._lineLenChars.append(len(line))
self._lineWidths.append(getLineWidthFromPix(lineBreakPt))
                    # need not increase lineN when the last line does not end with '\n'
'HLT_Ele45_WPLoose_Gsf_v4',
'HLT_Ele50_CaloIdVT_GsfTrkIdT_PFJet140_v4',
'HLT_Ele50_CaloIdVT_GsfTrkIdT_PFJet165_v4',
'HLT_Ele50_IsoVVVL_PFHT400_v3',
'HLT_Ele8_CaloIdL_TrackIdL_IsoVL_PFJet30_v4',
'HLT_Ele8_CaloIdM_TrackIdM_PFJet30_v6',
'HLT_FullTracks_Multiplicity100_v2',
'HLT_FullTracks_Multiplicity130_v2',
'HLT_FullTracks_Multiplicity150_v2',
'HLT_FullTracks_Multiplicity80_v2',
'HLT_GlobalRunHPDNoise_v4',
'HLT_HISinglePhoton10_v3',
'HLT_HISinglePhoton15_v3',
'HLT_HISinglePhoton20_v3',
'HLT_HISinglePhoton40_v3',
'HLT_HISinglePhoton60_v3',
'HLT_HT2000_v3',
'HLT_HT200_DisplacedDijet40_DisplacedTrack_v3',
'HLT_HT200_v3',
'HLT_HT2500_v3',
'HLT_HT250_DisplacedDijet40_DisplacedTrack_v4',
'HLT_HT275_v3',
'HLT_HT325_v3',
'HLT_HT350_DisplacedDijet40_DisplacedTrack_v4',
'HLT_HT350_DisplacedDijet40_Inclusive_v3',
'HLT_HT350_DisplacedDijet80_DisplacedTrack_v4',
'HLT_HT350_DisplacedDijet80_Tight_DisplacedTrack_v4',
'HLT_HT400_DisplacedDijet40_Inclusive_v4',
'HLT_HT410to430_v3',
'HLT_HT425_v3',
'HLT_HT430to450_v3',
'HLT_HT450to470_v3',
'HLT_HT470to500_v3',
'HLT_HT500_DisplacedDijet40_Inclusive_v4',
'HLT_HT500to550_v3',
'HLT_HT550_DisplacedDijet40_Inclusive_v4',
'HLT_HT550to650_v3',
'HLT_HT575_v3',
'HLT_HT650_DisplacedDijet80_Inclusive_v4',
'HLT_HT650_v4',
'HLT_HT750_DisplacedDijet80_Inclusive_v4',
'HLT_HcalNZS_v3',
'HLT_HcalPhiSym_v3',
'HLT_IsoMu16_eta2p1_MET30_LooseIsoPFTau50_Trk30_eta2p1_v2',
'HLT_IsoMu16_eta2p1_MET30_v2',
'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_SingleL1_v5',
'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_v5',
'HLT_IsoMu18_v3',
'HLT_IsoMu19_eta2p1_LooseIsoPFTau20_SingleL1_v2',
'HLT_IsoMu19_eta2p1_LooseIsoPFTau20_v2',
'HLT_IsoMu19_eta2p1_MediumIsoPFTau32_Trk1_eta2p1_Reg_v2',
'HLT_IsoMu20_v4',
'HLT_IsoMu21_eta2p1_LooseIsoPFTau20_SingleL1_v2',
'HLT_IsoMu21_eta2p1_MediumIsoPFTau32_Trk1_eta2p1_Reg_v2',
'HLT_IsoMu22_eta2p1_v2',
'HLT_IsoMu22_v3',
'HLT_IsoMu24_v2',
'HLT_IsoMu27_v5',
'HLT_IsoTkMu18_v4',
'HLT_IsoTkMu20_v6',
'HLT_IsoTkMu22_eta2p1_v3',
'HLT_IsoTkMu22_v4',
'HLT_IsoTkMu24_v3',
'HLT_IsoTkMu27_v6',
'HLT_IsoTrackHB_v2',
'HLT_IsoTrackHE_v2',
'HLT_JetE30_NoBPTX3BX_v3',
'HLT_JetE30_NoBPTX_v3',
'HLT_JetE50_NoBPTX3BX_v2',
'HLT_JetE70_NoBPTX3BX_v2',
'HLT_L1BeamGasMinus_v2',
'HLT_L1BeamGasPlus_v2',
'HLT_L1BptxMinus_v2',
'HLT_L1BptxPlus_v2',
'HLT_L1BptxXOR_v2',
'HLT_L1FatEvents_v1',
'HLT_L1MinimumBiasHF_AND_v2',
'HLT_L1MinimumBiasHF_OR_v2',
'HLT_L1NotBptxOR_v2',
'HLT_L1SingleMu18_v1',
'HLT_L1SingleMuOpen_DT_v3',
'HLT_L1SingleMuOpen_v3',
'HLT_L1_TripleJet_VBF_v5',
'HLT_L2DoubleMu23_NoVertex_v4',
'HLT_L2DoubleMu28_NoVertex_2Cha_Angle2p5_Mass10_v4',
'HLT_L2DoubleMu38_NoVertex_2Cha_Angle2p5_Mass10_v4',
'HLT_L2Mu10_NoVertex_NoBPTX3BX_v2',
'HLT_L2Mu10_NoVertex_NoBPTX_v3',
'HLT_L2Mu10_v2',
'HLT_L2Mu35_NoVertex_3Sta_NoBPTX3BX_v2',
'HLT_L2Mu40_NoVertex_3Sta_NoBPTX3BX_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET110_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET120_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET80_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET90_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_v4',
'HLT_MET200_v3',
'HLT_MET250_v3',
'HLT_MET300_v3',
'HLT_MET600_v3',
'HLT_MET60_IsoTrk35_Loose_v2',
'HLT_MET700_v3',
'HLT_MET75_IsoTrk50_v4',
'HLT_MET90_IsoTrk50_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu100_PFMHTNoMu100_IDTight_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu110_PFMHTNoMu110_IDTight_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight_v4',
'HLT_MonoCentralPFJet80_PFMETNoMu90_PFMHTNoMu90_IDTight_v4',
'HLT_Mu10_CentralPFJet30_BTagCSV_p13_v2',
'HLT_Mu10_TrkIsoVVL_DiPFJet40_DEta3p5_MJJ750_HTT350_PFMETNoMu60_v2',
'HLT_Mu12_Photon25_CaloIdL_L1ISO_v6',
'HLT_Mu12_Photon25_CaloIdL_L1OR_v6',
'HLT_Mu12_Photon25_CaloIdL_v6',
'HLT_Mu14er_PFMET100_v2',
'HLT_Mu15_IsoVVVL_BTagCSV_p067_PFHT400_v3',
'HLT_Mu15_IsoVVVL_PFHT350_PFMET50_v4',
'HLT_Mu15_IsoVVVL_PFHT350_v4',
'HLT_Mu15_IsoVVVL_PFHT400_PFMET50_v2',
'HLT_Mu15_IsoVVVL_PFHT400_v2',
'HLT_Mu15_IsoVVVL_PFHT600_v5',
'HLT_Mu16_TkMu0_dEta18_Onia_v3',
'HLT_Mu16_TkMu0_dEta18_Phi_v3',
'HLT_Mu16_eta2p1_MET30_v2',
'HLT_Mu17_Mu8_DZ_v4',
'HLT_Mu17_Mu8_SameSign_DZ_v3',
'HLT_Mu17_Mu8_SameSign_v3',
'HLT_Mu17_Mu8_v3',
'HLT_Mu17_Photon22_CaloIdL_L1ISO_v4',
'HLT_Mu17_Photon30_CaloIdL_L1ISO_v6',
'HLT_Mu17_Photon35_CaloIdL_L1ISO_v6',
'HLT_Mu17_TkMu8_DZ_v3',
'HLT_Mu17_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v4',
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_v4',
'HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v3',
'HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_v3',
'HLT_Mu17_TrkIsoVVL_v3',
'HLT_Mu17_v3',
'HLT_Mu20_Mu10_DZ_v3',
'HLT_Mu20_Mu10_SameSign_DZ_v3',
'HLT_Mu20_Mu10_SameSign_v2',
'HLT_Mu20_Mu10_v3',
'HLT_Mu20_v3',
'HLT_Mu23NoFiltersNoVtx_Photon23_CaloIdL_v5',
'HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Mu23_TrkIsoVVL_Ele8_CaloIdL_TrackIdL_IsoVL_v4',
'HLT_Mu24_eta2p1_v4',
'HLT_Mu25_TkMu0_dEta18_Onia_v4',
'HLT_Mu27_Ele37_CaloIdL_GsfTrkIdVL_v3',
'HLT_Mu27_TkMu8_v3',
'HLT_Mu27_v4',
'HLT_Mu28NoFiltersNoVtxDisplaced_Photon28_CaloIdL_v5',
'HLT_Mu28NoFiltersNoVtx_CentralCaloJet40_v3',
'HLT_Mu28NoFiltersNoVtx_DisplacedJet40_Loose_v3',
'HLT_Mu300_v2',
'HLT_Mu30_Ele30_CaloIdL_GsfTrkIdVL_v5',
'HLT_Mu30_TkMu11_v3',
'HLT_Mu30_eta2p1_PFJet150_PFJet50_v2',
'HLT_Mu33NoFiltersNoVtxDisplaced_DisplacedJet50_Loose_v3',
'HLT_Mu33NoFiltersNoVtxDisplaced_DisplacedJet50_Tight_v3',
'HLT_Mu33NoFiltersNoVtxDisplaced_Photon33_CaloIdL_v5',
'HLT_Mu350_v2',
'HLT_Mu37_Ele27_CaloIdL_GsfTrkIdVL_v3',
'HLT_Mu38NoFiltersNoVtxDisplaced_DisplacedJet60_Loose_v3',
'HLT_Mu38NoFiltersNoVtxDisplaced_DisplacedJet60_Tight_v3',
'HLT_Mu38NoFiltersNoVtx_DisplacedJet60_Loose_v3',
'HLT_Mu38NoFiltersNoVtx_Photon38_CaloIdL_v5',
'HLT_Mu3_PFJet40_v3',
'HLT_Mu3er_PFHT140_PFMET125_v3',
'HLT_Mu40_TkMu11_v3',
'HLT_Mu40_eta2p1_PFJet200_PFJet50_v4',
'HLT_Mu42NoFiltersNoVtx_Photon42_CaloIdL_v5',
'HLT_Mu45_eta2p1_v4',
'HLT_Mu50_IsoVVVL_PFHT400_v2',
'HLT_Mu50_v4',
'HLT_Mu55_v3',
'HLT_Mu6_PFHT200_PFMET100_v2',
'HLT_Mu6_PFHT200_PFMET80_BTagCSV_p067_v2',
'HLT_Mu7p5_L2Mu2_Jpsi_v3',
'HLT_Mu7p5_L2Mu2_Upsilon_v3',
'HLT_Mu7p5_Track2_Jpsi_v3',
'HLT_Mu7p5_Track2_Upsilon_v3',
'HLT_Mu7p5_Track3p5_Jpsi_v3',
'HLT_Mu7p5_Track3p5_Upsilon_v3',
'HLT_Mu7p5_Track7_Jpsi_v3',
'HLT_Mu7p5_Track7_Upsilon_v3',
'HLT_Mu8_DiEle12_CaloIdL_TrackIdL_v6',
'HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT250_v4',
'HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT300_v7',
'HLT_Mu8_TrkIsoVVL_DiPFJet40_DEta3p5_MJJ750_HTT300_PFMETNoMu60_v1',
'HLT_Mu8_TrkIsoVVL_Ele17_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_v6',
'HLT_Mu8_TrkIsoVVL_v4',
'HLT_Mu8_v4',
'HLT_PFHT125_v2',
'HLT_PFHT200_DiPFJetAve90_PFAlphaT0p57_v4',
'HLT_PFHT200_DiPFJetAve90_PFAlphaT0p63_v4',
'HLT_PFHT200_PFAlphaT0p51_v4',
'HLT_PFHT200_v3',
'HLT_PFHT250_DiPFJetAve90_PFAlphaT0p55_v4',
'HLT_PFHT250_DiPFJetAve90_PFAlphaT0p58_v4',
'HLT_PFHT250_v3',
'HLT_PFHT300_DiPFJetAve90_PFAlphaT0p53_v4',
'HLT_PFHT300_DiPFJetAve90_PFAlphaT0p54_v4',
'HLT_PFHT300_PFMET100_v3',
'HLT_PFHT300_PFMET110_v3',
'HLT_PFHT300_v4',
'HLT_PFHT350_DiPFJetAve90_PFAlphaT0p52_v4',
'HLT_PFHT350_DiPFJetAve90_PFAlphaT0p53_v4',
'HLT_PFHT350_v5',
'HLT_PFHT400_DiPFJetAve90_PFAlphaT0p51_v4',
'HLT_PFHT400_DiPFJetAve90_PFAlphaT0p52_v4',
'HLT_PFHT400_SixJet30_DoubleBTagCSV_p056_v2',
'HLT_PFHT400_SixJet30_v4',
'HLT_PFHT400_v4',
'HLT_PFHT450_SixJet40_BTagCSV_p056_v2',
'HLT_PFHT450_SixJet40_v4',
'HLT_PFHT475_v4',
'HLT_PFHT550_4JetPt50_v3',
'HLT_PFHT600_v5',
'HLT_PFHT650_4JetPt50_v3',
'HLT_PFHT650_WideJetMJJ900DEtaJJ1p5_v5',
'HLT_PFHT650_WideJetMJJ950DEtaJJ1p5_v5',
'HLT_PFHT650_v5',
'HLT_PFHT750_4JetPt50_v5',
'HLT_PFHT800_v4',
'HLT_PFHT900_v3',
'HLT_PFJet140_v5',
'HLT_PFJet15_NoCaloMatched_v4',
'HLT_PFJet200_v5',
'HLT_PFJet25_NoCaloMatched_v2',
'HLT_PFJet260_v5',
'HLT_PFJet320_v5',
'HLT_PFJet400_v5',
'HLT_PFJet40_v6',
'HLT_PFJet450_v5',
'HLT_PFJet500_v5',
'HLT_PFJet60_v6',
'HLT_PFJet80_v5',
'HLT_PFMET100_PFMHT100_IDTight_v4',
'HLT_PFMET110_PFMHT110_IDTight_v4',
'HLT_PFMET120_BTagCSV_p067_v3',
'HLT_PFMET120_Mu5_v3',
'HLT_PFMET120_PFMHT120_IDTight_v4',
'HLT_PFMET170_BeamHaloCleaned_v2',
'HLT_PFMET170_HBHECleaned_v4',
'HLT_PFMET170_NotCleaned_v3',
'HLT_PFMET300_v3',
'HLT_PFMET400_v3',
'HLT_PFMET500_v3',
'HLT_PFMET600_v3',
'HLT_PFMET90_PFMHT90_IDTight_v4',
'HLT_PFMETNoMu100_PFMHTNoMu100_IDTight_v4',
'HLT_PFMETNoMu110_PFMHTNoMu110_IDTight_v4',
'HLT_PFMETNoMu120_PFMHTNoMu120_IDTight_v4',
'HLT_PFMETNoMu90_PFMHTNoMu90_IDTight_v4',
'HLT_PFTau120_eta2p1_v2',
'HLT_Photon120_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon120_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon120_R9Id90_HE10_IsoM_v6',
'HLT_Photon120_v6',
'HLT_Photon135_PFMET100_v4',
'HLT_Photon165_HE10_v6',
'HLT_Photon165_R9Id90_HE10_IsoM_v6',
'HLT_Photon175_v6',
'HLT_Photon22_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon22_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon22_R9Id90_HE10_IsoM_v5',
'HLT_Photon22_v5',
'HLT_Photon250_NoHE_v5',
'HLT_Photon26_R9Id85_OR_CaloId24b40e_Iso50T80L_Photon16_AND_HE10_R9Id65_Eta2_Mass60_v5',
'HLT_Photon300_NoHE_v5',
'HLT_Photon30_R9Id90_HE10_IsoM_v6',
'HLT_Photon30_v6',
'HLT_Photon36_R9Id85_OR_CaloId24b40e_Iso50T80L_Photon22_AND_HE10_R9Id65_Eta2_Mass15_v5',
'HLT_Photon36_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon36_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon36_R9Id90_HE10_IsoM_v6',
'HLT_Photon36_v6',
'HLT_Photon42_R9Id85_OR_CaloId24b40e_Iso50T80L_Photon25_AND_HE10_R9Id65_Eta2_Mass15_v5',
'HLT_Photon500_v4',
'HLT_Photon50_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon50_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon50_R9Id90_HE10_IsoM_v6',
'HLT_Photon50_v6',
'HLT_Photon600_v4',
'HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon75_R9Id90_HE10_IsoM_v6',
'HLT_Photon75_v6',
'HLT_Photon90_CaloIdL_PFHT500_v6',
'HLT_Photon90_CaloIdL_PFHT600_v5',
'HLT_Photon90_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon90_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon90_R9Id90_HE10_IsoM_v6',
'HLT_Photon90_v6',
'HLT_Physics_v4',
'HLT_PixelTracks_Multiplicity110ForEndOfFill_v3',
'HLT_PixelTracks_Multiplicity135ForEndOfFill_v3',
'HLT_PixelTracks_Multiplicity160ForEndOfFill_v3',
'HLT_PixelTracks_Multiplicity60ForEndOfFill_v2',
'HLT_PixelTracks_Multiplicity85ForEndOfFill_v2',
'HLT_QuadJet45_DoubleBTagCSV_p087_v3',
'HLT_QuadJet45_TripleBTagCSV_p087_v3',
'HLT_QuadMuon0_Dimuon0_Jpsi_v3',
'HLT_QuadMuon0_Dimuon0_Upsilon_v3',
'HLT_QuadPFJet_BTagCSV_p016_VBF_Mqq460_v2',
'HLT_QuadPFJet_BTagCSV_p016_VBF_Mqq500_v2',
'HLT_QuadPFJet_BTagCSV_p016_p11_VBF_Mqq200_v2',
'HLT_QuadPFJet_BTagCSV_p016_p11_VBF_Mqq240_v2',
'HLT_QuadPFJet_VBF_v5',
'HLT_Random_v2',
'HLT_Rsq0p02_MR300_TriPFJet80_60_40_BTagCSV_p063_p20_Mbb60_200_v2',
'HLT_Rsq0p02_MR300_TriPFJet80_60_40_DoubleBTagCSV_p063_Mbb60_200_v2',
'HLT_Rsq0p25_v3',
'HLT_Rsq0p30_v3',
'HLT_RsqMR240_Rsq0p09_MR200_4jet_v3',
'HLT_RsqMR240_Rsq0p09_MR200_v3',
'HLT_RsqMR270_Rsq0p09_MR200_4jet_v3',
'HLT_RsqMR270_Rsq0p09_MR200_v3',
'HLT_SingleCentralPFJet170_CFMax0p1_v2',
'HLT_TkMu20_v4',
'HLT_TkMu24_eta2p1_v5',
'HLT_TkMu27_v5',
'HLT_TkMu50_v3',
'HLT_TripleMu_12_10_5_v3',
'HLT_TripleMu_5_3_3_v1',
'HLT_TrkMu15_DoubleTrkMu5NoFiltersNoVtx_v4',
'HLT_TrkMu17_DoubleTrkMu8NoFiltersNoVtx_v4',
'HLT_VBF_DisplacedJet40_DisplacedTrack_2TrackIP2DSig5_v3',
'HLT_VBF_DisplacedJet40_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_Hadronic_2PromptTrack_v3',
'HLT_VBF_DisplacedJet40_Hadronic_v3',
'HLT_VBF_DisplacedJet40_TightID_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_TightID_Hadronic_v3',
'HLT_VBF_DisplacedJet40_VTightID_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_VTightID_Hadronic_v3',
'HLT_VBF_DisplacedJet40_VVTightID_DisplacedTrack_v3',
'HLT_VBF_DisplacedJet40_VVTightID_Hadronic_v3',
'HLT_VLooseIsoPFTau120_Trk50_eta2p1_v2',
'HLT_VLooseIsoPFTau140_Trk50_eta2p1_v2',
'HLT_ZeroBias_FirstCollisionAfterAbortGap_TCDS_v1',
'HLT_ZeroBias_FirstCollisionAfterAbortGap_v2',
'HLT_ZeroBias_IsolatedBunches_v3',
'HLT_ZeroBias_v4' ) ),
ParkingHT410to430 = cms.vstring('HLT_HT410to430_v3'),
ParkingHT430to450 = cms.vstring('HLT_HT430to450_v3'),
ParkingHT450to470 = cms.vstring('HLT_HT450to470_v3'),
ParkingHT470to500 = cms.vstring('HLT_HT470to500_v3'),
ParkingHT500to550 = cms.vstring('HLT_HT500to550_v3'),
ParkingHT550to650 = cms.vstring('HLT_HT550to650_v3'),
ParkingHT650 = cms.vstring('HLT_HT650_v4'),
ParkingScoutingMonitor = cms.vstring('DST_CaloJet40_BTagScouting_v4',
'DST_CaloJet40_CaloBTagScouting_v3',
'DST_CaloJet40_CaloScouting_PFScouting_v4',
'DST_DoubleMu3_Mass10_BTagScouting_v4',
'DST_DoubleMu3_Mass10_CaloScouting_PFScouting_v3',
'DST_HT250_CaloBTagScouting_v2',
'DST_HT250_CaloScouting_v4',
'DST_HT410_BTagScouting_v4',
'DST_HT410_PFScouting_v4',
'DST_HT450_BTagScouting_v4',
'DST_HT450_PFScouting_v4',
'DST_L1DoubleMu_BTagScouting_v4',
'DST_L1DoubleMu_CaloScouting_PFScouting_v3',
'DST_L1HTT_BTagScouting_v4',
'DST_L1HTT_CaloBTagScouting_v3',
'DST_L1HTT_CaloScouting_PFScouting_v4',
'DST_ZeroBias_BTagScouting_v4',
'DST_ZeroBias_CaloScouting_PFScouting_v3',
'HLT_HT410to430_v3',
'HLT_HT430to450_v3',
'HLT_HT450to470_v3',
'HLT_HT470to500_v3',
'HLT_HT500to550_v3',
'HLT_HT550to650_v3',
'HLT_HT650_v4'),
RPCMonitor = cms.vstring('AlCa_RPCMuonNoHits_v3',
'AlCa_RPCMuonNoTriggers_v3',
'AlCa_RPCMuonNormalisation_v3'),
ScoutingCaloCommissioning = cms.vstring('DST_CaloJet40_CaloBTagScouting_v3',
'DST_CaloJet40_CaloScouting_PFScouting_v4',
'DST_L1HTT_CaloBTagScouting_v3',
'DST_L1HTT_CaloScouting_PFScouting_v4'),
ScoutingCaloHT = cms.vstring('DST_HT250_CaloBTagScouting_v2',
'DST_HT250_CaloScouting_v4'),
ScoutingPFCommissioning = cms.vstring('DST_CaloJet40_BTagScouting_v4',
'DST_CaloJet40_CaloScouting_PFScouting_v4',
'DST_L1DoubleMu_BTagScouting_v4',
'DST_L1DoubleMu_CaloScouting_PFScouting_v3',
'DST_L1HTT_BTagScouting_v4',
'DST_L1HTT_CaloScouting_PFScouting_v4',
'DST_ZeroBias_BTagScouting_v4',
'DST_ZeroBias_CaloScouting_PFScouting_v3'),
ScoutingPFHT = cms.vstring('DST_HT410_BTagScouting_v4',
'DST_HT410_PFScouting_v4',
'DST_HT450_BTagScouting_v4',
'DST_HT450_PFScouting_v4'),
ScoutingPFMuons = cms.vstring('DST_DoubleMu3_Mass10_BTagScouting_v4',
'DST_DoubleMu3_Mass10_CaloScouting_PFScouting_v3'),
SingleElectron = cms.vstring('HLT_Ele105_CaloIdVT_GsfTrkIdT_v6',
'HLT_Ele115_CaloIdVT_GsfTrkIdT_v5',
'HLT_Ele15_IsoVVVL_BTagCSV_p067_PFHT400_v4',
'HLT_Ele15_IsoVVVL_PFHT350_PFMET50_v5',
'HLT_Ele15_IsoVVVL_PFHT350_v5',
'HLT_Ele15_IsoVVVL_PFHT400_PFMET50_v3',
'HLT_Ele15_IsoVVVL_PFHT400_v3',
'HLT_Ele15_IsoVVVL_PFHT600_v6',
'HLT_Ele22_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v5',
'HLT_Ele22_eta2p1_WPLoose_Gsf_v6',
'HLT_Ele23_WPLoose_Gsf_WHbbBoost_v5',
'HLT_Ele23_WPLoose_Gsf_v6',
'HLT_Ele24_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v4',
'HLT_Ele24_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_v4',
'HLT_Ele24_eta2p1_WPLoose_Gsf_v4',
'HLT_Ele250_CaloIdVT_GsfTrkIdT_v4',
'HLT_Ele25_WPTight_Gsf_v4',
'HLT_Ele25_eta2p1_WPLoose_Gsf_v4',
'HLT_Ele25_eta2p1_WPTight_Gsf_v4',
'HLT_Ele27_WPLoose_Gsf_WHbbBoost_v5',
'HLT_Ele27_WPLoose_Gsf_v4',
'HLT_Ele27_WPTight_Gsf_L1JetTauSeeded_v1',
'HLT_Ele27_WPTight_Gsf_v4',
'HLT_Ele27_eta2p1_WPLoose_Gsf_DoubleMediumIsoPFTau32_Trk1_eta2p1_Reg_v4',
'HLT_Ele27_eta2p1_WPLoose_Gsf_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v5',
'HLT_Ele27_eta2p1_WPLoose_Gsf_DoubleMediumIsoPFTau40_Trk1_eta2p1_Reg_v6',
'HLT_Ele27_eta2p1_WPLoose_Gsf_HT200_v5',
'HLT_Ele27_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v4',
'HLT_Ele27_eta2p1_WPLoose_Gsf_v5',
'HLT_Ele27_eta2p1_WPTight_Gsf_v5',
'HLT_Ele300_CaloIdVT_GsfTrkIdT_v4',
'HLT_Ele32_eta2p1_WPLoose_Gsf_LooseIsoPFTau20_SingleL1_v4',
'HLT_Ele32_eta2p1_WPTight_Gsf_v5',
'HLT_Ele35_CaloIdVT_GsfTrkIdT_PFJet150_PFJet50_v4',
'HLT_Ele35_WPLoose_Gsf_v4',
'HLT_Ele45_CaloIdVT_GsfTrkIdT_PFJet200_PFJet50_v6',
'HLT_Ele45_WPLoose_Gsf_L1JetTauSeeded_v1',
'HLT_Ele45_WPLoose_Gsf_v4',
'HLT_Ele50_CaloIdVT_GsfTrkIdT_PFJet140_v4',
'HLT_Ele50_CaloIdVT_GsfTrkIdT_PFJet165_v4',
'HLT_Ele50_IsoVVVL_PFHT400_v3'),
SingleMuon = cms.vstring('HLT_DoubleIsoMu17_eta2p1_noDzCut_v2',
'HLT_DoubleIsoMu17_eta2p1_v4',
'HLT_IsoMu16_eta2p1_MET30_LooseIsoPFTau50_Trk30_eta2p1_v2',
'HLT_IsoMu16_eta2p1_MET30_v2',
'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_SingleL1_v5',
'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_v5',
'HLT_IsoMu18_v3',
'HLT_IsoMu19_eta2p1_LooseIsoPFTau20_SingleL1_v2',
'HLT_IsoMu19_eta2p1_LooseIsoPFTau20_v2',
'HLT_IsoMu19_eta2p1_MediumIsoPFTau32_Trk1_eta2p1_Reg_v2',
'HLT_IsoMu20_v4',
'HLT_IsoMu21_eta2p1_LooseIsoPFTau20_SingleL1_v2',
'HLT_IsoMu21_eta2p1_MediumIsoPFTau32_Trk1_eta2p1_Reg_v2',
'HLT_IsoMu22_eta2p1_v2',
'HLT_IsoMu22_v3',
'HLT_IsoMu24_v2',
'HLT_IsoMu27_v5',
'HLT_IsoTkMu18_v4',
'HLT_IsoTkMu20_v6',
'HLT_IsoTkMu22_eta2p1_v3',
'HLT_IsoTkMu22_v4',
'HLT_IsoTkMu24_v3',
'HLT_IsoTkMu27_v6',
'HLT_L1SingleMu18_v1',
'HLT_L1SingleMuOpen_v3',
'HLT_L2Mu10_v2',
'HLT_Mu10_TrkIsoVVL_DiPFJet40_DEta3p5_MJJ750_HTT350_PFMETNoMu60_v2',
'HLT_Mu15_IsoVVVL_BTagCSV_p067_PFHT400_v3',
'HLT_Mu15_IsoVVVL_PFHT350_PFMET50_v4',
'HLT_Mu15_IsoVVVL_PFHT350_v4',
'HLT_Mu15_IsoVVVL_PFHT400_PFMET50_v2',
'HLT_Mu15_IsoVVVL_PFHT400_v2',
'HLT_Mu15_IsoVVVL_PFHT600_v5',
'HLT_Mu16_eta2p1_MET30_v2',
'HLT_Mu20_v3',
'HLT_Mu24_eta2p1_v4',
'HLT_Mu27_v4',
'HLT_Mu28NoFiltersNoVtx_CentralCaloJet40_v3',
'HLT_Mu28NoFiltersNoVtx_DisplacedJet40_Loose_v3',
'HLT_Mu300_v2',
'HLT_Mu30_eta2p1_PFJet150_PFJet50_v2',
'HLT_Mu33NoFiltersNoVtxDisplaced_DisplacedJet50_Loose_v3',
'HLT_Mu33NoFiltersNoVtxDisplaced_DisplacedJet50_Tight_v3',
'HLT_Mu350_v2',
'HLT_Mu38NoFiltersNoVtxDisplaced_DisplacedJet60_Loose_v3',
'HLT_Mu38NoFiltersNoVtxDisplaced_DisplacedJet60_Tight_v3',
'HLT_Mu38NoFiltersNoVtx_DisplacedJet60_Loose_v3',
'HLT_Mu40_eta2p1_PFJet200_PFJet50_v4',
'HLT_Mu45_eta2p1_v4',
'HLT_Mu50_IsoVVVL_PFHT400_v2',
'HLT_Mu50_v4',
'HLT_Mu55_v3',
'HLT_Mu8_TrkIsoVVL_DiPFJet40_DEta3p5_MJJ750_HTT300_PFMETNoMu60_v1',
'HLT_TkMu20_v4',
'HLT_TkMu24_eta2p1_v5',
'HLT_TkMu27_v5',
'HLT_TkMu50_v3'),
SinglePhoton = cms.vstring('HLT_Photon120_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon120_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon120_R9Id90_HE10_IsoM_v6',
'HLT_Photon120_v6',
'HLT_Photon135_PFMET100_v4',
'HLT_Photon165_HE10_v6',
'HLT_Photon165_R9Id90_HE10_IsoM_v6',
'HLT_Photon175_v6',
'HLT_Photon22_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon22_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon22_R9Id90_HE10_IsoM_v5',
'HLT_Photon22_v5',
'HLT_Photon250_NoHE_v5',
'HLT_Photon300_NoHE_v5',
'HLT_Photon30_R9Id90_HE10_IsoM_v6',
'HLT_Photon30_v6',
'HLT_Photon36_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon36_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon36_R9Id90_HE10_IsoM_v6',
'HLT_Photon36_v6',
'HLT_Photon500_v4',
'HLT_Photon50_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon50_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon50_R9Id90_HE10_IsoM_v6',
'HLT_Photon50_v6',
'HLT_Photon600_v4',
'HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon75_R9Id90_HE10_IsoM_v6',
'HLT_Photon75_v6',
'HLT_Photon90_CaloIdL_PFHT500_v6',
'HLT_Photon90_CaloIdL_PFHT600_v5',
'HLT_Photon90_R9Id90_HE10_Iso40_EBOnly_PFMET40_v6',
'HLT_Photon90_R9Id90_HE10_Iso40_EBOnly_VBF_v5',
'HLT_Photon90_R9Id90_HE10_IsoM_v6',
'HLT_Photon90_v6'),
Tau = cms.vstring('HLT_DoubleMediumIsoPFTau32_Trk1_eta2p1_Reg_v2',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v3',
'HLT_DoubleMediumIsoPFTau40_Trk1_eta2p1_Reg_v5',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET110_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET120_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET80_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_MET90_v2',
'HLT_LooseIsoPFTau50_Trk30_eta2p1_v4',
'HLT_PFTau120_eta2p1_v2',
'HLT_VLooseIsoPFTau120_Trk50_eta2p1_v2',
'HLT_VLooseIsoPFTau140_Trk50_eta2p1_v2'),
TestEnablesEcalHcal = cms.vstring('HLT_EcalCalibration_v3',
'HLT_HcalCalibration_v2'),
TestEnablesEcalHcalDQM = cms.vstring('HLT_EcalCalibration_v3',
'HLT_HcalCalibration_v2'),
ZeroBias = cms.vstring('HLT_Random_v2',
'HLT_ZeroBias_FirstCollisionAfterAbortGap_TCDS_v1',
'HLT_ZeroBias_FirstCollisionAfterAbortGap_v2',
'HLT_ZeroBias_IsolatedBunches_v3',
'HLT_ZeroBias_v4')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.options = cms.untracked.PSet(
SkipEvent = cms.untracked.vstring('ProductNotFound'),
numberOfStreams = cms.untracked.uint32(0),
numberOfThreads = cms.untracked.uint32(4),
sizeOfStackForThreadsInKB = cms.untracked.uint32(10240),
wantSummary = cms.untracked.bool(True)
)
process.streams = cms.PSet(
ALCAELECTRON = cms.vstring('AlCaElectron'),
ALCALUMIPIXELS = cms.vstring('AlCaLumiPixels'),
ALCAP0 = cms.vstring('AlCaP0'),
ALCAPHISYM = cms.vstring('AlCaPhiSym'),
Calibration = cms.vstring('TestEnablesEcalHcal'),
DQM = cms.vstring('OnlineMonitor'),
DQMCalibration = cms.vstring('TestEnablesEcalHcalDQM'),
DQMEventDisplay = cms.vstring('EventDisplay'),
EcalCalibration = cms.vstring('EcalLaser'),
Express = cms.vstring('ExpressPhysics'),
HLTMonitor = cms.vstring('HLTMonitor'),
NanoDST = cms.vstring('L1Accept'),
Parking = cms.vstring('ParkingHT410to430',
'ParkingHT430to450',
'ParkingHT450to470',
'ParkingHT470to500',
'ParkingHT500to550',
'ParkingHT550to650',
'ParkingHT650'),
ParkingHLTPhysics = cms.vstring('HLTPhysics0',
'HLTPhysics1',
'HLTPhysics2',
'HLTPhysics3'),
PhysicsCommissioning = cms.vstring('Commissioning',
'HLTPhysics',
'HcalHPDNoise',
'HcalNZS',
'MonteCarlo',
'NoBPTX',
'ZeroBias'),
PhysicsEGamma = cms.vstring('DoubleEG',
'SingleElectron',
'SinglePhoton'),
PhysicsEndOfFill = cms.vstring('EmptyBX',
'FSQJets',
'HINCaloJets',
'HINPFJets',
'HINPhoton',
'HighMultiplicity85EOF',
'HighMultiplicityEOF',
'L1MinimumBias'),
PhysicsHadronsTaus = cms.vstring('BTagCSV',
'BTagMu',
'DisplacedJet',
'HTMHT',
'JetHT',
'MET',
'Tau'),
PhysicsMuons = cms.vstring('Charmonium',
'DoubleMuon',
'DoubleMuonLowMass',
'MuOnia',
'MuonEG',
| |
<https://arxiv.org/abs/1303.0518>.
Only implemented for single-dimensional output.
.. testsetup::
import numpy as np
from sklearn.linear_model import lasso_path
Parameters
----------
alpha : string | float, optional, default 'auto'.
Constant that multiplies the L1 term. Defaults to 'auto'.
        ``alpha = 0`` is equivalent to ordinary least squares, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised;
        use the :class:`.LinearRegression` object instead.
n_alphas : int, optional, default 100
How many alphas to try if alpha='auto'
alpha_cov : string | float, optional, default 'auto'
The regularization alpha that is used when constructing the pseudo inverse of
        the covariance matrix Theta used for correcting the lasso coefficient. Each
such regression corresponds to the regression of one feature on the remainder
of the features.
n_alphas_cov : int, optional, default 10
How many alpha_cov to try if alpha_cov='auto'.
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
precompute : True | False | array-like, default False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
n_jobs : int or None, default None
How many jobs to use whenever parallelism is invoked
Attributes
----------
coef_ : array, shape (n_features,)
Parameter vector (w in the cost function formula).
intercept_ : float
Independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
Number of iterations run by the coordinate descent solver to reach
the specified tolerance.
selected_alpha_ : float
Penalty chosen through cross-validation, if alpha='auto'.
coef_stderr_ : array, shape (n_features,)
Estimated standard errors for coefficients (see ``coef_`` attribute).
intercept_stderr_ : float
        Estimated standard error of the intercept (see ``intercept_`` attribute).
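
    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative only; it assumes this
    estimator is importable under the name ``DebiasedLasso``):

    .. code-block:: python

        import numpy as np

        rng = np.random.RandomState(0)
        X = rng.normal(size=(200, 10))
        y = X[:, 0] + 0.1 * rng.normal(size=200)

        est = DebiasedLasso(alpha='auto', fit_intercept=True)
        est.fit(X, y)

        point = est.coef_                              # debiased point estimates
        lower, upper = est.coef__interval(alpha=0.1)   # 90% confidence intervals
        y_lo, y_hi = est.predict_interval(X, alpha=0.1)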
"""
def __init__(self, alpha='auto', n_alphas=100, alpha_cov='auto', n_alphas_cov=10,
fit_intercept=True, precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False,
random_state=None, selection='cyclic', n_jobs=None):
self.n_jobs = n_jobs
self.n_alphas = n_alphas
self.alpha_cov = alpha_cov
self.n_alphas_cov = n_alphas_cov
super().__init__(
alpha=alpha, fit_intercept=fit_intercept,
precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=False, random_state=random_state,
selection=selection)
def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit debiased lasso model.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Input data.
y : array, shape (n_samples,)
Target. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample.
The weights will be normalized internally.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
"""
self.selected_alpha_ = None
if self.alpha == 'auto':
# Select optimal penalty
self.alpha = self._get_optimal_alpha(X, y, sample_weight)
self.selected_alpha_ = self.alpha
else:
# Warn about consistency
warnings.warn("Setting a suboptimal alpha can lead to miscalibrated confidence intervals. "
"We recommend setting alpha='auto' for optimality.")
# Convert X, y into numpy arrays
X, y = check_X_y(X, y, y_numeric=True, multi_output=False)
# Fit weighted lasso with user input
super().fit(X, y, sample_weight, check_input)
# Center X, y
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=False,
copy=self.copy_X, check_input=check_input, sample_weight=sample_weight, return_mean=True)
# Calculate quantities that will be used later on. Account for centered data
y_pred = self.predict(X) - self.intercept_
self._theta_hat = self._get_theta_hat(X, sample_weight)
self._X_offset = X_offset
# Calculate coefficient and error variance
num_nonzero_coefs = np.count_nonzero(self.coef_)
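        # Degrees-of-freedom style correction: dividing by (1 - s/n) inflates the
        # residual variance by n / (n - s), where s is the number of non-zero coefficients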
self._error_variance = np.average((y - y_pred)**2, weights=sample_weight) / \
(1 - num_nonzero_coefs / X.shape[0])
self._mean_error_variance = self._error_variance / X.shape[0]
self._coef_variance = self._get_unscaled_coef_var(
X, self._theta_hat, sample_weight) * self._error_variance
# Add coefficient correction
coef_correction = self._get_coef_correction(
X, y, y_pred, sample_weight, self._theta_hat)
self.coef_ += coef_correction
# Set coefficients and intercept standard errors
self.coef_stderr_ = np.sqrt(np.diag(self._coef_variance))
if self.fit_intercept:
self.intercept_stderr_ = np.sqrt(
self._X_offset @ self._coef_variance @ self._X_offset +
self._mean_error_variance
)
else:
self.intercept_stderr_ = 0
# Set intercept
self._set_intercept(X_offset, y_offset, X_scale)
# Return alpha to 'auto' state
if self.selected_alpha_ is not None:
self.alpha = 'auto'
return self
def prediction_stderr(self, X):
"""Get the standard error of the predictions using the debiased lasso.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Samples.
Returns
-------
prediction_stderr : array like, shape (n_samples, )
The standard error of each coordinate of the output at each point we predict
"""
# Note that in the case of no intercept, X_offset is 0
if self.fit_intercept:
X = X - self._X_offset
# Calculate the variance of the predictions
var_pred = np.sum(np.matmul(X, self._coef_variance) * X, axis=1)
if self.fit_intercept:
var_pred += self._mean_error_variance
pred_stderr = np.sqrt(var_pred)
return pred_stderr
def predict_interval(self, X, alpha=0.1):
"""Build prediction confidence intervals using the debiased lasso.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Samples.
alpha: optional float in [0, 1] (Default=0.1)
The overall level of confidence of the reported interval.
The alpha/2, 1-alpha/2 confidence interval is reported.
Returns
-------
(y_lower, y_upper) : tuple of arrays, shape (n_samples, )
Returns lower and upper interval endpoints.
"""
lower = alpha / 2
upper = 1 - alpha / 2
y_pred = self.predict(X)
# Calculate prediction confidence intervals
sd_pred = self.prediction_stderr(X)
y_lower = y_pred + \
np.apply_along_axis(lambda s: norm.ppf(
lower, scale=s), 0, sd_pred)
y_upper = y_pred + \
np.apply_along_axis(lambda s: norm.ppf(
upper, scale=s), 0, sd_pred)
return y_lower, y_upper
def coef__interval(self, alpha=0.1):
"""Get a confidence interval bounding the fitted coefficients.
Parameters
----------
alpha : float
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
(coef_lower, coef_upper) : tuple of arrays, shape (n_coefs, )
Returns lower and upper interval endpoints for the coefficients.
"""
lower = alpha / 2
upper = 1 - alpha / 2
return self.coef_ + np.apply_along_axis(lambda s: norm.ppf(lower, scale=s), 0, self.coef_stderr_), \
self.coef_ + np.apply_along_axis(lambda s: norm.ppf(upper, scale=s), 0, self.coef_stderr_)
def intercept__interval(self, alpha=0.1):
"""Get a confidence interval bounding the fitted intercept.
Parameters
----------
alpha : float
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
(intercept_lower, intercept_upper) : tuple floats
Returns lower and upper interval endpoints for the intercept.
"""
lower = alpha / 2
upper = 1 - alpha / 2
if self.fit_intercept:
return self.intercept_ + norm.ppf(lower, scale=self.intercept_stderr_), self.intercept_ + \
norm.ppf(upper, scale=self.intercept_stderr_),
else:
return 0.0, 0.0
def _get_coef_correction(self, X, y, y_pred, sample_weight, theta_hat):
# Assumes flattened y
n_samples, _ = X.shape
y_res = np.ndarray.flatten(y) - y_pred
# Compute weighted residuals
if sample_weight is not None:
y_res_scaled = y_res * sample_weight / np.sum(sample_weight)
else:
y_res_scaled = y_res / n_samples
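        # One-step debiasing correction: Theta @ X.T @ (weighted residuals),
        # which fit() adds to the lasso coefficients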
delta_coef = np.matmul(
theta_hat, np.matmul(X.T, y_res_scaled))
return delta_coef
def _get_optimal_alpha(self, X, y, sample_weight):
# To be done once per target. Assumes y can be flattened.
cv_estimator = WeightedLassoCV(cv=5, n_alphas=self.n_alphas, fit_intercept=self.fit_intercept,
precompute=self.precompute, copy_X=True,
max_iter=self.max_iter, tol=self.tol,
random_state=self.random_state,
selection=self.selection,
n_jobs=self.n_jobs)
cv_estimator.fit(X, y.flatten(), | |
"""Classes for migrating repos"""
import os
import re
import dateutil.parser
import datetime
from itertools import chain
import typing
import logging
from typing import (
List,
Any,
Optional,
Sequence,
Set,
)
import networkx as nx
from conda_forge_tick.path_lengths import cyclic_topological_sort
from conda_forge_tick.utils import (
frozen_to_json_friendly,
LazyJson,
)
from conda_forge_tick.make_graph import make_outputs_lut_from_graph
from conda_forge_tick.contexts import MigratorContext, FeedstockContext
if typing.TYPE_CHECKING:
from ..migrators_types import (
AttrsTypedDict,
MigrationUidTypedDict,
PackageName,
)
from conda_forge_tick.utils import JsonFriendly
LOGGER = logging.getLogger("conda_forge_tick.migrators.core")
def _sanitized_muids(pred: List[dict]) -> List["JsonFriendly"]:
lst = []
for pr in pred:
d: "JsonFriendly" = {"data": pr["data"], "keys": pr["keys"]}
lst.append(d)
return lst
def _parse_bad_attr(attrs: "AttrsTypedDict", not_bad_str_start: str) -> bool:
"""Overlook some bad entries"""
bad = attrs.get("bad", False)
if isinstance(bad, str):
return not bad.startswith(not_bad_str_start)
else:
return bad
def _gen_active_feedstocks_payloads(nodes, gx):
for node in nodes:
try:
payload = gx.nodes[node]["payload"]
except KeyError as e:
print(node)
raise e
# we don't need to look at archived feedstocks
# they are always "migrated"
if payload.get("archived", False):
continue
else:
yield node, payload
class MiniMigrator:
post_migration = False
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
"""If true don't act upon node
Parameters
----------
attrs : dict
The node attributes
Returns
-------
bool :
True if node is to be skipped
"""
return True
def migrate(self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any) -> None:
"""Perform the migration, updating the ``meta.yaml``
Parameters
----------
recipe_dir : str
The directory of the recipe
attrs : dict
The node attributes
Returns
-------
        None
            The recipe files are modified in place.
"""
return
class Migrator:
"""Base class for Migrators"""
rerender = True
# bump this if the migrator object needs a change mid migration
migrator_version = 0
allow_empty_commits = False
build_patterns = (
(re.compile(r"(\s*?)number:\s*([0-9]+)"), "number: {}"),
(
re.compile(r'(\s*?){%\s*set build_number\s*=\s*"?([0-9]+)"?\s*%}'),
"{{% set build_number = {} %}}",
),
(
re.compile(r'(\s*?){%\s*set build\s*=\s*"?([0-9]+)"?\s*%}'),
"{{% set build = {} %}}",
),
)
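    # With the default bump of 1 (see `new_build_number`), a recipe line such as
    # "  number: 3" matches the first pattern above and is rewritten by
    # `set_build_number` to "  number: 4"; the other patterns handle the jinja2
    # "{% set build_number = ... %}" and "{% set build = ... %}" variants.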
def __init__(
self,
pr_limit: int = 0,
# TODO: Validate this?
obj_version: Optional[int] = None,
piggy_back_migrations: Optional[Sequence[MiniMigrator]] = None,
check_solvable=True,
):
self.piggy_back_migrations = piggy_back_migrations or []
self.pr_limit = pr_limit
self.obj_version = obj_version
self.ctx: MigratorContext = None
self.check_solvable = check_solvable
def bind_to_ctx(self, migrator_ctx: MigratorContext) -> None:
self.ctx = migrator_ctx
def downstream_children(
self,
feedstock_ctx: FeedstockContext,
limit: int = 5,
) -> List["PackageName"]:
"""Utility method for getting a list of follow on packages"""
return [
a[1]
for a in list(
self.ctx.effective_graph.out_edges(feedstock_ctx.package_name),
)
][:limit]
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
"""If true don't act upon node
Parameters
----------
attrs : dict
The node attributes
not_bad_str_start : str, optional
            If the 'bad' notice starts with the string then it is not
            to be excluded. For example, rebuild migrations don't need
            to worry about whether the upstream can be fetched. Defaults to ``''``
Returns
-------
bool :
True if node is to be skipped
"""
# never run on archived feedstocks
# don't run on things we've already done
# don't run on bad nodes
__name = attrs.get("name", "")
def parse_already_pred() -> bool:
pr_data = frozen_to_json_friendly(self.migrator_uid(attrs))
migrator_uid: "MigrationUidTypedDict" = typing.cast(
"MigrationUidTypedDict",
pr_data["data"],
)
already_migrated_uids: typing.Iterable["MigrationUidTypedDict"] = list(
z["data"] for z in attrs.get("PRed", [])
)
already_pred = migrator_uid in already_migrated_uids
if already_pred:
ind = already_migrated_uids.index(migrator_uid)
LOGGER.debug(f"{__name}: already PRed: uid: {migrator_uid}")
if "PR" in attrs.get("PRed", [])[ind]:
if isinstance(attrs.get("PRed", [])[ind]["PR"], LazyJson):
with attrs.get("PRed", [])[ind]["PR"] as mg_attrs:
LOGGER.debug(
"%s: already PRed: PR file: %s"
% (__name, mg_attrs.file_name),
)
html_url = mg_attrs.get("html_url", "no url")
LOGGER.debug(f"{__name}: already PRed: url: {html_url}")
return already_pred
if attrs.get("archived", False):
LOGGER.debug("%s: archived" % __name)
bad_attr = _parse_bad_attr(attrs, not_bad_str_start)
if bad_attr:
LOGGER.debug("%s: bad attr" % __name)
return attrs.get("archived", False) or parse_already_pred() or bad_attr
def get_possible_feedstock_branches(self, attrs: "AttrsTypedDict") -> List[str]:
"""Return the valid possible branches to which to apply this migration to
for the given attrs.
Parameters
----------
attrs : dict
The node attributes
Returns
-------
branches : list of str
            List of valid branches for this migration.
"""
branches = ["main"]
try:
branches += (
attrs.get("conda-forge.yml", {})
.get("bot", {})
.get("abi_migration_branches", [])
)
except Exception:
LOGGER.exception(f"Invalid value for {attrs.get('conda-forge.yml', {})=}")
# make sure this is always a string
return [str(b) for b in branches]
def run_pre_piggyback_migrations(
self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any
) -> "MigrationUidTypedDict":
"""Perform any pre piggyback migrations, updating the feedstock.
Parameters
----------
recipe_dir : str
The directory of the recipe
attrs : dict
The node attributes
"""
for mini_migrator in self.piggy_back_migrations:
if mini_migrator.post_migration:
continue
if not mini_migrator.filter(attrs):
mini_migrator.migrate(recipe_dir, attrs, **kwargs)
def run_post_piggyback_migrations(
self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any
) -> "MigrationUidTypedDict":
"""Perform any post piggyback migrations, updating the feedstock.
Parameters
----------
recipe_dir : str
The directory of the recipe
attrs : dict
The node attributes
"""
for mini_migrator in self.piggy_back_migrations:
if not mini_migrator.post_migration:
continue
if not mini_migrator.filter(attrs):
mini_migrator.migrate(recipe_dir, attrs, **kwargs)
def migrate(
self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any
) -> "MigrationUidTypedDict":
"""Perform the migration, updating the ``meta.yaml``
Parameters
----------
recipe_dir : str
The directory of the recipe
attrs : dict
The node attributes
Returns
-------
        MigrationUidTypedDict or bool:
            The migration uid used to identify the PR; if False, scrap the local folder.
"""
return self.migrator_uid(attrs)
def pr_body(self, feedstock_ctx: FeedstockContext, add_label_text=True) -> str:
"""Create a PR message body
Returns
-------
body: str
The body of the PR message
:param feedstock_ctx:
"""
body = "{}\n\n"
if add_label_text:
body += (
"If this PR was opened in error or needs to be updated please add "
"the `bot-rerun` label to this PR. The bot will close this PR and "
"schedule another one. If you do not have permissions to add this "
"label, you can use the phrase "
"<code>@<space/>conda-forge-admin, please rerun bot</code> "
"in a PR comment to have the `conda-forge-admin` add it for you.\n\n"
)
body += (
"<sub>"
"This PR was created by the [regro-cf-autotick-bot](https://github.com/regro/cf-scripts). " # noqa
"The **regro-cf-autotick-bot** is a service to automatically "
"track the dependency graph, migrate packages, and "
"propose package version updates for conda-forge. "
"Feel free to drop us a line if there are any "
"[issues](https://github.com/regro/cf-scripts/issues)! "
+ f"This PR was generated by {self.ctx.session.circle_build_url}, please use this URL for debugging." # noqa
+ "</sub>"
)
return body
def commit_message(self, feedstock_ctx: FeedstockContext) -> str:
"""Create a commit message
:param feedstock_ctx:
"""
return f"migration: {self.__class__.__name__}"
def pr_title(self, feedstock_ctx: FeedstockContext) -> str:
"""Title for PR
:param feedstock_ctx:
"""
return "PR from Regro-cf-autotick-bot"
def remote_branch(self, feedstock_ctx: FeedstockContext) -> str:
"""Branch to use on local and remote
:param feedstock_context:
"""
return "bot-pr"
def migrator_uid(self, attrs: "AttrsTypedDict") -> "MigrationUidTypedDict":
"""Make a unique id for this migrator and node attrs
Parameters
----------
attrs
Node attrs
Returns
-------
nt: frozen_to_json_friendly
The unique id as a frozen_to_json_friendly (so it can be
used as keys in dicts)
"""
d: "MigrationUidTypedDict" = {
"migrator_name": self.__class__.__name__,
"migrator_version": self.migrator_version,
"bot_rerun": False,
}
# Carveout for old migrators w/o obj_versions
if self.obj_version:
d["migrator_object_version"] = self.obj_version
branch = attrs.get("branch", "main")
if branch != "main" and branch != "master":
d["branch"] = branch
return d
def order(
self,
graph: nx.DiGraph,
total_graph: nx.DiGraph,
) -> Sequence["PackageName"]:
"""Order to run migrations in
Parameters
----------
graph : nx.DiGraph
The graph of migratable PRs
Returns
        -------
        Sequence["PackageName"]
            The package names in the order their migrations should be run.
"""
top_level = {
node
for node in graph
if not list(graph.predecessors(node))
or list(graph.predecessors(node)) == [node]
}
return cyclic_topological_sort(graph, top_level)
def set_build_number(self, filename: str) -> None:
"""Bump the build number of the specified recipe.
Parameters
----------
filename : str
            Path to the meta.yaml
"""
for p, n in self.build_patterns:
with open(filename) as f:
raw = f.read()
lines = raw.splitlines()
for i, line in enumerate(lines):
m = p.match(line)
if m is not None:
old_build_number = int(m.group(2))
new_build_number = self.new_build_number(old_build_number)
lines[i] = m.group(1) + n.format(new_build_number)
upd = "\n".join(lines) + "\n"
with open(filename, "w") as f:
f.write(upd)
def new_build_number(self, old_number: int) -> int:
"""Determine the new build number to use.
Parameters
----------
old_number : int
Old build number detected
Returns
-------
new_build_number
"""
increment = getattr(self, "bump_number", 1)
return old_number + increment
@classmethod
def migrator_label(cls) -> dict:
# This is the label that the bot will attach to a pr made by the bot
return {
"name": f"bot-{cls.__name__.lower()}",
"description": (cls.__doc__ or | |
<reponame>bwhmather/python-payment-terminal
from collections import OrderedDict
from .fields import (
BBSField, DelimitedField,
ConstantField, EnumField,
IntegerField, PriceField,
TextField, FormattedTextField,
DateTimeField,
)
import logging
log = logging.getLogger('payment_terminal')
class BBSMessageMeta(type):
def __new__(mcs, cls, bases, d):
fields = OrderedDict()
# inherit fields from first base class with `_fields` attribute
for base in bases:
if hasattr(base, '_fields'):
fields.update(base._fields)
break
# read fields from class body
for name, field in d.items():
if isinstance(field, BBSField):
fields[name] = field
d['_fields'] = fields
return type.__new__(mcs, cls, bases, d)
@classmethod
def __prepare__(mcs, cls, bases):
# the dictionary to use to store class attributes
# need to return OrderedDict rather than default dict as attribute
# order affects parsing
return OrderedDict()
class BBSMessageBase(object):
def __init__(self, **kwargs):
for name, field in self._fields.items():
if name in kwargs:
value = kwargs[name]
elif hasattr(field, 'default'):
value = field.default
else:
raise TypeError('missing required argument: %r' % name)
setattr(self, name, value)
def pack(self):
return b''.join(
field.pack(getattr(self, name))
for name, field in self._fields.items()
)
@classmethod
def unpack_fields(cls, data):
fields = OrderedDict()
offset = 0
for name, field in cls._fields.items():
fields[name], size = field.unpack(data[offset:])
offset += size
return fields
@classmethod
def unpack(cls, data):
return cls(**cls.unpack_fields(data))
def __repr__(self):
parts = [self.__class__.__name__]
parts += (
"%s=%r" % (name, getattr(self, name))
for name in self._fields
)
return "<%s>" % " ".join(parts)
class BBSMessage(BBSMessageBase, metaclass=BBSMessageMeta):
is_response = False
class DisplayTextMessage(BBSMessage):
type = ConstantField(b'\x41')
prompt_customer = EnumField({
b'\x31': True,
b'\x30': False,
}, default=True)
expects_input = EnumField({
b'\x31': True,
b'\x30': False,
}, default=False)
mode = ConstantField(b'\x30')
text = TextField()
def __init__(self, text, **kwargs):
# allow `text` to be passed in as a positional argument
super(DisplayTextMessage, self).__init__(text=text, **kwargs)
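
# A minimal round-trip sketch for the message classes in this module. It is
# illustrative only: it assumes ConstantField and EnumField fall back to their
# single declared value / `default` when no argument is given (as the class
# bodies above suggest) and that TextField round-trips plain ASCII text.
def _example_display_text_roundtrip():
    # Only `text` is required; every other field uses its declared default.
    msg = DisplayTextMessage("PLEASE WAIT")
    data = msg.pack()  # bytes, fields serialised in declaration order
    parsed = DisplayTextMessage.unpack(data)
    return parsed.text  # expected to equal "PLEASE WAIT"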
class PrintTextMessage(BBSMessage):
type = ConstantField(b'\x42')
sub_type = EnumField({b'\x20': 'formatted'})
media = EnumField({
b'\x20': 'print_on_receipt',
b'\x21': 'print_on_journal',
b'\x22': 'print_on_both',
}, default='print_on_both')
mode = EnumField({b'\x2a': 'normal_text'})
commands = FormattedTextField()
class ResetTimerMessage(BBSMessage):
type = ConstantField(b'\x43')
seconds = IntegerField(3)
def __init__(self, seconds, **kwargs):
# allow `seconds` to be passed in as a positional argument
super(ResetTimerMessage, self).__init__(seconds=seconds, **kwargs)
class LocalModeMessage(BBSMessage):
type = ConstantField(b'\x44')
result = EnumField({
# indicates transaction OK
b'\x20': 'success',
# indicates transaction/operation rejected
b'\x21': 'failure',
})
acc = EnumField({
# indicates standard update of accumulator
b'\x20': 'standard',
# indicates transaction is finalised as Offline transaction
b'\x22': 'offline',
# indicates no update of accumulator
b'\x30': 'none',
})
    # Two-digit issuer number indicating the card issuer. Used if the
    # transaction was accepted. As long as the data is available, it shall be
    # sent regardless of whether the transaction is rejected or accepted.
issuer_id = IntegerField(2)
    # Variable field length, max. 19 digits if present. The Primary Account
    # Number from the card holder. The PAN shall not be sent if some parts of
    # the card number are replaced with "*" in the printout. The PAN field is
    # of restricted use due to security regulations.
pan = DelimitedField(TextField(19), optional=True, delimiter=b';')
# 14 byte numeric data. Timestamp in format YYYYMMDDHHMMSS. The timestamp
# shall be the same data as received from the Host to the terminal in the
# response message
timestamp = DelimitedField(DateTimeField(), delimiter=b';')
# Cardholder Verification Method
ver_method = DelimitedField(EnumField({
# transaction is PIN based, also to be used if reversal transaction
b'\x30': 'pin_based',
# transaction is signature based
b'\x31': 'signature_based',
# no CVM. Only amount is verified by cardholder
b'\x32': 'not_verified',
# transaction is a Loyalty Transaction. Used for data capture
# transactions. No accounts are debited or credited
b'\x33': 'loyalty_transaction',
}), delimiter=b';')
# 3 byte, numeric data. The current session number received from the HOST.
# The session number is uncertain in case that the transaction is an
# Offline transaction. This number is changed on reconciliation.
session_num = DelimitedField(IntegerField(3), delimiter=b';')
# 12 byte, Alphanumeric data (H20-H7F). The STAN_AUTH and the TIMESTAMP
# will identify the transaction.
# * On-line: The STAN (System Trace Audit Number) is the 6 first bytes,
# and the Authorisation Code is the 6 last bytes.
# * Off-line: STAN=9xxxx9 where x is the message number for the actual
# transaction AUTH = <H20H20H20H20H20H20>
stan_auth = DelimitedField(TextField(12), delimiter=b';')
# 4 bytes numeric data (H30 .. H39). This is the customer number if the
# transaction was Pre-Auth transaction. Must be used as reference in
# Transfer Amount - Adjustment transaction.
seq_no = DelimitedField(IntegerField(4), delimiter=b';')
# 11 bytes numeric data (H30 .. H39). Normally not used. Only used in
    # Restaurant or Hotel environment where TIP is added to the purchase amount
# on the ITU. Used in the Purchase or Adjustment transaction.
tip = DelimitedField(PriceField(), optional=True, delimiter=b';')
class KeyboardInputRequestMessage(BBSMessage):
type = ConstantField(b'\x46')
# Indicates if the entered chars should be echoed on the ECR display or not
echo = EnumField({
b'\x20': True,
b'\x21': False,
})
# Minimum and maximum number of chars to enter (as a decimal ascii string)
min_chars = TextField(2)
max_chars = TextField(2)
class KeyboardInputMessage(BBSMessage):
type = ConstantField(b'\x55')
is_response = True
text = TextField()
# XXX how are you supposed to parse this
delimiter = EnumField({
b'0': 'enter',
b'9': 'escape',
})
def __init__(self, text, **kwargs):
# allow `text` to be passed in as a positional argument
super(KeyboardInputMessage, self).__init__(text=text, **kwargs)
@classmethod
def unpack_fields(cls, data):
# currently special cased because of fixed size `delimiter` field
# following variable length `text` field.
# TODO yuck yuck yuck
fields = OrderedDict()
fields['type'], size = cls.type.unpack(data)
text_data = data[size:-cls.delimiter.size]
fields['text'], size = cls.text.unpack(text_data)
delimiter_data = data[-cls.delimiter.size:]
        fields['delimiter'], size = cls.delimiter.unpack(delimiter_data)
        return fields
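        # Assumed wire layout (a sketch inferred from the field declarations
        # above, not from protocol documentation): a one-byte type marker
        # b'\x55', a variable-length text payload, then a single delimiter
        # byte (b'0' for enter, b'9' for escape). For example, assuming
        # TextField decodes the payload to a str:
        #
        #     KeyboardInputMessage.unpack(b'\x55' + b'1234' + b'0')
        #     # -> text == '1234', delimiter == 'enter'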
class SendDataMessageBase(BBSMessage):
type = ConstantField(b'\x46')
code = TextField(2)
is_last_block = EnumField({
b'\x32': True,
b'\x31': False,
})
seq = TextField(4) # ignored
length = TextField(3) # ignored
class SendReportsDataHeaderMessage(SendDataMessageBase):
code = ConstantField(b'\x30\x31')
site_number = TextField(6)
session_number = TextField(3)
timestamp = DateTimeField()
class SendReconciliationDataAmountsMessage(SendDataMessageBase):
code = ConstantField(b'\x30\x32')
issuer_id = TextField(2)
num_transactions = IntegerField(4)
# TODO
class SendDataMessage(SendDataMessageBase):
code = EnumField({
subfunction.code.value: subfunction
for subfunction in [
SendReportsDataHeaderMessage,
SendReconciliationDataAmountsMessage,
# TODO
]
})
@classmethod
def unpack(cls, data):
self = super(SendDataMessage, cls).unpack(data)
return self.code.unpack(data)
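    # Dispatch sketch: `unpack` first parses the buffer generically, letting the
    # EnumField above map the two-byte code (e.g. b'\x30\x31') to the matching
    # subclass, and then re-parses the whole buffer with that subclass so the
    # caller receives a fully-typed message, relying on each subclass's
    # `code.value` constant as used in the EnumField mapping above.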
class TransferAmountMessage(BBSMessage):
type = ConstantField(b'\x51')
timestamp = DateTimeField() # not used
id_no = TextField(6) # not used
# Normally set to "0000". If set in Pre-Auth, the number is a reference to
# a previous Preauth. If set in Adjustment transaction, the field shall be
# set to the corresponding number received in the Local Mode from the
# Pre-Authorisation.
seq_no = TextField(4) # TODO
# Operator identification. A fixed field with 4 characters. If not
# implemented by the ECR vendor, the field should be filled with zeroes
# (H30's).
operator_id = TextField(4)
    # Not used, but tested by the ITU for error prevention.
mode = EnumField({
b'\x30': None,
})
transfer_type = EnumField({
b'\x30': 'eft_authorisation',
b'\x31': 'return_of_goods',
b'\x32': 'reversal',
b'\x33': 'purchase_with_cashback',
b'\x34': 'pre_authorisation',
b'\x35': 'adjustment',
b'\x36': 'balance_inquiry',
b'\x37': 'complete_receipt',
b'\x38': 'deposit',
b'\x39': 'cash_withdrawal',
b'\x3a': 'load_epurse_card',
b'\x3b': 'merchandise_purchase',
b'\x3c': 'merchandise_reversal',
b'\x3d': 'merchandise_correction',
})
amount = PriceField(11)
    # Not used, but tested by the ITU for error prevention.
unused_type = EnumField({
b'\x30': None,
})
    # Only used if transfer_type == 'purchase_with_cashback' (H33), else it will
# be filled with H20.
cashback_amount = PriceField(11)
is_top_up = EnumField({
b'\x30': True,
b'\x31': False,
})
art_amount = PriceField(11)
data = DelimitedField(TextField(), delimiter=b';')
# TODO ART#
class TransferCardDataMessage(BBSMessage):
type = ConstantField(b'\x52')
block = EnumField({b'\x30': None})
track = EnumField({
b'\x32': 'track_2',
b'\x33': 'track_1',
b'\x40': 'manual',
})
# TODO DATA and FS
class AdministrationMessage(BBSMessage):
type = ConstantField(b'\x53')
timestamp = DateTimeField()
id_no = TextField(6)
seq_no = TextField(4)
opt = TextField(4)
# TODO single character keyboard input
adm_code = EnumField({
b'\x30\x30': 'not_used',
b'\x30\x39': 'not_used',
# SEND from ECR should be mapped by ITU to perform RECONCILIATION
# function.
b'\x31\x30': 'send',
# KLAR, validation key. Refer to the NOTE for details
b'\x31\x31': 'ready',
# AVBRYT, cancellation key. Refer to the NOTE for details.
b'\x31\x32': 'cancel',
# FEIL, correction key.
b'\x31\x33': 'error',
# ANNUL from ECR should be mapped by ITU to perform REVERSAL
# transaction.
b'\x31\x34': 'reverse',
b'\x31\x35': 'balance_inquiry_transaction',
b'\x31\x36': 'x_report',
b'\x31\x37': 'z_report',
b'\x31\x38': 'send_offline_transactions',
b'\x31\x39': 'turnover_report',
b'\x31\x3A': 'print_eot_transactions',
b'\x31\x3B': 'not_used',
b'\x31\x3C': 'not_used',
b'\x31\x3D': 'not_used',
b'\x31\x3E': 'not_used',
})
fs = ConstantField(b'\x1C')
class DeviceAttributeRequestMessage(BBSMessage):
type = | |
'9e4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b9e)and b3e==''\
and board.s8e+board.s7e+board.s6e+board.s5e+board.s4e=='':
moves = '9e3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b9e)and b2e==''\
and board.s8e+board.s7e+board.s6e+board.s5e+board.s4e+board.s3e=='':
moves = '9e2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b9e)and b1e==''\
and board.s8e+board.s7e+board.s6e+board.s5e+board.s4e+board.s3e+board.s2e=='':
moves = '9e1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b9e)and b7g==''\
and board.s8f=='':
moves = '9e7g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b9e)and b6h==''\
and board.s8f+board.s7g=='':
moves = '9e6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b9e)and b5i==''\
and board.s8f+board.s7g+board.s6h=='':
moves = '9e5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b9e)and b5a==''\
and board.s6b+board.s7c+board.s8d=='':
moves = '9e5a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b9e)and b6b==''\
and board.s7c+board.s8d=='':
moves = '9e6b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b9e)and b7c==''\
and board.s8d=='':
moves = '9e7c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b9e)and b5a==''\
and board.s6b+board.s7c+board.s8d=='':
moves = '9e5a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b9e)and b6b==''\
and board.s7c+board.s8d=='':
moves = '9e6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B',Bboard.b9e)and b7c==''\
and board.s8d=='':
moves = '9e7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.b1f !='':
if re.match(r'[PLSGRK+]', Bboard.b1f)and b1e=='':
moves = '1f1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b1f)and b2e=='':
moves = '1f2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b1f)and b2f=='':
moves = '1f2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b1f)and b1g=='':
moves = '1f1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b1f)and b2g=='':
moves = '1f2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b1f)and b2d=='':
moves = '1f2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b1f)and b1a==''\
and board.s1b+board.s1c+board.s1d+board.s1e=='':
moves = '1f1a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b1f)and b1a==''\
and board.s1b+board.s1c+board.s1d+board.s1e=='':
moves = '1f1a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b1f)and b1b==''\
and board.s1c+board.s1d+board.s1e=='':
moves = '1f1b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b1f)and b1b==''\
and board.s1c+board.s1d+board.s1e=='':
moves = '1f1b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|L', Bboard.b1f)and b1c==''\
and board.s1d+board.s1e=='':
moves = '1f1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b1f)and b1c==''\
and board.s1d+board.s1e=='':
moves = '1f1c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R|L', Bboard.b1f)and b1d==''\
and board.s1e=='':
moves = '1f1d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b1h==''\
and board.s1g=='':
moves = '1f1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b1i==''\
and board.s1g+board.s1h=='':
moves = '1f1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b3f==''\
and board.s2f=='':
moves = '1f3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b4f==''\
and board.s2f+board.s3f=='':
moves = '1f4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b5f==''\
and board.s2f+board.s3f+board.s4f=='':
moves = '1f5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b6f==''\
and board.s2f+board.s3f+board.s4f+board.s5f=='':
moves = '1f6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b7f==''\
and board.s2f+board.s3f+board.s4f+board.s5f+board.s6f=='':
moves = '1f7f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b8f==''\
and board.s2f+board.s3f+board.s4f+board.s5f+board.s6f+board.s7f=='':
moves = '1f8f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b1f)and b9f==''\
and board.s2f+board.s3f+board.s4f+board.s5f+board.s6f+board.s7f+board.s8f=='':
moves = '1f9f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b1f)and b3d==''\
and board.s2e=='':
moves = '1f3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1f)and b4c==''\
and board.s2e+board.s3d=='':
moves = '1f4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1f)and b5b==''\
and board.s2e+board.s3d+board.s4c=='':
moves = '1f5b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1f)and b6a==''\
and board.s2e+board.s3d+board.s4c+board.s5b=='':
moves = '1f6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1f)and b4i==''\
and board.s3h+board.s2g=='':
moves = '1f4i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1f)and b3h==''\
and board.s2g=='':
moves = '1f3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B', Bboard.b1f)and b4c==''\
and board.s2e+board.s3d=='':
moves = '1f4c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B', Bboard.b1f)and b5b==''\
and board.s2e+board.s3d+board.s4c=='':
moves = '1f5b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B', Bboard.b1f)and b6a==''\
and board.s2e+board.s3d+board.s4c+board.s5b=='':
moves = '1f6a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.b2f !='':
if re.match(r'[PLSGRK+]', Bboard.b2f)and b2e=='':
moves = '2f2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b2f)and b1e=='':
moves = '2f1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b2f)and b3e=='':
moves = '2f3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b2f)and b1f=='':
moves = '2f1f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b2f)and b3f=='':
moves = '2f3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b2f)and b2g=='':
moves = '2f2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b2f)and b1g=='':
moves = '2f1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b2f)and b3g=='':
moves = '2f3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b2f)and b1d=='':
moves = '2f1d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b2f)and b3d=='':
moves = '2f3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b2f)and b2a==''\
and board.s2b+board.s2c+board.s2d+board.s2e=='':
moves = '2f2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b2f)and b2a==''\
and board.s2b+board.s2c+board.s2d+board.s2e=='':
moves = '2f2a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b2f)and b2b==''\
and board.s2c+board.s2d+board.s2e=='':
moves = '2f2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b2f)and b2b==''\
and board.s2c+board.s2d+board.s2e=='':
moves = '2f2b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|L', Bboard.b2f)and b2c==''\
and board.s2d+board.s2e=='':
moves = '2f2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b2f)and b2c==''\
and board.s2d+board.s2e=='':
moves = '2f2c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R|L', Bboard.b2f)and b2d==''\
and board.s2e=='':
moves = '2f2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b2h==''\
and board.s2g=='':
moves = '2f2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b2i==''\
and board.s2g+board.s2h=='':
moves = '2f2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b4f==''\
and board.s3f=='':
moves = '2f4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b5f==''\
and board.s3f+board.s4f=='':
moves = '2f5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b6f==''\
and board.s3f+board.s4f+board.s5f=='':
moves = '2f6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b7f==''\
and board.s3f+board.s4f+board.s5f+board.s6f=='':
moves = '2f7f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b8f==''\
and board.s3f+board.s4f+board.s5f+board.s6f+board.s7f=='':
moves = '2f8f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2f)and b9f==''\
and board.s3f+board.s4f+board.s5f+board.s6f+board.s7f+board.s8f=='':
moves = '2f9f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b2f)and b4d==''\
and board.s3e=='':
moves = '2f4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b2f)and b5c==''\
and board.s3e+board.s4d=='':
moves = '2f5c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b2f)and b6b==''\
and board.s3e+board.s4d+board.s5c=='':
moves = '2f6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b2f)and b7a==''\
and board.s3e+board.s4d+board.s5c+board.s6b=='':
moves = '2f7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b2f)and b5i==''\
and board.s4h+board.s3g=='':
moves = '2f5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b2f)and b4h==''\
and board.s3g=='':
moves = '2f4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B', Bboard.b2f)and b5c==''\
and board.s3e+board.s4d=='':
moves = '2f5c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B', Bboard.b2f)and b6b==''\
and board.s3e+board.s4d+board.s5c=='':
moves = '2f6b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B', Bboard.b2f)and b7a==''\
and board.s3e+board.s4d+board.s5c+board.s6b=='':
moves = '2f7a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.b3f !='':
if re.match(r'[PLSGRK+]', Bboard.b3f)and b3e=='':
moves = '3f3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b3f)and b2e=='':
moves = '3f2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b3f)and b4e=='':
moves = '3f4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b3f)and b2f=='':
moves = '3f2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b3f)and b4f=='':
moves = '3f4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b3f)and b3g=='':
moves = '3f3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b3f)and b2g=='':
moves = '3f2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b3f)and b4g=='':
moves = '3f4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b3f)and b2d=='':
moves = '3f2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b3f)and b4d=='':
moves = '3f4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b3f)and b3a==''\
and board.s3b+board.s3c+board.s3d+board.s3e=='':
moves = '3f3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b3f)and b3a==''\
and | |
[robot, hammer]
option_vars = [robot, hammer]
option = PickHammer
preconditions = {
LiftedAtom(HandEmpty, [robot]),
LiftedAtom(HammerGraspable, [hammer])
}
add_effects = {LiftedAtom(HoldingHammer, [hammer])}
delete_effects = {LiftedAtom(HandEmpty, [robot])}
nsrts.add(
NSRT("PickHammer", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# PickBolt
robot = Variable("?robot", robot_type)
bolt = Variable("?bolt", bolt_type)
parameters = [robot, bolt]
option_vars = [robot, bolt]
option = PickBolt
preconditions = {
LiftedAtom(HandEmpty, [robot]),
}
add_effects = {LiftedAtom(HoldingBolt, [bolt])}
delete_effects = {
LiftedAtom(HandEmpty, [robot]),
}
nsrts.add(
NSRT("PickBolt", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# PickWrench
robot = Variable("?robot", robot_type)
wrench = Variable("?wrench", wrench_type)
parameters = [robot, wrench]
option_vars = [robot, wrench]
option = PickWrench
preconditions = {LiftedAtom(HandEmpty, [robot])}
add_effects = {LiftedAtom(HoldingWrench, [wrench])}
delete_effects = {LiftedAtom(HandEmpty, [robot])}
nsrts.add(
NSRT("PickWrench", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# PlaceScrewdriverBack
robot = Variable("?robot", robot_type)
screwdriver = Variable("?screwdriver", screwdriver_type)
parameters = [robot, screwdriver]
option_vars = [robot]
option = Place
preconditions = {
LiftedAtom(HoldingScrewdriver, [screwdriver]),
LiftedAtom(ScrewdriverGraspable, [screwdriver])
}
add_effects = {LiftedAtom(HandEmpty, [robot])}
delete_effects = {LiftedAtom(HoldingScrewdriver, [screwdriver])}
nsrts.add(
NSRT("PlaceScrewdriverBack", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, placeback_sampler))
# PlaceHammerBack
robot = Variable("?robot", robot_type)
hammer = Variable("?hammer", hammer_type)
parameters = [robot, hammer]
option_vars = [robot]
option = Place
preconditions = {
LiftedAtom(HoldingHammer, [hammer]),
LiftedAtom(HammerGraspable, [hammer])
}
add_effects = {LiftedAtom(HandEmpty, [robot])}
delete_effects = {LiftedAtom(HoldingHammer, [hammer])}
nsrts.add(
NSRT("PlaceHammerBack", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, placeback_sampler))
# PlaceWrenchBack
robot = Variable("?robot", robot_type)
wrench = Variable("?wrench", wrench_type)
parameters = [robot, wrench]
option_vars = [robot]
option = Place
preconditions = {LiftedAtom(HoldingWrench, [wrench])}
add_effects = {LiftedAtom(HandEmpty, [robot])}
delete_effects = {LiftedAtom(HoldingWrench, [wrench])}
nsrts.add(
NSRT("PlaceWrenchBack", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, placeback_sampler))
# PlaceScrewOnContraption
robot = Variable("?robot", robot_type)
screw = Variable("?screw", screw_type)
contraption = Variable("?contraption", contraption_type)
parameters = [robot, screw, contraption]
option_vars = [robot]
option = Place
preconditions = {LiftedAtom(HoldingScrew, [screw])}
add_effects = {
LiftedAtom(HandEmpty, [robot]),
LiftedAtom(ScrewPlaced, [screw, contraption])
}
delete_effects = {LiftedAtom(HoldingScrew, [screw])}
nsrts.add(
NSRT("PlaceScrewOnContraption", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars,
placeoncontraption_sampler))
# PlaceNailOnContraption
robot = Variable("?robot", robot_type)
nail = Variable("?nail", nail_type)
contraption = Variable("?contraption", contraption_type)
parameters = [robot, nail, contraption]
option_vars = [robot]
option = Place
preconditions = {LiftedAtom(HoldingNail, [nail])}
add_effects = {
LiftedAtom(HandEmpty, [robot]),
LiftedAtom(NailPlaced, [nail, contraption])
}
delete_effects = {LiftedAtom(HoldingNail, [nail])}
nsrts.add(
NSRT("PlaceNailOnContraption", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars,
placeoncontraption_sampler))
# PlaceBoltOnContraption
robot = Variable("?robot", robot_type)
bolt = Variable("?bolt", bolt_type)
contraption = Variable("?contraption", contraption_type)
parameters = [robot, bolt, contraption]
option_vars = [robot]
option = Place
preconditions = {LiftedAtom(HoldingBolt, [bolt])}
add_effects = {
LiftedAtom(HandEmpty, [robot]),
LiftedAtom(BoltPlaced, [bolt, contraption])
}
delete_effects = {LiftedAtom(HoldingBolt, [bolt])}
nsrts.add(
NSRT("PlaceBoltOnContraption", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars,
placeoncontraption_sampler))
# FastenScrewWithScrewdriver
robot = Variable("?robot", robot_type)
screw = Variable("?screw", screw_type)
screwdriver = Variable("?screwdriver", screwdriver_type)
contraption = Variable("?contraption", contraption_type)
parameters = [robot, screw, screwdriver, contraption]
option_vars = [robot, screw, screwdriver, contraption]
option = FastenScrewWithScrewdriver
preconditions = {
LiftedAtom(HoldingScrewdriver, [screwdriver]),
LiftedAtom(ScrewPlaced, [screw, contraption])
}
add_effects = {LiftedAtom(ScrewFastened, [screw])}
delete_effects = set()
nsrts.add(
NSRT("FastenScrewWithScrewdriver", parameters, preconditions,
add_effects, delete_effects, set(), option, option_vars,
null_sampler))
# FastenScrewByHand
robot = Variable("?robot", robot_type)
screw = Variable("?screw", screw_type)
contraption = Variable("?contraption", contraption_type)
parameters = [robot, screw, contraption]
option_vars = [robot, screw, contraption]
option = FastenScrewByHand
preconditions = {
LiftedAtom(HandEmpty, [robot]),
LiftedAtom(ScrewPlaced, [screw, contraption])
}
add_effects = {LiftedAtom(ScrewFastened, [screw])}
delete_effects = set()
nsrts.add(
NSRT("FastenScrewByHand", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# FastenNailWithHammer
robot = Variable("?robot", robot_type)
nail = Variable("?nail", nail_type)
hammer = Variable("?hammer", hammer_type)
contraption = Variable("?contraption", contraption_type)
parameters = [robot, nail, hammer, contraption]
option_vars = [robot, nail, hammer, contraption]
option = FastenNailWithHammer
preconditions = {
LiftedAtom(HoldingHammer, [hammer]),
LiftedAtom(NailPlaced, [nail, contraption])
}
add_effects = {LiftedAtom(NailFastened, [nail])}
delete_effects = set()
nsrts.add(
NSRT("FastenNailWithHammer", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# FastenBoltWithWrench
robot = Variable("?robot", robot_type)
bolt = Variable("?bolt", bolt_type)
wrench = Variable("?wrench", wrench_type)
contraption = Variable("?contraption", contraption_type)
parameters = [robot, bolt, wrench, contraption]
option_vars = [robot, bolt, wrench, contraption]
option = FastenBoltWithWrench
preconditions = {
LiftedAtom(HoldingWrench, [wrench]),
LiftedAtom(BoltPlaced, [bolt, contraption])
}
add_effects = {LiftedAtom(BoltFastened, [bolt])}
delete_effects = set()
nsrts.add(
NSRT("FastenBoltWithWrench", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
return nsrts
def _get_playroom_gt_nsrts() -> Set[NSRT]:
"""Create ground truth NSRTs for Playroom Env."""
block_type, robot_type, door_type, dial_type, region_type = \
_get_types_by_names(CFG.env,
["block", "robot", "door", "dial", "region"])
On, OnTable, GripperOpen, Holding, Clear, NextToTable, NextToDoor, \
NextToDial, InRegion, Borders, Connects, IsBoringRoom, IsPlayroom, \
IsBoringRoomDoor, IsPlayroomDoor, DoorOpen, DoorClosed, LightOn, \
LightOff = \
_get_predicates_by_names(
"playroom", ["On", "OnTable", "GripperOpen", "Holding", "Clear",
"NextToTable", "NextToDoor", "NextToDial", "InRegion", "Borders",
"Connects", "IsBoringRoom", "IsPlayroom", "IsBoringRoomDoor",
"IsPlayroomDoor", "DoorOpen", "DoorClosed", "LightOn", "LightOff"])
Pick, Stack, PutOnTable, MoveToDoor, MoveDoorToTable, \
MoveDoorToDial, OpenDoor, CloseDoor, TurnOnDial, \
TurnOffDial = _get_options_by_names("playroom",
["Pick", "Stack", "PutOnTable", "MoveToDoor",
"MoveDoorToTable", "MoveDoorToDial", "OpenDoor", "CloseDoor",
"TurnOnDial", "TurnOffDial"])
nsrts = set()
# PickFromTable
block = Variable("?block", block_type)
robot = Variable("?robot", robot_type)
parameters = [robot, block]
option_vars = [robot, block]
option = Pick
preconditions = {
LiftedAtom(OnTable, [block]),
LiftedAtom(Clear, [block]),
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(NextToTable, [robot])
}
add_effects = {LiftedAtom(Holding, [block])}
delete_effects = {
LiftedAtom(OnTable, [block]),
LiftedAtom(Clear, [block]),
LiftedAtom(GripperOpen, [robot])
}
def pickfromtable_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal, rng # unused
assert len(objs) == 2
_, block = objs
assert block.is_instance(block_type)
# find rotation of robot that faces the table
x, y = state.get(block, "pose_x"), state.get(block, "pose_y")
cls = PlayroomEnv
table_x = (cls.table_x_lb + cls.table_x_ub) / 2
table_y = (cls.table_y_lb + cls.table_y_ub) / 2
rotation = np.arctan2(table_y - y, table_x - x) / np.pi
return np.array([rotation], dtype=np.float32)
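    # Worked example for the rotation formula above (illustrative numbers): if
    # the table centre is (tx, ty) and the block sits at (tx + 1.0, ty), then
    # arctan2(ty - y, tx - x) = arctan2(0, -1) = pi, so the sampled rotation is
    # pi / pi = 1.0, i.e. the robot is oriented along -x, facing back toward
    # the table centre.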
pickfromtable_nsrt = NSRT("PickFromTable", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, pickfromtable_sampler)
nsrts.add(pickfromtable_nsrt)
# Unstack
block = Variable("?block", block_type)
otherblock = Variable("?otherblock", block_type)
robot = Variable("?robot", robot_type)
parameters = [block, otherblock, robot]
option_vars = [robot, block]
option = Pick
preconditions = {
LiftedAtom(On, [block, otherblock]),
LiftedAtom(Clear, [block]),
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(NextToTable, [robot])
}
add_effects = {
LiftedAtom(Holding, [block]),
LiftedAtom(Clear, [otherblock])
}
delete_effects = {
LiftedAtom(On, [block, otherblock]),
LiftedAtom(Clear, [block]),
LiftedAtom(GripperOpen, [robot])
}
def unstack_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal, rng # unused
assert len(objs) == 3
block, _, _ = objs
assert block.is_instance(block_type)
# find rotation of robot that faces the table
x, y = state.get(block, "pose_x"), state.get(block, "pose_y")
cls = PlayroomEnv
table_x = (cls.table_x_lb + cls.table_x_ub) / 2
table_y = (cls.table_y_lb + cls.table_y_ub) / 2
rotation = np.arctan2(table_y - y, table_x - x) / np.pi
return np.array([rotation], dtype=np.float32)
unstack_nsrt = NSRT("Unstack",
parameters, preconditions, add_effects, delete_effects,
set(), option, option_vars, unstack_sampler)
nsrts.add(unstack_nsrt)
# Stack
block = Variable("?block", block_type)
otherblock = Variable("?otherblock", block_type)
robot = Variable("?robot", robot_type)
parameters = [block, otherblock, robot]
option_vars = [robot, otherblock]
option = Stack
preconditions = {
LiftedAtom(Holding, [block]),
LiftedAtom(Clear, [otherblock]),
LiftedAtom(NextToTable, [robot])
}
add_effects = {
LiftedAtom(On, [block, otherblock]),
LiftedAtom(Clear, [block]),
LiftedAtom(GripperOpen, [robot])
}
delete_effects = {
LiftedAtom(Holding, [block]),
LiftedAtom(Clear, [otherblock])
}
def stack_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal, rng # unused
assert len(objs) == 3
_, otherblock, _ = objs
assert otherblock.is_instance(block_type)
# find rotation of robot that faces the table
x, y = state.get(otherblock, "pose_x"), state.get(otherblock, "pose_y")
cls = PlayroomEnv
table_x = (cls.table_x_lb + cls.table_x_ub) / 2
table_y = (cls.table_y_lb + cls.table_y_ub) / 2
rotation = np.arctan2(table_y - y, table_x - x) / np.pi
return np.array([rotation], dtype=np.float32)
stack_nsrt = NSRT("Stack",
parameters, preconditions, add_effects, delete_effects,
set(), option, option_vars, stack_sampler)
nsrts.add(stack_nsrt)
# PutOnTable
block = Variable("?block", block_type)
robot = Variable("?robot", robot_type)
parameters = [block, robot]
option_vars = [robot]
option = PutOnTable
preconditions = {
LiftedAtom(Holding, [block]),
LiftedAtom(NextToTable, [robot])
}
add_effects = {
LiftedAtom(OnTable, [block]),
LiftedAtom(Clear, [block]),
LiftedAtom(GripperOpen, [robot])
}
delete_effects = {LiftedAtom(Holding, [block])}
def putontable_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del state, goal, objs # unused
x = rng.uniform()
y = rng.uniform()
# find rotation of robot that faces the table
cls = PlayroomEnv
table_x = (cls.table_x_lb + cls.table_x_ub) / 2
table_y = (cls.table_y_lb + cls.table_y_ub) / 2
rotation = np.arctan2(table_y - y, table_x - x) / np.pi
return np.array([x, y, rotation], dtype=np.float32)
putontable_nsrt = NSRT("PutOnTable", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, putontable_sampler)
nsrts.add(putontable_nsrt)
# AdvanceThroughDoor
robot = Variable("?robot", robot_type)
| |
#!/usr/bin/env python
# coding: utf-8
from typing import List, Dict, Optional, Any, Tuple
import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from tokenizers import Tokenizer
from datasets import load_dataset
from pytorch_lightning import LightningDataModule
CMD_START_TOKEN = '>>>['
CMD_END_TOKEN = ']<<<'
GAME_START_CMD = 'start'
def _span_len(span):
return span[1]-span[0]+1
class PlaythroughDataset(Dataset):
TARGET_CMD_TOKENS = "cmd_tokens" # one data sample per cmd token of each step of each game (ending on each token)
TARGET_CMD_PROMPTS = "cmd_prompts" # one data sample per step of each game (ending on the cmd_end token)
def __init__(self, data, block_size, cmd_markers: Tuple[int,int] = None, game_start_tok:int = None,
pad_tok:int=0, span_filtering=None, batch_size=1):
self.block_size = block_size
self.batch_size = batch_size
self.data = np.array(data) # make a copy of the given list of token ids
self.cmd_spans = None
self.game_spans = [] # each span (index in cmd_spans of game start, index into cmd_spans of start of next game)
self.game_start_tok = game_start_tok
self.pad_tok = pad_tok
self.span_filtering = span_filtering
if cmd_markers:
self.cmd_start = cmd_markers[0]
self.cmd_end = cmd_markers[1]
cmd_start_idxs = np.where(self.data == self.cmd_start)[0]
if cmd_start_idxs.size > 0:
cmd_end_idxs = np.where(self.data[cmd_start_idxs[0]:] == self.cmd_end)[0]
if cmd_end_idxs.size > 0:
if cmd_end_idxs.size < cmd_start_idxs.size: # fewer end markers than starts
cmd_start_idxs = cmd_start_idxs[:cmd_end_idxs.size] # truncate to same length
np_spans = np.stack((cmd_start_idxs, cmd_end_idxs), axis=1)
# if np_spans[0][0] == 0:
# np_spans = np_spans[1:] # skip initial 'start' command
self.cmd_spans = np_spans
print("PlaythroughDataset cmd_spans =", self.cmd_spans)
current_game = [None, None]
for ispan, span in enumerate(self.cmd_spans):
assert np.all(span[0] < span[1]), f"Bad dataset: inconsistent cmd markers: {self.cmd_spans}"
if self.data[span[0]+1] == game_start_tok:
if self.data[span[0]+2] != self.cmd_end:
print("WARNING: skipping false start", span)
continue
if current_game[0] is None:
current_game[0] = ispan
elif current_game[1] is None:
current_game[1] = ispan
self.game_spans.append((current_game[0], current_game[1]))
current_game = [ispan, None]
else:
assert False, f"Shouldn't be possible: {current_game} {ispan} {span}"
assert ispan == len(self.cmd_spans)-1, f"{ispan} {len(self.cmd_spans)}"
if current_game[0] is not None:
assert current_game[1] is None, f"{current_game} {ispan} {span}"
self.game_spans.append((current_game[0], -1))
print("#################### # Games in dataset:", len(self.game_spans))
print(self.game_spans[0:3], self.game_spans[-2:])
else:
self.cmd_start = None
self.cmd_end = None
self.cmd_spans = None
self.build_index()
def print_info(self, name="PlaythroughDataset"):
        num_spans = 0
        num_cmd_tokens = 0
if self.cmd_spans is not None:
num_spans = len(self.cmd_spans)
for span in self.cmd_spans:
num_cmd_tokens += _span_len(span)
print(f"{name} filtering={self.span_filtering} datalen={len(self.data)}"
f" len={len(self)} #games={self.num_games} #cmd_spans={num_spans} #cmd_tokens={num_cmd_tokens}")
@property
def num_games(self):
return len(self.game_spans)
def get_num_steps(self, igame:int): # returns number of steps in the playthrough data for a single game
if igame < 0 or igame >= len(self.game_spans):
return None
game_span = self.game_spans[igame]
if game_span[1] < 0: # last game in the dataset
return len(self.cmd_spans) - game_span[0]
return game_span[1] - game_span[0] # number of cmd_spans
def get_token_idxs(self, igame, start_step=0, end_step=-1, inclusive=(True,True)):
# returns a span: start and end index into the token id data
# inclusive[0]: include the command sequence at the beginning of the start_step
# inclusive[1]: include the command sequence at the end of the end_step (if there is one)
assert 0 <= igame < len(self.game_spans), f"{igame} {len(self.game_spans)}"
game_span = self.game_spans[igame]
num_game_steps = self.get_num_steps(igame)
if end_step < 0:
assert end_step == -1
end_step = num_game_steps
elif start_step >= num_game_steps or end_step > num_game_steps:
print(f"WARNING: get_token_idxs({start_step}, {end_step}) out of range for game {igame} {game_span}")
end_step = min(num_game_steps, end_step)
start_step = min(num_game_steps-1, start_step)
icmd_start = game_span[0] + start_step # index into self.cmd_spans
icmd_end = game_span[0] + end_step # index into self.cmd_spans
start_cmd_span = self.cmd_spans[icmd_start]
if inclusive[0]:
start_idx = start_cmd_span[0]
else:
start_idx = start_cmd_span[1]+1
if icmd_end >= len(self.cmd_spans):
end_cmd_span = (len(self.data), len(self.data)-1) # fake span of length zero
else:
end_cmd_span = self.cmd_spans[icmd_end]
if not inclusive[1] or end_step == num_game_steps: # don't include the next cmd sequence
end_idx = end_cmd_span[0]-1
else:
end_idx = end_cmd_span[1]
return (start_idx, end_idx), _span_len(start_cmd_span), _span_len(end_cmd_span)
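        # Illustrative trace (hypothetical spans, not real data): suppose
        # cmd_spans == [(0, 2), (10, 12), (20, 22)] and they all belong to a
        # single game with game_span (0, -1), so the game has 3 steps. Then
        # get_token_idxs(0, start_step=0, end_step=1) returns ((0, 12), 3, 3):
        # the token span runs from the start of the first command through the
        # end of the command that closes step 1, and both boundary command
        # sequences are 3 tokens long.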
def num_steps_total(self) -> int:
n_total = 0
for igame in range(self.num_games):
n_total += self.get_num_steps(igame)
return n_total
def _add_to_index(self, value):
pos = len(self._index)
if value not in self._index:
self._index[value] = pos
assert len(self._index) == pos+1
return len(self._index)
def build_index(self):
self._index = {} # NOTE: WE DEPEND ON DICTIONARIES PRESERVING INSERTION ORDER (Python 3.6+)
if self.cmd_spans is None:
return # we can't index anything
if self.span_filtering == PlaythroughDataset.TARGET_CMD_TOKENS \
or self.span_filtering == PlaythroughDataset.TARGET_CMD_PROMPTS:
# index only within-game spans that end within a cmd_span
for igame in range(self.num_games):
# game_start_idx = self.get_token_idxs(igame)[0][0] # idx of start of this game
for step in range(self.get_num_steps(igame)):
# from token spans that end with a cmd sequence
if self.span_filtering == PlaythroughDataset.TARGET_CMD_PROMPTS:
# one data sample per step of each game
self._add_to_index((igame, step))
                    else:  # self.span_filtering == PlaythroughDataset.TARGET_CMD_TOKENS # return a record ending at each token
# one data sample per cmd token of each step of each game
span, cmd0_len, cmd1_len = self.get_token_idxs(igame, 0, step, inclusive=(True,True))
game_start_idx = span[0] # idx of start of this game
# if _span_len(span) >= self.block_size:
for j in range(cmd1_len): # for each subspan that ends within the next_cmd token seq
_start_idx = span[1]-self.block_size-j+1
if _start_idx < game_start_idx:
# print(f"Discarding span {subspan} from eval index")
# continue # skip this one, it doesn't satisfy reqs
_start_idx = game_start_idx # clip to start of game (block will be padded on retrieval)
subspan = (_start_idx, span[1]-j) # clipped span, len == block_size or less
self._add_to_index(subspan) # this subspan gets included in the dataset
else: # index all within-game spans of len self.block_size
if self.span_filtering:
assert False, f"Misconfiguration Error: unrecognized span_filtering option: {self.span_filtering}"
for igame in range(self.num_games):
span, _, _ = self.get_token_idxs(igame) # all the tokens for this game
if _span_len(span) < self.block_size+1:
print(f"_index does not include game {igame} because it is too short {span}")
continue
for j in range(_span_len(span)-self.block_size): # for each subspan of len blocksize
subspan = (span[0]+j, span[0]+j+self.block_size+1)
self._add_to_index(subspan[0]) # this subspan gets included in the dataset
self._index_by_idx = list(self._index) # python array: O(1) for access by position (idx)
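        # Sketch of the lookup structures (hypothetical entries): if _index
        # ends up as {(0, 127): 0, (1, 128): 1}, then _index_by_idx is
        # [(0, 127), (1, 128)], so __getitem__(1) resolves in O(1) to the
        # span (1, 128). This relies on dicts preserving insertion order.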
def __len__(self):
if self._index:
return len(self._index)
return len(self.data) - self.block_size
def __getitem__(self, idx):
# grab a chunk of (block_size + 1) characters from the data
if self._index:
assert self._index_by_idx
assert len(self._index) == len(self._index_by_idx) # consistency check
if self.span_filtering == PlaythroughDataset.TARGET_CMD_PROMPTS:
igame, istep = self._index_by_idx[idx]
return self.get_cmd_prompt_for_gamestep(igame, istep, continuation=-10) #+random extra len from 0 to 10
elif self.span_filtering == PlaythroughDataset.TARGET_CMD_TOKENS:
start_idx, end_idx = self._index_by_idx[idx]
#return self.get_left_padded_block(start_idx, end_idx)
return self.get_right_padded_block(start_idx, end_idx)
#else:
assert False, f"UNSUPPORTED span_filtering={self.span_filtering} ({idx}:{self._index[idx]})"
idx = self._index_by_idx[idx]
chunk = self.data[idx:idx + self.block_size + 1]
"""
arrange data and targets so that the first i elements of x
will be asked to predict the i-th element of y. Notice that
the eventual language model will actually make block_size
individual predictions at the same time based on this data,
so we are being clever and amortizing the cost of the forward
pass of the network. So for example if block_size is 4, then
we could e.g. sample a chunk of text "hello", the integers in
x will correspond to "hell" and in y will be "ello". This will
then actually "multitask" 4 separate examples at the same time
in the language model:
- given just "h", please predict "e" as next
- given "he" please predict "l" next
- given "hel" predict "l" next
- given "hell" predict "o" next
In addition, because the DataLoader will create batches of examples,
every forward/backward pass during traning will simultaneously train
a LOT of predictions, amortizing a lot of computation. In particular,
for a batched input of integers X (B, T) where B is batch size and
T is block_size and Y (B, T), the network will during training be
simultaneously training to make B*T predictions, all at once! Of course,
        at test time we can parallelize across batch B, but unlike during training
we cannot parallelize across the time dimension T - we have to run
a forward pass of the network to recover the next single character of the
sequence along each batch dimension, and repeatedly always feed in a next
character to get the next one.
        So yes there is a big asymmetry between train/test time of autoregressive
        models.
        """
        # The lines below assume the standard next-token target shift described
        # above: x is the chunk minus its last token, y is the chunk shifted
        # left by one.
        x = torch.tensor(chunk[:-1], dtype=torch.long)
        y = torch.tensor(chunk[1:], dtype=torch.long)
        return x, y
import os
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import glob
import cv2
import json
import utils.data_utils as util_
from . import data_augmentation
from models.clip import tokenize
import matplotlib.pyplot as plt
import IPython
import random
from datasets.ocid_data_loader import OCIDObject
NUM_VIEWS_PER_SCENE = 5 # 7
BACKGROUND_LABEL = 0
TABLE_LABEL = 1
OBJECTS_LABEL = 2
###### Some utilities #####
def worker_init_fn(worker_id):
""" Use this to bypass issue with PyTorch dataloaders using deterministic RNG for Numpy
https://github.com/pytorch/pytorch/issues/5059
"""
np.random.seed(np.random.get_state()[1][0] + worker_id)
############# Synthetic Tabletop Object Dataset #############
class Tabletop_Object_Dataset(Dataset):
""" Data loader for Tabletop Object Dataset
"""
def __init__(self, base_dir, train_or_test, config):
self.base_dir = base_dir
self.config = config
self.train_or_test = train_or_test
self.use_object_descriptions = config['use_object_descriptions']
# False if 'TOD' in self.base_dir else True #
split_suffix = '{}_set'.format(train_or_test)
# Get a list of all scenes
if train_or_test == 'train':
split_suffix = 'training_set'
self.scene_dirs = sorted(glob.glob(os.path.join(self.base_dir, split_suffix) + '/*'))
self.len = len(self.scene_dirs) * NUM_VIEWS_PER_SCENE
self.name = 'TableTop'
class_id = '03797390'
# class_description_file = os.path.join(base_dir, 'shapenet_class_descriptions.txt'.format(class_id))
class_description_file = os.path.join(base_dir, 'shapenet_class_{}.txt'.format(class_id))
        with open(class_description_file, 'r') as f:
self.object_description_list = f.readlines()
self.shapenet_taxonomy = json.load(open(os.path.join(base_dir, 'shapenet_taxonomy.json')))
if 'v6' in os.path.join(self.base_dir, split_suffix):
global OBJECTS_LABEL
OBJECTS_LABEL = 4
def __len__(self):
return self.len
def process_rgb(self, rgb_img):
""" Process RGB image
- random color warping
"""
rgb_img = rgb_img.astype(np.float32)
if self.config['use_data_augmentation']:
# rgb_img = data_augmentation.random_color_warp(rgb_img)
pass
rgb_img = data_augmentation.standardize_image(rgb_img)
return rgb_img
def process_depth(self, depth_img):
""" Process depth channel
TODO: CHANGE THIS
- change from millimeters to meters
- cast to float32 data type
- add random noise
- compute xyz ordered point cloud
"""
# millimeters -> meters
depth_img = (depth_img / 1000.).astype(np.float32)
# add random noise to depth
if self.config['use_data_augmentation']:
depth_img = data_augmentation.add_noise_to_depth(depth_img, self.config)
# depth_img = data_augmentation.dropout_random_ellipses(depth_img, self.config)
# Compute xyz ordered point cloud
xyz_img = util_.compute_xyz(depth_img, self.config)
if self.config['use_data_augmentation']:
xyz_img = data_augmentation.add_noise_to_xyz(xyz_img, depth_img, self.config)
return xyz_img
def sample_random_description(self, des):
return random.sample(des.split(','), 1)[0]
def get_scene_object_descriptions(self, object_descriptions):
object_prompts = []
valid_indexes = []
filter_words = ['anonymous', 'challenge', '@']
filter_word = lambda x: np.sum([w in x.lower() for w in filter_words]) == 0
threshold_length = [3, 6]
for idx in range(len(object_descriptions)):
target_object_file = object_descriptions[idx]['mesh_filename']
class_id, object_id = target_object_file.split('/')[3], target_object_file.split('/')[4]
related_obj_desription = [l.split(',')[-2] for l in self.object_description_list if object_id in l]
related_class_desription = [self.sample_random_description(info["name"]) for info in self.shapenet_taxonomy if class_id in info['synsetId']]
# print("class description:", related_class_desription)
# print("object description:", related_obj_desription)
if not self.use_object_descriptions:
if len(related_class_desription) > 0:
obj_prompt = related_class_desription[-1]
object_prompts.append(obj_prompt)
valid_indexes.append(idx)
else:
if len(related_obj_desription) > 0:
# filter
obj_prompt = related_obj_desription[-1]
prompt_len = len(obj_prompt.split(' '))
if filter_word(obj_prompt) and prompt_len >= threshold_length[0] and prompt_len < threshold_length[1]:
object_prompts.append(obj_prompt)
valid_indexes.append(idx)
# print(object_prompts)
return object_prompts, valid_indexes
def __getitem__(self, idx):
cv2.setNumThreads(0) # some hack to make sure pyTorch doesn't deadlock. Found at https://github.com/pytorch/pytorch/issues/1355. Seems to work for me
# Get scene directory
scene_idx = idx // NUM_VIEWS_PER_SCENE
scene_dir = self.scene_dirs[scene_idx]
# Get view number
view_num = idx % NUM_VIEWS_PER_SCENE
# RGB image
rgb_img_filename = os.path.join(scene_dir, f"rgb_{view_num:05d}.jpeg")
# print(rgb_img_filename)
rgb_img = cv2.imread(rgb_img_filename)
rgb_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2RGB)
rgb_img = self.process_rgb(rgb_img)
# Depth image
depth_img_filename = os.path.join(scene_dir, f"depth_{view_num:05d}.png")
depth_img = cv2.imread(depth_img_filename, cv2.IMREAD_ANYDEPTH) # This reads a 16-bit single-channel image. Shape: [H x W]
xyz_img = self.process_depth(depth_img)
# Labels
foreground_labels_filename = os.path.join(scene_dir, f"segmentation_{view_num:05d}.png")
foreground_labels = util_.imread_indexed(foreground_labels_filename)
scene_description_filename = os.path.join(scene_dir, "scene_description.txt")
scene_description = json.load(open(scene_description_filename))
scene_description['view_num'] = view_num
object_descriptions = scene_description['object_descriptions']
label_abs_path = '/'.join(foreground_labels_filename.split('/')[-2:]) # Used for evaluation
# Turn these all into torch tensors
rgb_img = data_augmentation.array_to_tensor(rgb_img) # Shape: [3 x H x W]
xyz_img = data_augmentation.array_to_tensor(xyz_img) # Shape: [3 x H x W]
foreground_labels = data_augmentation.array_to_tensor(foreground_labels) # Shape: [H x W]
############### language prompt and mask labels #####################
# get shapenet object descriptions from shapenet org
target_object_classname = ''
target_object_label = ''
object_num = len(object_descriptions)
base_idx = 2
obj_prompts, valid_indices = self.get_scene_object_descriptions(object_descriptions)
assert len(obj_prompts) == len(valid_indices)
c = np.random.randint(len(valid_indices))
target_object_idx = valid_indices[c]
target_name = obj_prompts[c]
target_label_mask = foreground_labels == target_object_idx + base_idx
for i in range(len(valid_indices)):
if obj_prompts[i] == target_name:
target_label_mask |= foreground_labels == valid_indices[i] + base_idx
target_label_mask = target_label_mask.float()
prompt = 'a {} on a table in a simulation engine'.format(target_name)
# print("prompt:", prompt)
        if target_label_mask.sum() <= 100 * 100:  # hardcoded pixel-count threshold
# regenerate sample
return self[np.random.randint(self.len)]
VIS = False
if VIS:
fig = plt.figure(figsize=(12.8 * 3, 4.8 * 3))
fig.suptitle('scene: {} prompt: {}'.format(scene_idx, prompt), fontsize=30)
ax = fig.add_subplot(1, 3, 1)
plt.imshow((data_augmentation.unstandardize_image(rgb_img.T.numpy())).transpose(1,0,2))
ax = fig.add_subplot(1, 3, 2)
plt.imshow(xyz_img.T.numpy().transpose(1,0,2))
ax = fig.add_subplot(1, 3, 3)
mask = target_label_mask.numpy()
plt.imshow(mask)
plt.show()
tokenize_prompt = tokenize([prompt]).detach().numpy()[0]
return {'rgb' : rgb_img,
'xyz' : xyz_img,
'foreground_labels' : foreground_labels,
'target_labels': target_label_mask,
'scene_dir' : scene_dir,
'prompt': prompt,
'view_num' : view_num,
'label_abs_path' : label_abs_path,
'tokenize_prompt': tokenize_prompt,
}
def get_OCID_train_dataloader(base_dir, config, batch_size=8, num_workers=4, shuffle=True):
config = config.copy()
dataset = OCIDObject('train', config)
return DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
worker_init_fn=worker_init_fn)
def get_OCID_test_dataloader(base_dir, config, batch_size=8, num_workers=4, shuffle=True):
config = config.copy()
dataset = OCIDObject('test', config)
return DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
worker_init_fn=worker_init_fn)
def get_TOD_train_dataloader(base_dir, config, batch_size=8, num_workers=4, shuffle=True):
config = config.copy()
dataset = Tabletop_Object_Dataset(base_dir, 'train', config)
return DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
worker_init_fn=worker_init_fn)
def get_TOD_test_dataloader(base_dir, config, batch_size=8, num_workers=4, shuffle=False):
config = config.copy()
dataset = Tabletop_Object_Dataset(base_dir, 'test', config)
return DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
worker_init_fn=worker_init_fn)
############# RGB Images Dataset (Google Open Images) #############
class RGB_Objects_Dataset(Dataset):
""" Data loader for Tabletop Object Dataset
"""
def __init__(self, base_dir, start_list_file, train_or_test, config):
self.base_dir = base_dir
self.config = config
self.train_or_test = train_or_test
# Get a list of all instance labels
f = open(base_dir + start_list_file)
lines = [x.strip() for x in f.readlines()]
self.starts = lines
self.len = len(self.starts)
self.name = 'RGB_Objects'
def __len__(self):
return self.len
def pad_crop_resize(self, img, morphed_label, label):
""" Crop the image around the label mask, then resize to 224x224
"""
H, W, _ = img.shape
# Get tight box around label/morphed label
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(label)
_xmin, _ymin, _xmax, _ymax = util_.mask_to_tight_box(morphed_label)
        x_min = min(x_min, _xmin)
        y_min = min(y_min, _ymin)
        x_max = max(x_max, _xmax)
        y_max = max(y_max, _ymax)
# Make bbox square
x_delta = x_max - x_min
y_delta = y_max - y_min
if x_delta > y_delta:
y_max = y_min + x_delta
else:
x_max = x_min + y_delta
sidelength = x_max - x_min
padding_percentage = np.random.beta(self.config['padding_alpha'], self.config['padding_beta'])
padding_percentage = max(padding_percentage, self.config['min_padding_percentage'])
padding = int(round(sidelength * padding_percentage))
if padding == 0:
print(f'Whoa, padding is 0... sidelength: {sidelength}, %: {padding_percentage}')
padding = 25 # just make it 25 pixels
# Pad and be careful of boundaries
x_min = max(x_min - padding, 0)
x_max = min(x_max + padding, W-1)
y_min = max(y_min - padding, 0)
y_max = min(y_max + padding, H-1)
# Crop
if (y_min == y_max) or (x_min == x_max):
            print('Error: degenerate crop box:', x_min, y_min, x_max, y_max)
print(morphed_label)
print(label)
img_crop = img[y_min:y_max+1, x_min:x_max+1]
morphed_label_crop = morphed_label[y_min:y_max+1, x_min:x_max+1]
label_crop = label[y_min:y_max+1, x_min:x_max+1]
# Resize
img_crop = cv2.resize(img_crop, (224,224))
morphed_label_crop = cv2.resize(morphed_label_crop, (224,224))
label_crop = cv2.resize(label_crop, (224,224))
return img_crop, morphed_label_crop, label_crop
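        # Worked example (illustrative numbers): for a tight box 60 px wide and
        # 40 px tall, the box is squared to side 60; a sampled padding of 20%
        # then adds round(60 * 0.2) = 12 px on every side (clipped to the image
        # bounds) before the crop is resized to 224x224.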
def transform(self, img, label):
""" Process RGB image
- standardize_image
- random color warping
- random horizontal flipping
"""
img = img.astype(np.float32)
# Data augmentation for mask
morphed_label = label.copy()
if self.config['use_data_augmentation']:
if np.random.rand() < self.config['rate_of_morphological_transform']:
morphed_label = data_augmentation.random_morphological_transform(morphed_label, self.config)
if np.random.rand() < self.config['rate_of_translation']:
morphed_label = data_augmentation.random_translation(morphed_label, self.config)
if np.random.rand() < self.config['rate_of_rotation']:
morphed_label = data_augmentation.random_rotation(morphed_label, self.config)
sample = np.random.rand()
if sample < self.config['rate_of_label_adding']:
morphed_label = data_augmentation.random_add(morphed_label, self.config)
elif sample < self.config['rate_of_label_adding'] + self.config['rate_of_label_cutting']:
morphed_label = data_augmentation.random_cut(morphed_label, self.config)
if np.random.rand() < self.config['rate_of_ellipses']:
morphed_label = data_augmentation.random_ellipses(morphed_label, self.config)
# Next, crop the mask with some padding, and resize to 224x224. Make sure to preserve the aspect ratio
img_crop, morphed_label_crop, label_crop = self.pad_crop_resize(img, morphed_label, label)
# Data augmentation for RGB
# if self.config['use_data_augmentation']:
# img_crop = data_augmentation.random_color_warp(img_crop)
img_crop = data_augmentation.standardize_image(img_crop)
# Turn into torch tensors
img_crop = data_augmentation.array_to_tensor(img_crop) # Shape: [3 x H x W]
morphed_label_crop = data_augmentation.array_to_tensor(morphed_label_crop) # Shape: [H x W]
label_crop = data_augmentation.array_to_tensor(label_crop) # Shape: [H x W]
return img_crop, morphed_label_crop, label_crop
def __getitem__(self, idx):
cv2.setNumThreads(0) # some hack to make sure pyTorch doesn't deadlock. Found at https://github.com/pytorch/pytorch/issues/1355. Seems to work for me
# Get label filename
label_filename = self.starts[idx]
label = cv2.imread(str(os.path.join(self.base_dir, 'Labels', label_filename))) # Shape: [H x W x 3]
        label = label[..., 0]  # assumed: keep a single channel of the 3-channel label image
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common python commands used by various build scripts."""
from __future__ import print_function
import __main__
import collections
import contextlib
from datetime import datetime
import email.utils
import errno
import functools
import getpass
import hashlib
import inspect
import operator
import os
import pprint
import re
import signal
import socket
import subprocess
import sys
import tempfile
import time
import traceback
import types
from chromite.cbuildbot import constants
from chromite.lib import cros_logging as logging
from chromite.lib import signals
STRICT_SUDO = False
# For use by ShellQuote. Match all characters that the shell might treat
# specially. This means a number of things:
# - Reserved characters.
# - Characters used in expansions (brace, variable, path, globs, etc...).
# - Characters that an interactive shell might use (like !).
# - Whitespace so that one arg turns into multiple.
# See the bash man page as well as the POSIX shell documentation for more info:
# http://www.gnu.org/software/bash/manual/bashref.html
# http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html
_SHELL_QUOTABLE_CHARS = frozenset('[|&;()<> \t!{}[]=*?~$"\'\\#^')
# The chars that, when used inside of double quotes, need escaping.
# Order here matters as we need to escape backslashes first.
_SHELL_ESCAPE_CHARS = r'\"`$'
def ShellQuote(s):
"""Quote |s| in a way that is safe for use in a shell.
We aim to be safe, but also to produce "nice" output. That means we don't
use quotes when we don't need to, and we prefer to use less quotes (like
putting it all in single quotes) than more (using double quotes and escaping
a bunch of stuff, or mixing the quotes).
While python does provide a number of alternatives like:
- pipes.quote
- shlex.quote
They suffer from various problems like:
- Not widely available in different python versions.
- Do not produce pretty output in many cases.
- Are in modules that rarely otherwise get used.
Note: We don't handle reserved shell words like "for" or "case". This is
because those only matter when they're the first element in a command, and
there is no use case for that. When we want to run commands, we tend to
run real programs and not shell ones.
Args:
s: The string to quote.
Returns:
A safely (possibly quoted) string.
"""
s = s.encode('utf-8')
# See if no quoting is needed so we can return the string as-is.
for c in s:
if c in _SHELL_QUOTABLE_CHARS:
break
else:
if not s:
return "''"
else:
return s
# See if we can use single quotes first. Output is nicer.
if "'" not in s:
return "'%s'" % s
# Have to use double quotes. Escape the few chars that still expand when
# used inside of double quotes.
for c in _SHELL_ESCAPE_CHARS:
if c in s:
s = s.replace(c, r'\%s' % c)
return '"%s"' % s
def ShellUnquote(s):
"""Do the opposite of ShellQuote.
This function assumes that the input is a valid escaped string. The behaviour
is undefined on malformed strings.
Args:
s: An escaped string.
Returns:
The unescaped version of the string.
"""
if not s:
return ''
if s[0] == "'":
return s[1:-1]
if s[0] != '"':
return s
s = s[1:-1]
output = ''
i = 0
while i < len(s) - 1:
# Skip the backslash when it makes sense.
if s[i] == '\\' and s[i + 1] in _SHELL_ESCAPE_CHARS:
i += 1
output += s[i]
i += 1
return output + s[i] if i < len(s) else output
def CmdToStr(cmd):
"""Translate a command list into a space-separated string.
The resulting string should be suitable for logging messages and for
pasting into a terminal to run. Command arguments are surrounded by
quotes to keep them grouped, even if an argument has spaces in it.
Examples:
['a', 'b'] ==> "'a' 'b'"
['a b', 'c'] ==> "'a b' 'c'"
['a', 'b\'c'] ==> '\'a\' "b\'c"'
[u'a', "/'$b"] ==> '\'a\' "/\'$b"'
[] ==> ''
See unittest for additional (tested) examples.
Args:
cmd: List of command arguments.
Returns:
String representing full command.
"""
# Use str before repr to translate unicode strings to regular strings.
return ' '.join(ShellQuote(arg) for arg in cmd)
class CommandResult(object):
"""An object to store various attributes of a child process."""
def __init__(self, cmd=None, error=None, output=None, returncode=None):
self.cmd = cmd
self.error = error
self.output = output
self.returncode = returncode
@property
def cmdstr(self):
"""Return self.cmd as a space-separated string, useful for log messages."""
return CmdToStr(self.cmd)
class RunCommandError(Exception):
"""Error caught in RunCommand() method."""
def __init__(self, msg, result, exception=None):
self.msg, self.result, self.exception = msg, result, exception
if exception is not None and not isinstance(exception, Exception):
raise ValueError('exception must be an exception instance; got %r'
% (exception,))
Exception.__init__(self, msg)
self.args = (msg, result, exception)
def Stringify(self, error=True, output=True):
"""Custom method for controlling what is included in stringifying this.
Each individual argument is the literal name of an attribute
on the result object; if False, that value is ignored for adding
to this string content. If true, it'll be incorporated.
Args:
error: See comment about individual arguments above.
output: See comment about individual arguments above.
"""
items = [
'return code: %s; command: %s' % (
self.result.returncode, self.result.cmdstr),
]
if error and self.result.error:
items.append(self.result.error)
if output and self.result.output:
items.append(self.result.output)
if self.msg:
items.append(self.msg)
return '\n'.join(items)
def __str__(self):
# __str__ needs to return ascii, thus force a conversion to be safe.
return self.Stringify().decode('utf-8', 'replace').encode(
'ascii', 'xmlcharrefreplace')
def __eq__(self, other):
return (type(self) == type(other) and
self.args == other.args)
def __ne__(self, other):
return not self.__eq__(other)
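# Minimal usage sketch (added for illustration; not part of the original
# module). It only exercises the plain data classes above, so no subprocess is
# run; like the rest of this module it assumes the Python 2 runtime.
def _example_run_command_error():
  """Show how CommandResult and RunCommandError compose into a log message."""
  result = CommandResult(cmd=['ls', '/nonexistent'],
                         error='ls: cannot access /nonexistent', returncode=2)
  err = RunCommandError('listing failed', result)
  # Stringify() joins the return code/command line, stderr, stdout (if any),
  # and the message into one multi-line string suitable for logging.
  return err.Stringify()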
class TerminateRunCommandError(RunCommandError):
"""We were signaled to shutdown while running a command.
Client code shouldn't generally know, nor care about this class. It's
used internally to suppress retry attempts when we're signaled to die.
"""
def SudoRunCommand(cmd, user='root', **kwargs):
"""Run a command via sudo.
Client code must use this rather than coming up with their own RunCommand
invocation that jams sudo in- this function is used to enforce certain
rules in our code about sudo usage, and as a potential auditing point.
Args:
cmd: The command to run. See RunCommand for rules of this argument-
SudoRunCommand purely prefixes it with sudo.
user: The user to run the command as.
kwargs: See RunCommand options, it's a direct pass thru to it.
Note that this supports a 'strict' keyword that defaults to True.
If set to False, it'll suppress strict sudo behavior.
Returns:
See RunCommand documentation.
Raises:
This function may immediately raise RunCommandError if we're operating
in a strict sudo context and the API is being misused.
Barring that, see RunCommand's documentation- it can raise the same things
RunCommand does.
"""
sudo_cmd = ['sudo']
strict = kwargs.pop('strict', True)
if user == 'root' and os.geteuid() == 0:
return RunCommand(cmd, **kwargs)
if strict and STRICT_SUDO:
if 'CROS_SUDO_KEEP_ALIVE' not in os.environ:
raise RunCommandError(
'We were invoked in a strict sudo non-interactive context, but no '
'sudo keep alive daemon is running. This is a bug in the code.',
CommandResult(cmd=cmd, returncode=126))
sudo_cmd += ['-n']
if user != 'root':
sudo_cmd += ['-u', user]
# Pass these values down into the sudo environment, since sudo will
# just strip them normally.
extra_env = kwargs.pop('extra_env', None)
extra_env = {} if extra_env is None else extra_env.copy()
for var in constants.ENV_PASSTHRU:
if var not in extra_env and var in os.environ:
extra_env[var] = os.environ[var]
sudo_cmd.extend('%s=%s' % (k, v) for k, v in extra_env.iteritems())
# Finally, block people from passing options to sudo.
sudo_cmd.append('--')
if isinstance(cmd, basestring):
# We need to handle shell ourselves so the order is correct:
# $ sudo [sudo args] -- bash -c '[shell command]'
# If we let RunCommand take care of it, we'd end up with:
# $ bash -c 'sudo [sudo args] -- [shell command]'
shell = kwargs.pop('shell', False)
if not shell:
raise Exception('Cannot run a string command without a shell')
sudo_cmd.extend(['/bin/bash', '-c', cmd])
else:
sudo_cmd.extend(cmd)
return RunCommand(sudo_cmd, **kwargs)
def _KillChildProcess(proc, int_timeout, kill_timeout, cmd, original_handler,
signum, frame):
"""Used as a signal handler by RunCommand.
This is internal to RunCommand. No other code should use this.
"""
if signum:
# If we've been invoked because of a signal, terminate the child process before deferring to the original signal handler.
from __future__ import absolute_import, division, print_function
from scitbx.math import tensor_rank_2_gradient_transform_matrix
from scitbx import matrix
from scitbx.array_family import flex
import cmath
import math
from six.moves import zip
mtps = -2 * math.pi**2
class structure_factor:
def __init__(self, xray_structure, hkl):
self.unit_cell = xray_structure.unit_cell()
self.space_group = xray_structure.space_group()
self.scatterers = xray_structure.scatterers()
self.site_symmetry_table = xray_structure.site_symmetry_table()
self.scattering_type_registry = xray_structure.scattering_type_registry()
self.hkl = hkl
self.d_star_sq = self.unit_cell.d_star_sq(hkl)
def f(self):
result = 0
tphkl = 2 * math.pi * matrix.col(self.hkl)
for scatterer in self.scatterers:
w = scatterer.weight()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = ffp + 1j * fdp
for s in self.space_group:
s_site = s * scatterer.site
alpha = matrix.col(s_site).dot(tphkl)
if (scatterer.flags.use_u_aniso()):
r = s.r().as_rational().as_float()
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
result += w * dw * ff * e
return result
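# For reference (comment added for clarity), the loop in f() evaluates the
# usual structure-factor sum over scatterers and space-group operations s:
#
#   F(hkl) = sum_scatterers sum_s  w * T_s(hkl) * (f0(d*^2) + f' + i*f'') * exp(2*pi*i * hkl . (s * x))
#
# where w is scatterer.weight(), f0 comes from the registered Gaussian form
# factor, and the Debye-Waller factor T_s is exp(-2*pi^2 * u_iso * d*^2) for
# isotropic scatterers or exp(-2*pi^2 * h^T (R_s U* R_s^T) h) for anisotropic
# ones (the module constant mtps is -2*pi^2).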
def df_d_params(self):
tphkl = 2 * math.pi * matrix.col(self.hkl)
h,k,l = self.hkl
d_exp_huh_d_u_star = matrix.col([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
for i_scatterer,scatterer in enumerate(self.scatterers):
site_symmetry_ops = None
if (self.site_symmetry_table.is_special_position(i_scatterer)):
site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
site_constraints = site_symmetry_ops.site_constraints()
if (scatterer.flags.use_u_aniso()):
adp_constraints = site_symmetry_ops.adp_constraints()
w = scatterer.weight()
wwo = scatterer.weight_without_occupancy()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = ffp + 1j * fdp
d_site = matrix.col([0,0,0])
if (not scatterer.flags.use_u_aniso()):
d_u_iso = 0
d_u_star = None
else:
d_u_iso = None
d_u_star = matrix.col([0,0,0,0,0,0])
d_occ = 0j
d_fp = 0j
d_fdp = 0j
for s in self.space_group:
r = s.r().as_rational().as_float()
s_site = s * scatterer.site
alpha = matrix.col(s_site).dot(tphkl)
if (scatterer.flags.use_u_aniso()):
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
site_gtmx = r.transpose()
d_site += site_gtmx * (
w * dw * ff * e * 1j * tphkl)
if (not scatterer.flags.use_u_aniso()):
d_u_iso += w * dw * ff * e * mtps * self.d_star_sq
else:
u_star_gtmx = matrix.sqr(tensor_rank_2_gradient_transform_matrix(r))
d_u_star += u_star_gtmx * (
w * dw * ff * e * mtps * d_exp_huh_d_u_star)
d_occ += wwo * dw * ff * e
d_fp += w * dw * e
d_fdp += w * dw * e * 1j
if (site_symmetry_ops is not None):
gsm = site_constraints.gradient_sum_matrix()
gsm = matrix.rec(elems=gsm, n=gsm.focus())
d_site = gsm * d_site
if (scatterer.flags.use_u_aniso()):
gsm = adp_constraints.gradient_sum_matrix()
gsm = matrix.rec(elems=gsm, n=gsm.focus())
d_u_star = gsm * d_u_star
result = flex.complex_double(d_site)
if (not scatterer.flags.use_u_aniso()):
result.append(d_u_iso)
else:
result.extend(flex.complex_double(d_u_star))
result.extend(flex.complex_double([d_occ, d_fp, d_fdp]))
yield result
def d2f_d_params(self):
tphkl = 2 * math.pi * flex.double(self.hkl)
tphkl_outer = tphkl.matrix_outer_product(tphkl) \
.matrix_symmetric_as_packed_u()
h,k,l = self.hkl
d_exp_huh_d_u_star = flex.double([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.matrix_outer_product(
d_exp_huh_d_u_star).matrix_symmetric_as_packed_u()
for i_scatterer,scatterer in enumerate(self.scatterers):
site_symmetry_ops = None
if (self.site_symmetry_table.is_special_position(i_scatterer)):
site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
site_constraints = site_symmetry_ops.site_constraints()
if (scatterer.flags.use_u_aniso()):
adp_constraints = site_symmetry_ops.adp_constraints()
w = scatterer.weight()
wwo = scatterer.weight_without_occupancy()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = (ffp + 1j * fdp)
d2_site_site = flex.complex_double(3*(3+1)//2, 0j)
if (not scatterer.flags.use_u_aniso()):
d2_site_u_iso = flex.complex_double(flex.grid(3,1), 0j)
d2_site_u_star = None
else:
d2_site_u_iso = None
d2_site_u_star = flex.complex_double(flex.grid(3,6), 0j)
d2_site_occ = flex.complex_double(flex.grid(3,1), 0j)
d2_site_fp = flex.complex_double(flex.grid(3,1), 0j)
d2_site_fdp = flex.complex_double(flex.grid(3,1), 0j)
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso = 0j
d2_u_iso_occ = 0j
d2_u_iso_fp = 0j
d2_u_iso_fdp = 0j
else:
d2_u_star_u_star = flex.complex_double(6*(6+1)//2, 0j)
d2_u_star_occ = flex.complex_double(flex.grid(6,1), 0j)
d2_u_star_fp = flex.complex_double(flex.grid(6,1), 0j)
d2_u_star_fdp = flex.complex_double(flex.grid(6,1), 0j)
d2_occ_fp = 0j
d2_occ_fdp = 0j
for s in self.space_group:
r = s.r().as_rational().as_float()
s_site = s * scatterer.site
alpha = tphkl.dot(flex.double(s_site))
if (scatterer.flags.use_u_aniso()):
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
site_gtmx = flex.double(r.transpose())
site_gtmx.reshape(flex.grid(3,3))
d2_site_site += (w * dw * ff * e * (-1)) * (
site_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
tphkl_outer))
if (not scatterer.flags.use_u_aniso()):
d2_site_u_iso += (w * dw * ff * e * 1j * mtps * self.d_star_sq) \
* site_gtmx.matrix_multiply(tphkl)
else:
u_star_gtmx = tensor_rank_2_gradient_transform_matrix(r)
d2_site_u_star += (w * dw * ff * e * 1j * mtps) \
* site_gtmx.matrix_multiply(
tphkl.matrix_outer_product(d_exp_huh_d_u_star)) \
.matrix_multiply(u_star_gtmx.matrix_transpose())
site_gtmx_tphkl = site_gtmx.matrix_multiply(tphkl)
d2_site_occ += (wwo * dw * ff * e * 1j) * site_gtmx_tphkl
d2_site_fp += (w * dw * e * 1j) * site_gtmx_tphkl
d2_site_fdp += (w * dw * e * (-1)) * site_gtmx_tphkl
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
d2_u_iso_occ += wwo * dw * ff * e * mtps * self.d_star_sq
d2_u_iso_fp += w * dw * e * mtps * self.d_star_sq
d2_u_iso_fdp += 1j * w * dw * e * mtps * self.d_star_sq
else:
d2_u_star_u_star +=(w * dw * ff * e * mtps**2) \
* u_star_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
d2_exp_huh_d_u_star_u_star)
u_star_gtmx_d_exp_huh_d_u_star = u_star_gtmx.matrix_multiply(
d_exp_huh_d_u_star)
d2_u_star_occ += (wwo * dw * ff * e * mtps) \
* u_star_gtmx_d_exp_huh_d_u_star
d2_u_star_fp += (w * dw * e * mtps) \
* u_star_gtmx_d_exp_huh_d_u_star
d2_u_star_fdp += (w * dw * 1j * e * mtps) \
* u_star_gtmx_d_exp_huh_d_u_star
d2_occ_fp += wwo * dw * e
d2_occ_fdp += wwo * dw * e * 1j
if (site_symmetry_ops is None):
i_u = 3
else:
i_u = site_constraints.n_independent_params()
if (not scatterer.flags.use_u_aniso()):
i_occ = i_u + 1
elif (site_symmetry_ops is None):
i_occ = i_u + 6
else:
i_occ = i_u + adp_constraints.n_independent_params()
i_fp, i_fdp, np = i_occ+1, i_occ+2, i_occ+3
if (site_symmetry_ops is not None):
gsm = site_constraints.gradient_sum_matrix()
d2_site_site = gsm.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_site_site)
if (not scatterer.flags.use_u_aniso()):
d2_site_u_iso = gsm.matrix_multiply(d2_site_u_iso)
else:
d2_site_u_star = gsm.matrix_multiply(d2_site_u_star)
d2_site_occ = gsm.matrix_multiply(d2_site_occ)
d2_site_fp = gsm.matrix_multiply(d2_site_fp)
d2_site_fdp = gsm.matrix_multiply(d2_site_fdp)
if (scatterer.flags.use_u_aniso()):
gsm = adp_constraints.gradient_sum_matrix()
d2_site_u_star = d2_site_u_star.matrix_multiply(
gsm.matrix_transpose())
d2_u_star_u_star = gsm \
.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_u_star_u_star)
d2_u_star_occ = gsm.matrix_multiply(d2_u_star_occ)
d2_u_star_fp = gsm.matrix_multiply(d2_u_star_fp)
d2_u_star_fdp = gsm.matrix_multiply(d2_u_star_fdp)
dp = flex.complex_double(flex.grid(np,np), 0j)
paste = dp.matrix_paste_block_in_place
paste(d2_site_site.matrix_packed_u_as_symmetric(), 0,0)
if (not scatterer.flags.use_u_aniso()):
paste(d2_site_u_iso, 0,i_u)
paste(d2_site_u_iso.matrix_transpose(), i_u,0)
else:
paste(d2_site_u_star, 0,i_u)
paste(d2_site_u_star.matrix_transpose(), i_u,0)
paste(d2_site_occ, 0,i_occ)
paste(d2_site_occ.matrix_transpose(), i_occ,0)
paste(d2_site_fp, 0,i_fp)
paste(d2_site_fp.matrix_transpose(), i_fp,0)
paste(d2_site_fdp, 0,i_fdp)
paste(d2_site_fdp.matrix_transpose(), i_fdp,0)
if (not scatterer.flags.use_u_aniso()):
dp[i_u*np+i_u] = d2_u_iso_u_iso
dp[i_u*np+i_occ] = d2_u_iso_occ
dp[i_occ*np+i_u] = d2_u_iso_occ
dp[i_u*np+i_fp] = d2_u_iso_fp
dp[i_fp*np+i_u] = d2_u_iso_fp
dp[i_u*np+i_fdp] = d2_u_iso_fdp
dp[i_fdp*np+i_u] = d2_u_iso_fdp
else:
paste(d2_u_star_u_star.matrix_packed_u_as_symmetric(), i_u, i_u)
paste(d2_u_star_occ, i_u, i_occ)
paste(d2_u_star_occ.matrix_transpose(), i_occ, i_u)
paste(d2_u_star_fp, i_u, i_fp)
paste(d2_u_star_fp.matrix_transpose(), i_fp, i_u)
paste(d2_u_star_fdp, i_u, i_fdp)
paste(d2_u_star_fdp.matrix_transpose(), i_fdp, i_u)
dp[i_occ*np+i_fp] = d2_occ_fp
dp[i_fp*np+i_occ] = d2_occ_fp
dp[i_occ*np+i_fdp] = d2_occ_fdp
dp[i_fdp*np+i_occ] = d2_occ_fdp
yield dp
def d2f_d_params_diag(self):
tphkl = 2 * math.pi * flex.double(self.hkl)
tphkl_outer = tphkl.matrix_outer_product(tphkl) \
.matrix_symmetric_as_packed_u()
h,k,l = self.hkl
d_exp_huh_d_u_star = flex.double([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.matrix_outer_product(
d_exp_huh_d_u_star).matrix_symmetric_as_packed_u()
for i_scatterer,scatterer in enumerate(self.scatterers):
site_symmetry_ops = None
if (self.site_symmetry_table.is_special_position(i_scatterer)):
site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
site_constraints = site_symmetry_ops.site_constraints()
if (scatterer.flags.use_u_aniso()):
adp_constraints = site_symmetry_ops.adp_constraints()
w = scatterer.weight()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = (ffp + 1j * fdp)
d2_site_site = flex.complex_double(3*(3+1)//2, 0j)
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso = 0j
else:
d2_u_star_u_star = flex.complex_double(6*(6+1)//2, 0j)
for s in self.space_group:
r = s.r().as_rational().as_float()
s_site = s * scatterer.site
alpha = tphkl.dot(flex.double(s_site))
if (scatterer.flags.use_u_aniso()):
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
site_gtmx = flex.double(r.transpose())
site_gtmx.reshape(flex.grid(3,3))
d2_site_site += (w * dw * ff * e * (-1)) * (
site_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
tphkl_outer))
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
else:
u_star_gtmx = tensor_rank_2_gradient_transform_matrix(r)
d2_u_star_u_star +=(w * dw * ff * e * mtps**2) \
* u_star_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
d2_exp_huh_d_u_star_u_star)
if (site_symmetry_ops is None):
i_u = 3
else:
i_u = site_constraints.n_independent_params()
if (not scatterer.flags.use_u_aniso()):
i_occ = i_u + 1
elif (site_symmetry_ops is None):
i_occ = i_u + 6
else:
i_occ = i_u + adp_constraints.n_independent_params()
np = i_occ+3
if (site_symmetry_ops is not None):
gsm = site_constraints.gradient_sum_matrix()
d2_site_site = gsm.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_site_site)
if (scatterer.flags.use_u_aniso()):
gsm = adp_constraints.gradient_sum_matrix()
d2_u_star_u_star = gsm \
.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_u_star_u_star)
#
dpd = flex.complex_double(flex.grid(np,1), 0j)
# deephop/onmt/myutils.py
import re
import numpy as np
import networkx as nx
import rdkit
import torchtext
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import ChemicalFeatures, AllChem
import os
import torch
import time
import dgl
import dgl.function as fn
import torch.nn as nn
import torch.nn.functional as F
import multiprocessing
import shutil
import torch.utils.data as data
ATOMS = ['C', 'Si', 'F', 'I', 'Sn', 'N', 'P', 'Cl', 'B', 'Se', 'S', 'O', 'Br']
# ATOMS = ['Si', 'S', 'C', 'O', 'Se', 'P', 'F', 'Br', 'I', 'B', 'Cl', 'Sn', 'N']
EMB_ATOMS = ['Si', 'S', 'C', 'O', 'Se', 'P', 'F', 'Br', 'I', 'B', 'Cl', 'Sn', 'N', 'c', 'n', 'o', 's', 'p']
def extend_atoms_in_smiles(smiles):
patterns = ['B r', 'C l', 'S i']
t_patterns = ['Br', 'Cl', 'Si']
for i in range(len(patterns)):
smiles = smiles.replace(patterns[i], t_patterns[i])
return smiles
def get_atoms(smiles):
atoms = []
smiles = smiles.strip().split(' ')
for i in range(len(smiles)):
if smiles[i] in ATOMS:
atoms.append(smiles[i])
return atoms
def rawsmiles2graph(smiles):
# smiles = smiles.strip().replace(' ','')
m = Chem.MolFromSmiles(smiles)
g = nx.Graph()
fdef_name = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
factory = ChemicalFeatures.BuildFeatureFactory(fdef_name)
feats = factory.GetFeaturesForMol(m)
# Nodes
for i in range(0, m.GetNumAtoms()):
atom_i = m.GetAtomWithIdx(i)
g.add_node(i, a_type=atom_i.GetSymbol(), a_num=atom_i.GetAtomicNum(), acceptor=0, donor=0,
aromatic=atom_i.GetIsAromatic(), hybridization=atom_i.GetHybridization(),
num_h=atom_i.GetTotalNumHs())
# Donor and Acceptor properties
for i in range(0, len(feats)):
if feats[i].GetFamily() == 'Donor':
node_list = feats[i].GetAtomIds()
for i in node_list:
g.nodes[i]['donor'] = 1
elif feats[i].GetFamily() == 'Acceptor':
node_list = feats[i].GetAtomIds()
for i in node_list:
g.nodes[i]['acceptor'] = 1
# Edges
for i in range(0, m.GetNumAtoms()):
for j in range(0, m.GetNumAtoms()):
e_ij = m.GetBondBetweenAtoms(i, j)
if e_ij is not None:
g.add_edge(i, j, b_type=e_ij.GetBondType())
else:
# Unbonded
g.add_edge(i, j, b_type=None, )
return g
def extract_graph_feature(g, hydrogen=True, with_3d=False):
h = []
# node features
for n, d in list(g.nodes()(data=True)):
is_atom = True
try:
a = d['a_type']
except:
is_atom = False
if is_atom:
h_t = []
# Atom type (One-hot)
h_t += [int(d['a_type'] == x) for x in ATOMS]
# Atomic number
h_t.append(d['a_num'])
# Acceptor
h_t.append(d['acceptor'])
# Donor
h_t.append(d['donor'])
# Aromatic
h_t.append(int(d['aromatic']))
# Hybradization
h_t += [int(d['hybridization'] == x) for x in
[rdkit.Chem.rdchem.HybridizationType.SP, rdkit.Chem.rdchem.HybridizationType.SP2,
rdkit.Chem.rdchem.HybridizationType.SP3]]
# If number hydrogen is used as a
if hydrogen:
h_t.append(d['num_h'])
if with_3d:
h_t.extend(d['corr'])
else:
if not with_3d:
h_t = [0] * 21
else:
h_t = [0] * 24
h.append(h_t)
# edge features
remove_edges = []
e = {}
for n1, n2, d in list(g.edges()(data=True)):
e_t = []
if d['b_type'] is None:
remove_edges += [(n1, n2)]
else:
e_t += [int(d['b_type'] == x) for x in
[rdkit.Chem.rdchem.BondType.SINGLE, rdkit.Chem.rdchem.BondType.DOUBLE,
rdkit.Chem.rdchem.BondType.TRIPLE, rdkit.Chem.rdchem.BondType.AROMATIC]]
if e_t:
e[(n1, n2)] = e_t
for edg in remove_edges:
g.remove_edge(*edg)
return h, e
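# Illustrative usage sketch (added; not part of the original file). The SMILES
# string and the expected sizes are examples derived from the code above.
def _example_graph_features():
    """Build a molecule graph for ethanol and extract node/edge features."""
    g = rawsmiles2graph('CCO')            # 3 heavy atoms: C, C, O
    h, e = extract_graph_feature(g)       # h: per-node features, e: per-bond features
    assert len(h) == 3                    # one feature vector per atom
    assert len(h[0]) == 21                # 13 atom types + 8 extra descriptors (no 3D coords)
    assert len(e) == 2                    # two bonds: C-C and C-O (unbonded pairs removed)
    return h, e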
class MolData(data.Dataset):
def __init__(self, data, lengths):
self.data = data
self.lengths = lengths
self.gs = []
for i in range(len(data)):
self.gs.append(smiles2graph(self.data[i], self.lengths[i]))
def __getitem__(self, index):
return self.gs[index]
def __len__(self):
return len(self.data)
def collate_dgl(samples):
# The input `samples` is a list of graphs
batched_graph = dgl.batch(samples)
return batched_graph
def collate_g(batch):
batch_sizes = np.max(np.array([[len(input_b[1]), len(input_b[1][0]), len(input_b[2]),
len(list(input_b[2].values())[0])]
if input_b[2] else
[len(input_b[1]), len(input_b[1][0]), 0, 0]
for (input_b, target_b) in batch]), axis=0)
g = np.zeros((len(batch), batch_sizes[0], batch_sizes[0]))
h = np.zeros((len(batch), batch_sizes[0], batch_sizes[1]))
e = np.zeros((len(batch), batch_sizes[0], batch_sizes[0], batch_sizes[3]))
target = np.zeros((len(batch), len(batch[0][1])))
for i in range(len(batch)):
num_nodes = len(batch[i][0][1])
# Adjacency matrix
g[i, 0:num_nodes, 0:num_nodes] = batch[i][0][0]
# Node features
h[i, 0:num_nodes, :] = batch[i][0][1]
# Edges
for edge in batch[i][0][2].keys():
e[i, edge[0], edge[1], :] = batch[i][0][2][edge]
e[i, edge[1], edge[0], :] = batch[i][0][2][edge]
# Target
target[i, :] = batch[i][1]
g = torch.FloatTensor(g)
h = torch.FloatTensor(h)
e = torch.FloatTensor(e)
target = torch.FloatTensor(target)
return g, h, e, target
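# Shape summary for collate_g (comment added for clarity): given a batch of
# ((adjacency, node_features, edge_dict), target) samples, it zero-pads to the
# largest graph in the batch and returns
#   g: [batch, N_max, N_max]               dense adjacency matrices
#   h: [batch, N_max, n_node_feat]         node feature matrices
#   e: [batch, N_max, N_max, n_edge_feat]  symmetric edge feature tensors
#   target: [batch, n_targets]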
def get_all_atoms(filenames, with_reaction_type=True):
atom_set = set()
for filename in filenames:
data = []
with open(filename, 'r') as f:
data = f.readlines()
for i in range(len(data)):
if with_reaction_type:
data[i] = ''.join(data[i].strip().split()[1:])
else:
data[i] = ''.join(data[i].strip().split())
m = Chem.MolFromSmiles(data[i])
for i in range(0, m.GetNumAtoms()):
atom_i = m.GetAtomWithIdx(i)
symbol = atom_i.GetSymbol()
atom_set.add(symbol)
return atom_set
def canonicalize(filenames, atom_set):
for filename in filenames:
with open(filename, 'r') as f:
data = f.readlines()
for i in range(len(data)):
for atom in atom_set:
if len(atom) > 1:
data[i] = data[i].strip().replace(atom[0] + ' ' + atom[1], atom)
with open('modify_data/' + filename, 'w') as f:
for line in data:
f.write(line)
f.write('\n')
vocab = {'<unk>': 0, '<blank>': 1, '<s>': 2, '</s>': 3, 'c': 4, 'C': 5, '(': 6, ')': 7, '1': 8, 'O': 9, '2': 10,
'=': 11, 'N': 12, 'n': 13, '3': 14, 'F': 15, '[': 16, ']': 17, '@': 18, 'H': 19, '-': 20, 'Cl': 21, '.': 22,
'4': 23, 'S': 24, 'Br': 25, '#': 26, 's': 27, '+': 28, 'o': 29, '5': 30, '/': 31, 'B': 32, 'I': 33, 'Si': 34,
'\\': 35, '6': 36, 'P': 37, 'M': 38, 'g': 39, 'Sn': 40, '7': 41, 'Z': 42, 'u': 43, 'e': 44, 'L': 45, 'i': 46,
'8': 47, 'Se': 48, '9': 49, 'K': 50, 't': 51, 'd': 52}
# vocab = {'<unk>': 0, '<blank>': 1, '<s>': 2, '</s>': 3, 'c': 4, 'C': 5, '(': 6, ')': 7, '1': 8, 'O': 9, '2': 10, '=': 11, 'N': 12, 'n': 13, '3': 14, 'F': 15, '[': 16, ']': 17, '@': 18, 'H': 19, '-': 20, 'Cl': 21, '.': 22, '4': 23, 'S': 24, 'Br': 25, '<RX_1>': 26, '<RX_2>': 27, '#': 28, 's': 29, '<RX_6>': 30, '+': 31, 'o': 32, '<RX_3>': 33, '5': 34, '/': 35, '<RX_7>': 36, 'B': 37, 'I': 38, 'Si': 39, '<RX_9>': 40, '\\': 41, '<RX_4>': 42, '<RX_8>': 43, '6': 44, 'P': 45, '<RX_5>': 46, 'M': 47, 'g': 48, 'Sn': 49, '<RX_10>': 50, '7': 51, 'Z': 52, 'u': 53, 'e': 54, 'L': 55, 'i': 56, '8': 57, 'Se': 58, '9': 59, 'K': 60, 't': 61, 'd': 62}
inver_vocab = {vocab[key]: key for key in vocab}
invalid_words = ['<unk>', '<RX_9>', '<RX_5>', '<RX_2>', '<blank>', '<RX_10>', '<RX_8>', '</s>', '<RX_6>', '<s>',
'<RX_3>', '<RX_4>', '<RX_1>', '<RX_7>']
def recover_to_raw(src): # len * batch
src = src.transpose(0, 1).contiguous() # batch * len
w_batch, w_len = src.size()
rawstr = []
for i in range(w_batch):
smile = []
for j in range(w_len):
word = inver_vocab[src[i][j].item()]
smile.append(word)
rawstr.append(smile)
return rawstr
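# Small usage sketch (added for illustration): recover_to_raw maps token ids
# back to characters using inver_vocab. The input layout is [seq_len x batch].
def _example_recover_to_raw():
    ids = torch.tensor([[5], [5], [9]])   # 'C', 'C', 'O' for a batch of one
    return recover_to_raw(ids)            # -> [['C', 'C', 'O']]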
def mpnn_emb(model, g_loader, ccuda=1):
for i, (g, h, e, target) in enumerate(g_loader):
if ccuda >= 0:
g = g.cuda(ccuda)
h = h.cuda(ccuda)
e = e.cuda(ccuda)
return model(g, h, e)
def gcn_emb(model, gs, device):
batched_graph = dgl.batch(gs)
batched_graph = batched_graph.to(device)
graph_encode_emb = model(batched_graph, batched_graph.ndata['init_h'])
return graph_encode_emb
def check_num_zero(emb):
num = []
e1, e2, e3 = emb.size()
for i in range(e1):
n = 0
for j in range(e2):
if torch.sum(emb[i][j] == torch.zeros(e3).cuda()).item() == e3:
n += 1
num.append(n)
return num
# def cat_two_emb(emb1, emb2):
# e1, e2 = emb1.size(2), emb2.size(2)
# src = src.transpose(0, 1).contiguous()[:,:,0] # batch * len
# w_batch, w_len = src.size()
# pre_pad = torch.zeros(w_batch, w_len, e2).cuda(0)
# emb1 = torch.cat((emb1, em), dim = 3)
# # num_atom = []
# for i in range(w_batch):
# num = 0
# index = 0
# for j in range(w_len):
# if inver_vocab[src[i][j].item()] in EMB_ATOMS:
# num += 1
# emb1[i][j][e1:] = emb2[i][index]
# index += 1
# # num_atom.append(num)
# # print(emb2[i][index:]) # should be all zero tensors
# # print(num_atom)
# return emb1
def need_emb(word, EMB_ATOMS):
return word in EMB_ATOMS
def str2molgraph(rawstr,
length): # rawstr :tuple() e.g. ('<RX_6>', 'N', 'c', '1', 'n', 'c', '2', '[', 'n', 'H', ']', 'c', '(', 'C', 'C', 'C', 'c', '3', 'c', 's', 'c', '(', 'C', '(', '=', 'O', ')', 'O', ')', 'c', '3', ')', 'c', 'c', '2', 'c', '(', '=', 'O', ')', '[', 'n', 'H', ']', '1')
# if length == 1:
# rawstr = [s for s in rawstr[0]]
# length = len(rawstr)
smiles = ''.join(rawstr[:length])
m = Chem.MolFromSmiles(smiles)
code = AllChem.EmbedMolecule(m)
code = AllChem.MMFFOptimizeMolecule(m, maxIters=200)
g = nx.Graph()
fdef_name = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
factory = ChemicalFeatures.BuildFeatureFactory(fdef_name)
feats = factory.GetFeaturesForMol(m)
atom_true_index = {}
atom_index = 0
# Nodes
for i in range(len(rawstr)):
if not need_emb(rawstr[i], EMB_ATOMS):
g.add_node(i)
else:
atom_true_index[atom_index] = i # meanwhile, set a map dict to find the true index of atoms
atom_i = m.GetAtomWithIdx(atom_index)
g.add_node(i, a_type=atom_i.GetSymbol(), a_num=atom_i.GetAtomicNum(), acceptor=0, donor=0,
aromatic=atom_i.GetIsAromatic(), hybridization=atom_i.GetHybridization(),
num_h=atom_i.GetTotalNumHs(), corr=list(m.GetConformer().GetAtomPosition(atom_index)))
atom_index += 1
# Donor and Acceptor properties
for i in range(0, len(feats)):
if feats[i].GetFamily() == 'Donor':
node_list = feats[i].GetAtomIds()
for i in node_list:
g.nodes[atom_true_index[i]]['donor'] = 1
elif feats[i].GetFamily() == 'Acceptor':
node_list = feats[i].GetAtomIds()
for i in node_list:
g.nodes[atom_true_index[i]]['acceptor'] = 1
# Edges
for i in range(0, m.GetNumAtoms()):
@jit(nopython=True, nogil=NOGIL)
def adversarial_sum_absolute_errors(y_l, y_i, y_r):
    if len(y_i) == 0:
return sum_absolute_errors(y_l) + sum_absolute_errors(y_r), (0, 0)
y_i = np.sort(y_i)
max_error = 0
indices = None
for i in range(len(y_i)):
error = 0
error += sum_absolute_errors(np.concatenate((y_l, y_i[:i])))
error += sum_absolute_errors(np.concatenate((y_r, y_i[i:])))
if error > max_error:
max_error = error
indices = (0, i)
for i in range(len(y_i)):
error = 0
error += sum_absolute_errors(np.concatenate((y_l, y_i[i:])))
error += sum_absolute_errors(np.concatenate((y_r, y_i[:i])))
if error > max_error:
max_error = error
indices = (i, len(y_i))
return max_error, indices
@jit(nopython=True, nogil=NOGIL)
def chen_adversarial_sum_absolute_errors(y_l, y_li, y_ri, y_r):
if len(y_li) == 0 and len(y_ri) == 0:
return sum_absolute_errors(y_l) + sum_absolute_errors(y_r), 1
s1 = sum_absolute_errors(np.concatenate((y_l, y_li))) + sum_absolute_errors(
np.concatenate((y_r, y_ri))
)
s2 = sum_absolute_errors(y_l) + sum_absolute_errors(
np.concatenate((y_li, y_ri, y_r))
)
s3 = sum_absolute_errors(np.concatenate((y_l, y_li, y_ri))) + sum_absolute_errors(
y_r
)
s4 = sum_absolute_errors(np.concatenate((y_l, y_ri))) + sum_absolute_errors(
np.concatenate((y_r, y_li))
)
worst_case = max(s1, s2, s3, s4)
if s1 == worst_case:
return s1, 1
elif s2 == worst_case:
return s2, 2
elif s3 == worst_case:
return s3, 3
else:
return s4, 4
@jit(nopython=True, nogil=NOGIL)
def weighted_gini(l_0, l_1, r_0, r_1):
l_t = l_0 + l_1
r_t = r_0 + r_1
# Prevent division by 0
if l_t == 0:
l_p = 1.0
else:
l_p = l_0 / (l_0 + l_1)
if r_t == 0:
r_p = 1.0
else:
r_p = r_0 / (r_0 + r_1)
gini = l_t * (1 - (l_p ** 2) - ((1 - l_p) ** 2)) + r_t * (
1 - (r_p ** 2) - ((1 - r_p) ** 2)
)
total = l_t + r_t
if total != 0:
gini /= total
return gini
else:
return 1.0
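# Worked example (added for clarity): with l_0=30, l_1=10, r_0=5, r_1=25 the
# left Gini is 1 - 0.75^2 - 0.25^2 = 0.375 and the right Gini is
# 1 - (1/6)^2 - (5/6)^2 = 10/36 ~= 0.278, so the weighted impurity is
# (40 * 0.375 + 30 * 10/36) / 70 = (15 + 8.333) / 70 ~= 0.333.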
@jit(nopython=True, nogil=NOGIL)
def _counts_to_one_class_adv_gini(counts, rho, chen_heuristic):
# Apply rho by moving a number of samples back from intersect
rho_inv = 1.0 - rho
left_mal = counts[LEFT][1] + int(round(rho_inv * counts[LEFT_INTERSECT][1]))
right_mal = counts[RIGHT][1] + int(round(rho_inv * counts[RIGHT_INTERSECT][1]))
left_i_mal = int(round(rho * counts[LEFT_INTERSECT][1]))
right_i_mal = int(round(rho * counts[RIGHT_INTERSECT][1]))
# Compute the adversarial gini gain
if chen_heuristic:
adv_gini, _ = chen_adversarial_gini_gain_one_class(
counts[LEFT][0],
left_mal,
counts[RIGHT][0],
right_mal,
left_i_mal,
right_i_mal,
)
else:
adv_gini, _ = adversarial_gini_gain_one_class(
counts[LEFT][0],
left_mal,
counts[RIGHT][0],
right_mal,
left_i_mal + right_i_mal,
)
return adv_gini
@jit(nopython=True, nogil=NOGIL)
def _counts_to_two_class_adv_gini(counts, rho, chen_heuristic):
# Apply rho by moving a number of samples back from intersect
rho_inv = 1.0 - rho
left = counts[LEFT] + np.rint(rho_inv * counts[LEFT_INTERSECT]).astype(np.int64)
right = counts[RIGHT] + np.rint(rho_inv * counts[RIGHT_INTERSECT]).astype(np.int64)
left_i = np.rint(rho * counts[LEFT_INTERSECT]).astype(np.int64)
right_i = np.rint(rho * counts[RIGHT_INTERSECT]).astype(np.int64)
# Compute the adversarial gini gain
if chen_heuristic:
adv_gini, _, _ = chen_adversarial_gini_gain_two_class(
left[0],
left[1],
left_i[0],
left_i[1],
right_i[0],
right_i[1],
right[0],
right[1],
)
else:
adv_gini, _, _ = adversarial_gini_gain_two_class(
left[0],
left[1],
left_i[0],
left_i[1],
right_i[0],
right_i[1],
right[0],
right[1],
)
return adv_gini
class BaseGrootTree(BaseEstimator):
"""
Base class for GROOT decision trees.
Implements high level fitting operation and exporting to strings/JSON.
"""
def __init__(
self,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
robust_weight=1.0,
attack_model=None,
chen_heuristic=False,
compile=True,
random_state=None,
):
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.robust_weight = robust_weight
self.attack_model = attack_model
self.chen_heuristic = chen_heuristic
self.compile = compile
self.random_state = random_state
def fit(self, X, y, check_input=True):
"""
Build a robust and fair binary decision tree from the training set
(X, y) using greedy splitting according to the weighted adversarial
Gini impurity and fairness impurity.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The class labels as integers 0 (benign) or 1 (malicious)
Returns
-------
self : object
Fitted estimator.
"""
if check_input:
X, y = check_X_y(X, y)
y = self._check_target(y)
self.n_samples_, self.n_features_in_ = X.shape
if self.attack_model is None:
attack_model = [""] * X.shape[1]
else:
attack_model = self.attack_model
# Turn numerical features in attack model into tuples to make fitting
# code simpler
self.attack_model_ = np.array(
_attack_model_to_tuples(attack_model, X.shape[1]), dtype=X.dtype
)
self.random_state_ = check_random_state(self.random_state)
if self.max_features == "sqrt":
self.max_features_ = int(np.sqrt(self.n_features_in_))
elif self.max_features == "log2":
self.max_features_ = int(np.log2(self.n_features_in_))
elif self.max_features is None:
self.max_features_ = self.n_features_in_
else:
self.max_features_ = self.max_features
if self.max_features_ == 0:
self.max_features_ = 1
# Keep track of the minimum and maximum split value for each feature
constraints = np.concatenate(
(np.min(X, axis=0).reshape(-1, 1), np.max(X, axis=0).reshape(-1, 1)), axis=1
)
self.root_ = self.__fit_recursive(X, y, constraints)
# Compile the tree into a representation that is faster when predicting
if self.compile:
self.compiled_root_ = CompiledTree(self.root_)
return self
def __fit_recursive(self, X, y, constraints, depth=0):
"""
Recursively fit the decision tree on the training dataset (X, y).
The constraints make sure that leaves are well formed, e.g. don't
cross an earlier split. Stop when the depth has reached self.max_depth,
when a leaf is pure or when the leaf contains too few samples.
"""
if (
(self.max_depth is not None and depth == self.max_depth)
or len(y) < self.min_samples_split
or np.all(y == y[0])
):
return self._create_leaf(y)
current_score = self._score(y)
rule, feature, split_score = self.__best_adversarial_decision(X, y, constraints)
score_gain = current_score - split_score
if rule is None or score_gain <= 0.00:
return self._create_leaf(y)
# Assert that the split obeys constraints made by previous splits
assert rule >= constraints[feature][0]
assert rule < constraints[feature][1]
X_left, y_left, X_right, y_right = self._split_left_right(
X,
y,
rule,
feature,
)
if len(y_left) < self.min_samples_leaf or len(y_right) < self.min_samples_leaf:
return self._create_leaf(y)
# Set the right bound and store old one for after recursion
old_right_bound = constraints[feature][1]
constraints[feature][1] = rule
left_node = self.__fit_recursive(X_left, y_left, constraints, depth + 1)
# Reset right bound, set left bound, store old one for after recursion
constraints[feature][1] = old_right_bound
old_left_bound = constraints[feature][0]
constraints[feature][0] = rule
right_node = self.__fit_recursive(X_right, y_right, constraints, depth + 1)
# Reset the left bound
constraints[feature][0] = old_left_bound
node = NumericalNode(feature, rule, left_node, right_node, _TREE_UNDEFINED)
return node
def __best_adversarial_decision(self, X, y, constraints):
"""
Find the best split by iterating through each feature and scanning
it for that feature's optimal split.
"""
best_score = 10e9
best_rule = None
best_feature = None
# If there is a limit on features to consider in a split then choose
# that number of random features.
all_features = np.arange(self.n_features_in_)
features = self.random_state_.choice(
all_features, size=self.max_features_, replace=False
)
for feature in features:
score, decision_rule = self._scan_feature(X, y, feature, constraints)
if decision_rule is not None and score < best_score:
best_score = score
best_rule = decision_rule
best_feature = feature
return best_rule, best_feature, best_score
def to_string(self):
result = ""
result += f"Parameters: {self.get_params()}\n"
if hasattr(self, "root_"):
result += f"Tree:\n{self.root_.pretty_print()}"
else:
result += "Tree has not yet been fitted"
return result
def to_json(self, output_file="tree.json"):
dictionary = {
"params": self.get_params(),
}
if hasattr(self, "root_"):
dictionary["tree"] = self.root_.to_json()
else:
dictionary["tree"] = None
if output_file is None:
return dictionary
else:
with open(output_file, "w") as fp:
json.dump(dictionary, fp, indent=2, default=convert_numpy)
def to_xgboost_json(self, output_file="tree.json"):
check_is_fitted(self, "root_")
dictionary, _ = self.root_.to_xgboost_json(0, 0)
if output_file is None:
return dictionary
else:
with open(output_file, "w") as fp:
# If saving to file then surround dict in list brackets
json.dump([dictionary], fp, indent=2, default=convert_numpy)
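# Usage sketch (added for illustration; the dataset and parameter values are
# assumptions, not taken from the original project). GrootTreeClassifier is
# defined below, so the name resolves when this helper is actually called.
def _example_groot_fit():
    import numpy as np
    X = np.array([[0.0, 1.0], [0.2, 0.9], [0.8, 0.1], [1.0, 0.0]])
    y = np.array([0, 0, 1, 1])
    # attack_model gives the per-feature perturbation the adversary may apply;
    # here we assume a radius of 0.1 on both features.
    tree = GrootTreeClassifier(max_depth=2, attack_model=[0.1, 0.1], random_state=1)
    tree.fit(X, y)
    return tree.to_json(output_file=None)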
class GrootTreeClassifier(BaseGrootTree, ClassifierMixin):
"""
A robust decision tree for binary classification.
"""
def __init__(
self,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
robust_weight=1.0,
attack_model=None,
one_adversarial_class=False,
chen_heuristic=False,
compile=True,
random_state=None,
):
"""
Parameters
----------
max_depth : int, optional
The maximum depth for the decision tree once fitted.
min_samples_split : int, optional
The minimum number of samples required to split a node.
min_samples_leaf : int, optional
The minimum number of samples required to make a leaf.
max_features : int or {"sqrt", "log2"}, optional
The number of features to consider while making each split, if None then all features are considered.
robust_weight : float, optional
The ratio of samples that are actually moved by an adversary.
attack_model : array-like of shape (n_features,), optional
Attacker capabilities for perturbing X. By default, all features are considered not perturbable.
one_adversarial_class : bool, optional
Whether one class (malicious, 1) perturbs their samples or if both classes (benign and malicious, 0 and 1) do so.
chen_heuristic : bool, optional
Whether to use the heuristic for the adversarial Gini impurity from Chen et al. (2019) instead of GROOT's adversarial Gini impurity.
compile : bool, optional
Whether to compile the tree for faster predictions.
random_state : int, optional
Controls the sampling of the features to consider when looking for the best split at each node (if max_features < n_features).
("[-] Ca ,get error in VALUE_METHOD at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1b:
# print 'here 0x1b VALUE_ENUM in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_ENUM at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1c:
# print 'here 0x1c VALUE_ARRAY in class : ' + str(curClass_idx)
if self.arg != 0x00:
print ("[-] Ca ,get error in VALUE_ARRAY")
os._exit(1)
self.value.append(EncodedArray(file))
elif self.type == 0x1d:
# print 'here 0x1d VALUE_ANNOTATION in class : ' + str(curClass_idx)
if self.arg != 0:
os._exit(1)
self.value.append(EncodedAnnotation(file))
# if case(0x1e):
# print 'here 0x1e VALUE_NULL in class : ' + str(curClass_idx)
# break
# if case(0x1f):
# print 'here 0x1f VALUE_BOOLEAN in class : ' + str(curClass_idx)
# break
def copytofile(self, file):
file.write(struct.pack("B", self.onebyte))
if self.type <= 0x1b:
for i in range(0, self.arg+1):
file.write(struct.pack("B", self.value[i]))
elif self.type == 0x1c:
self.value[0].copytofile(file)
elif self.type == 0x1d:
self.value[0].copytofile(file)
def makeoffset(self, off):
off += 1
if self.type <= 0x1b:
off += self.arg+1
elif self.type == 0x1c:
off = self.value[0].makeoffset(off)
elif self.type == 0x1d:
off = self.value[0].makeoffset(off)
return off
def printf(self):
print("encoded value :", self.type, self.arg)
# ----------------------------------------------------------------------------------------
class AnnotationElement:
def __init__(self, file):
self.name_idx = readunsignedleb128(file)
self.value = EncodedValue(file)
def copytofile(self, file):
writeunsignedleb128(self.name_idx, file)
self.value.copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.name_idx)
off = self.value.makeoffset(off)
return off
class EncodedAnnotation:
def __init__(self, file):
self.type_idx = readunsignedleb128(file)
self.size = readunsignedleb128(file)
self.elements = [] # annotation_element[size]
for i in range(0, self.size):
self.elements.append(AnnotationElement(file))
def copytofile(self, file):
writeunsignedleb128(self.type_idx, file)
writeunsignedleb128(self.size, file)
for i in range(0, self.size):
self.elements[i].copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.type_idx)
off += unsignedleb128forlen(self.size)
for i in range(0, self.size):
off = self.elements[i].makeoffset(off)
return off
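# The ULEB128 helpers used throughout this module (readunsignedleb128,
# writeunsignedleb128, unsignedleb128forlen) are assumed to be defined
# elsewhere in the file. For reference only, a minimal Python 3 sketch of the
# encoding they implement (names and behaviour here are illustrative, not the
# module's actual implementation):
def _uleb128_encode_sketch(value):
    # 7 bits per byte, most significant bit set on every byte except the last.
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def _uleb128_decode_sketch(data):
    # Decode one ULEB128 value from a bytes object, returning (value, bytes_consumed).
    result = 0
    shift = 0
    for i, byte in enumerate(data):
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result, i + 1
        shift += 7
    raise ValueError("truncated ULEB128")

# e.g. _uleb128_encode_sketch(300) == b'\xac\x02' and
#      _uleb128_decode_sketch(b'\xac\x02') == (300, 2)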
class DexHeader:
def __init__(self, file, mode=0):
if mode == 0:
self.start = file.tell()
self.magic = []
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.version = []
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.checksum = struct.unpack("I", file.read(4))[0]
self.signature = file.read(20)
self.file_size = struct.unpack("I", file.read(4))[0]
self.header_size = struct.unpack("I", file.read(4))[0]
self.endian_tag = hex(struct.unpack("I", file.read(4))[0])
self.link_size = struct.unpack("I", file.read(4))[0]
self.link_off = struct.unpack("I", file.read(4))[0]
self.map_off = struct.unpack("I", file.read(4))[0]
self.string_ids_size = struct.unpack("I", file.read(4))[0]
self.string_ids_off = struct.unpack("I", file.read(4))[0]
self.type_ids_size = struct.unpack("I", file.read(4))[0]
self.type_ids_off = struct.unpack("I", file.read(4))[0]
self.proto_ids_size = struct.unpack("I", file.read(4))[0]
self.proto_ids_off = struct.unpack("I", file.read(4))[0]
self.field_ids_size = struct.unpack("I", file.read(4))[0]
self.field_ids_off = struct.unpack("I", file.read(4))[0]
self.method_ids_size = struct.unpack("I", file.read(4))[0]
self.method_ids_off = struct.unpack("I", file.read(4))[0]
self.class_defs_size = struct.unpack("I", file.read(4))[0]
self.class_defs_off = struct.unpack("I", file.read(4))[0]
self.data_size = struct.unpack("I", file.read(4))[0]
self.data_off = struct.unpack("I", file.read(4))[0]
self.len = file.tell() - self.start
def create(self, dexfile):
self.magic = []
self.magic.append('d')
self.magic.append('e')
self.magic.append('x')
self.magic.append(0x0A)
self.version = []
self.version.append('0')
self.version.append('3')
self.version.append('5')
self.version.append(0)
self.checksum = 1234
self.signature = "idontknow"
self.file_size = 1234
self.header_size = 112
self.endian_tag = 0x12345678
self.link_size = 0
self.link_off = 0
# self.map_off = dexfile.dexmaplist
def copytofile(self, file):
file.seek(self.start, 0)
file.write(struct.pack("B", ord(self.magic[0])))
file.write(struct.pack("B", ord(self.magic[1])))
file.write(struct.pack("B", ord(self.magic[2])))
file.write(struct.pack("B", ord(self.magic[3])))
file.write(struct.pack("B", ord(self.version[0])))
file.write(struct.pack("B", ord(self.version[1])))
file.write(struct.pack("B", ord(self.version[2])))
file.write(struct.pack("B", ord(self.version[3])))
file.write(struct.pack("I", self.checksum))
file.write(self.signature)
file.write(struct.pack("I", self.file_size))
file.write(struct.pack("I", self.header_size))
file.write(struct.pack("I", int(self.endian_tag, 16)))
file.write(struct.pack("I", self.link_size))
file.write(struct.pack("I", self.link_off))
file.write(struct.pack("I", self.map_off))
file.write(struct.pack("I", self.string_ids_size))
file.write(struct.pack("I", self.string_ids_off))
file.write(struct.pack("I", self.type_ids_size))
file.write(struct.pack("I", self.type_ids_off))
file.write(struct.pack("I", self.proto_ids_size))
file.write(struct.pack("I", self.proto_ids_off))
file.write(struct.pack("I", self.field_ids_size))
file.write(struct.pack("I", self.field_ids_off))
file.write(struct.pack("I", self.method_ids_size))
file.write(struct.pack("I", self.method_ids_off))
file.write(struct.pack("I", self.class_defs_size))
file.write(struct.pack("I", self.class_defs_off))
file.write(struct.pack("I", self.data_size))
file.write(struct.pack("I", self.data_off))
def makeoffset(self, dexmaplist):
self.string_ids_size = dexmaplist[1].size
self.string_ids_off = dexmaplist[1].offset
self.type_ids_size = dexmaplist[2].size
self.type_ids_off = dexmaplist[2].offset
self.proto_ids_size = dexmaplist[3].size
self.proto_ids_off = dexmaplist[3].offset
self.field_ids_size = dexmaplist[4].size
self.field_ids_off = dexmaplist[4].offset
self.method_ids_size = dexmaplist[5].size
self.method_ids_off = dexmaplist[5].offset
self.class_defs_size = dexmaplist[6].size
self.class_defs_off = dexmaplist[6].offset
self.data_off = dexmaplist[0x1000].offset
self.data_size = 0
self.map_off = dexmaplist[0x1000].offset
self.file_size = 0
def printf(self):
print ("DEX FILE HEADER:")
print ("magic: ", self.magic)
print ("version: ", self.version)
print ("checksum: ", self.checksum)
print ("signature: ", self.signature)
print ("file_size: ", self.file_size)
print ("header_size: ", self.header_size)
print ("endian_tag: ", self.endian_tag)
print ("link_size: ", self.link_size)
print ("link_off: ", self.link_off)
print ("map_off: ", self.map_off)
print ("string_ids_size: ", self.string_ids_size)
print ("string_ids_off: ", self.string_ids_off)
print ("type_ids_size: ", self.type_ids_size)
print ("type_ids_off: ", self.type_ids_off)
print ("proto_ids_size: ", self.proto_ids_size)
print ("proto_ids_off: ", self.proto_ids_off)
print ("field_ids_size: ", self.field_ids_size)
print ("field_ids_off: ", self.field_ids_off)
print ("method_ids_size: ", self.method_ids_size)
print ("method_ids_off: ", self.method_ids_off)
print ("class_defs_size: ", self.class_defs_size)
print ("class_defs_off: ", self.class_defs_off)
print ("data_size: ", self.data_size)
print ("data_off: ", self.data_off)
class DexStringID:
def __init__(self, file, mode=1):
if mode == 1:
self.stringDataoff = struct.unpack("I", file.read(4))[0] # in file
file.seek(self.stringDataoff, 0)
self.size = readunsignedleb128(file)
self.str = getutf8str(file)
self.ref = None
else:
self.stringDataoff = 0
self.size = 0
self.str = ""
self.ref = None
def addstrID(self, str):
self.ref = str
self.str = getstr(str.str)
def copytofile(self, file):
# self.stringDataoff = self.ref.start
file.write(struct.pack("I", self.ref.start))
def getreference(self, dexmaplist):
self.ref = dexmaplist[0x2002].getreference(self.stringDataoff)
def printf(self):
print ("size: ", self.size, " str: ", self.str, "dataof: ", self.stringDataoff)
class DexTypeID:
def __init__(self, file, str_table, mode=1):
if mode == 1:
self.descriptorIdx = struct.unpack("I", file.read(4))[0] # in file
self.str = str_table[self.descriptorIdx].str
else:
self.descriptorIdx = 0
self.str = ""
def addtype(self, index, string):
self.descriptorIdx = index
self.str = string
def copytofile(self, file):
file.write(struct.pack("I", self.descriptorIdx))
def printf(self):
print ("type id: ", self.str)
class DexProtoId:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.shortyIdx = struct.unpack("I", file.read(4))[0] # in file
self.returnTypeIdx = struct.unpack("I", file.read(4))[0] # in file
self.parametersOff = struct.unpack("I", file.read(4))[0] # in file
self.name = str_table[self.shortyIdx].str
self.returnstr = type_table[self.returnTypeIdx].str
self.ref = None
else:
self.shortyIdx = 0
self.returnTypeIdx = 0
self.parametersOff = 0
self.ref = None
def addproto(self, idx, typeidx, reference):
self.shortyIdx = idx
self.returnTypeIdx = typeidx
self.ref = reference
def copytofile(self, file):
file.write(struct.pack("I", self.shortyIdx))
file.write(struct.pack("I", self.returnTypeIdx))
if self.ref is not None:
file.write(struct.pack("I", self.ref.start))
else:
file.write(struct.pack("I", 0))
def getreference(self, dexmaplist):
self.ref = dexmaplist[0x1001].getreference(self.parametersOff)
def printf(self):
print ("return Type:", self.returnstr)
print ("methodname:", self.name)
if self.ref is not None:
self.ref.printf()
class DexFieldId:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("H", file.read(2))[0] # in file
self.typeIdx = struct.unpack("H", file.read(2))[0] # in file
self.nameIdx = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.typestr = type_table[self.typeIdx].str
self.name = str_table[self.nameIdx].str
def addfield(self, classidx, typeidx, nameidx):
self.classIdx = classidx
self.typeIdx = typeidx
self.nameIdx = nameidx
def copytofile(self, file):
file.write(struct.pack("H", self.classIdx))
file.write(struct.pack("H", self.typeIdx))
file.write(struct.pack("I", self.nameIdx))
def printf(self):
print ("classstr:", self.classstr)
print ("typestr:", self.typestr)
print ("name:", self.name)
print ()
class DexMethodId:
def __init__(self, file, str_table, type_table, proto_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("H", file.read(2))[0] # in file
self.protoIdx = struct.unpack("H", file.read(2))[0] # in file
self.nameIdx = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.name = str_table[self.nameIdx].str
else:
self.classIdx = 0
self.protoIdx = 0
self.nameIdx = 0
def addmethod(self, class_idx, proto_idx, name_idx):
self.classIdx = class_idx
self.protoIdx = proto_idx
self.nameIdx = name_idx
def copytofile(self, file):
file.write(struct.pack("H", self.classIdx))
file.write(struct.pack("H", self.protoIdx))
file.write(struct.pack("I", self.nameIdx))
def printf(self):
print ("classstr:", self.classstr)
print ("name:", self.name)
print ()
class DexClassDef:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("I", file.read(4))[0] # in file
self.accessFlags = struct.unpack("I", file.read(4))[0] # in file
self.superclassIdx = struct.unpack("I", file.read(4))[0] # in file
self.interfacesOff = struct.unpack("I", file.read(4))[0] # in file
self.sourceFileIdx = struct.unpack("I", file.read(4))[0] # in file
self.annotationsOff = struct.unpack("I", file.read(4))[0] # in file
self.classDataOff = struct.unpack("I", file.read(4))[0] # in file
self.staticValuesOff = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.superclassstr = type_table[self.superclassIdx].str
if self.sourceFileIdx == 0xFFFFFFFF:
self.sourceFilestr = "NO_INDEX"
else:
self.sourceFilestr = str_table[self.sourceFileIdx].str
else:
self.classIdx = 0
self.accessFlags = 0
self.superclassIdx = 0
self.interfacesOff = 0
self.sourceFileIdx = 0
self.annotationsOff = 0
self.classDataOff = 0
self.staticValuesOff = 0
self.interfacesRef = None
self.annotationsRef = None
self.classDataRef = None
self.staticValuesRef = None
def addclassdef(self, classidx, access, superclass, source):
self.classIdx = classidx
self.accessFlags = access
self.superclassIdx = superclass
self.sourceFileIdx = source
def addclassdefref(self, interref, annoref, classref, staticref):
self.interfacesRef = interref
self.annotationsRef = annoref
self.classDataRef = classref
self.staticValuesRef = staticref
# get class data reference by its name,e.g. Lcom/cc/test/MainActivity;
def getclassdefref(self, str):
if self.classstr == str and self.classDataOff > 0:
return self.classDataRef
return None
def copytofile(self, file):
file.write(struct.pack("I", self.classIdx))
file.write(struct.pack("I", self.accessFlags))
file.write(struct.pack("I", self.superclassIdx))
if self.interfacesRef is not None:
file.write(struct.pack("I", self.interfacesRef.start))
# print(self.interfacesRef.start)
else:
file.write(struct.pack("I", 0))
file.write(struct.pack("I", self.sourceFileIdx))
if self.annotationsRef is not None:
file.write(struct.pack("I", self.annotationsRef.start))
# print(self.annotationsRef.start)
else:
file.write(struct.pack("I", 0))
if self.classDataRef is not None:
file.write(struct.pack("I", | |
# Some services are restricted by the vSphere License Level.
except vim.fault.RestrictedVersion as err:
log.debug(err)
ret.update({host_name: {'Error': err}})
continue
ret.update({host_name: {'Service Restarted': True}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def set_service_policy(host,
username,
password,
service_name,
service_policy,
protocol=None,
port=None,
host_names=None):
'''
Set the service name's policy for a given host or list of hosts.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
service_name
The name of the service for which to set the policy. Supported service names are:
- DCUI
- TSM
- SSH
- lbtd
- lsassd
- lwiod
- netlogond
- ntpd
- sfcbd-watchdog
- snmpd
- vprobed
- vpxa
- xorg
service_policy
The policy to set for the service. For example, 'automatic'.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to tell
vCenter the hosts for which to set the service policy.
If host_names is not provided, the service policy information will be retrieved
for the ``host`` location instead. This is useful for when service instance
connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
        salt '*' vsphere.set_service_policy my.esxi.host root bad-password 'ntpd' 'automatic'
# Used for connecting to a vCenter Server
        salt '*' vsphere.set_service_policy my.vcenter.location root bad-password 'ntpd' 'automatic' \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod', 'netlogond',
'ntpd', 'sfcbd-watchdog', 'snmpd', 'vprobed', 'vpxa', 'xorg']
ret = {}
for host_name in host_names:
# Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services:
ret.update({host_name: {'Error': '{0} is not a valid service name.'.format(service_name)}})
return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
service_manager = _get_service_manager(host_ref)
services = host_ref.configManager.serviceSystem.serviceInfo.service
# Services are stored in a general list - we need loop through the list and find
# service key that matches our service name.
for service in services:
service_key = None
# Find the service key based on the given service_name
if service.key == service_name:
service_key = service.key
elif service_name == 'ssh' or service_name == 'SSH':
if service.key == 'TSM-SSH':
service_key = 'TSM-SSH'
# If we have a service_key, we've found a match. Update the policy.
if service_key:
try:
service_manager.UpdateServicePolicy(id=service_key, policy=service_policy)
except vim.fault.NotFound:
msg = 'The service name \'{0}\' was not found.'.format(service_name)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
# Some services are restricted by the vSphere License Level.
except vim.fault.HostConfigFault as err:
msg = '\'vsphere.set_service_policy\' failed for host {0}: {1}'.format(host_name, err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
ret.update({host_name: True})
# If we made it this far, something else has gone wrong.
if ret.get(host_name) is None:
msg = 'Could not find service \'{0}\' for host \'{1}\'.'.format(service_name, host_name)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
return ret
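# For orientation, a hypothetical return value of set_service_policy (shape inferred
# from the code above; host names and message text are placeholders):
#   {'esxi-1.host.com': True,
#    'esxi-2.host.com': {'Error': "The service name 'ntpd' was not found."}}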
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def update_host_datetime(host, username, password, protocol=None, port=None, host_names=None):
'''
Update the date/time on the given host or list of host_names. This function should be
used with caution since network delays and execution delays can result in time skews.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
        The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts should update their date/time.
If host_names is not provided, the date/time will be updated for the ``host``
location instead. This is useful for when service instance connection
information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.update_date_time my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.update_date_time my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
date_time_manager = _get_date_time_mgr(host_ref)
try:
date_time_manager.UpdateDateTime(datetime.datetime.utcnow())
except vim.fault.HostConfigFault as err:
msg = '\'vsphere.update_date_time\' failed for host {0}: {1}'.format(host_name, err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
ret.update({host_name: {'Datetime Updated': True}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def update_host_password(host, username, password, new_password, protocol=None, port=None):
'''
Update the password for a given host.
.. note:: Currently only works with connections to ESXi hosts. Does not work with vCenter servers.
host
The location of the ESXi host.
username
The username used to login to the ESXi host, such as ``root``.
password
The password used to login to the ESXi host.
new_password
The new password that will be updated for the provided username on the ESXi host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
CLI Example:
.. code-block:: bash
salt '*' vsphere.update_host_password my.esxi.host root original-bad-password new-bad-password
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
# Get LocalAccountManager object
account_manager = salt.utils.vmware.get_inventory(service_instance).accountManager
# Create user account specification object and assign id and password attributes
user_account = vim.host.LocalAccountManager.AccountSpecification()
user_account.id = username
    user_account.password = new_password
# Update the password
try:
account_manager.UpdateUser(user_account)
except vmodl.fault.SystemError as err:
raise CommandExecutionError(err.msg)
except vim.fault.UserNotFound:
raise CommandExecutionError('\'vsphere.update_host_password\' failed for host {0}: '
'User was not found.'.format(host))
# If the username and password already exist, we don't need to do anything.
except vim.fault.AlreadyExists:
pass
return True
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def vmotion_disable(host, username, password, protocol=None, port=None, host_names=None):
'''
Disable vMotion for a given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
        The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts should disable VMotion.
If host_names is not provided, VMotion will be disabled for the ``host``
location instead. This is useful for when service instance connection
information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vmotion_disable my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vmotion_disable my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vmotion_system = host_ref.configManager.vmotionSystem
# Disable VMotion for the host by removing the VNic selected to use for VMotion.
try:
vmotion_system.DeselectVnic()
except vim.fault.HostConfigFault as err:
msg = 'vsphere.vmotion_disable failed: {0}'.format(err)
log.debug(msg)
ret.update({host_name: {'Error': msg,
'VMotion Disabled': False}})
continue
ret.update({host_name: {'VMotion Disabled': True}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def vmotion_enable(host, username, password, protocol=None, port=None, host_names=None, device='vmk0'):
'''
Enable vMotion for a given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
        The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
        protocol. Default protocol is ``https``.
# Repository: arjkesh/deep-learning-containers-1
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# !/usr/bin/env python2.7
"""Tests for tensorflow_model_server."""
import atexit
import os
import shlex
import socket
import subprocess
import sys
import time
import pickle
import shutil
import boto3
import botocore
import marshal
import argparse
import logging
import pprint
from multiprocessing.dummy import Pool
# This is a placeholder for a Google-internal import.
import grpc
from grpc.beta import implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.interfaces.face import face
import tensorflow as tf
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.platform import flags
from tensorflow_serving.apis import classification_pb2
# from tensorflow_serving.apis import get_model_status_pb2
# from tensorflow_serving.apis import model_service_pb2_grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow_serving.apis import regression_pb2
from tensorflow_serving.apis import inference_pb2
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.estimator import model_fn as model_fn_lib
FLAGS = flags.FLAGS
RPC_TIMEOUT = 600.0
CHANNEL_WAIT_TIMEOUT = 5.0
WAIT_FOR_SERVER_READY_INT_SECS = 600
NUM_PREDICTIONS = 5
SERVING_OUTPUT_LOG = "serving_output.log"
DOCKER_NAME = "tensorflow_inference_container"
def print_helper(cmd):
print("****************************************")
print(cmd)
print("****************************************")
def download_test_image():
    if not os.path.isfile("/tmp/data/angeldog.jpg"):
        os.system("mkdir -p /tmp/data")
        os.system(
            "wget http://arumi.blog.kataweb.it/files/photos/uncategorized/2007/05/22/angeldog.jpg")
        os.system("mv angeldog.jpg /tmp/data")
def PickUnusedPort():
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port
def WaitForServerReady(port):
"""Waits for a server on the localhost to become ready."""
for _ in range(0, WAIT_FOR_SERVER_READY_INT_SECS):
time.sleep(1)
request = predict_pb2.PredictRequest()
request.model_spec.name = 'intentionally_missing_model'
try:
# Send empty request to missing model
channel = implementations.insecure_channel('localhost', port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(
channel)
stub.Predict(request, RPC_TIMEOUT)
except grpc.RpcError as error:
# Missing model error will have details containing 'Servable'
if 'Servable' in error.details():
print('Server is ready')
break
class TensorflowModelServerTester(object):
"""This class defines integration test cases for tensorflow_model_server."""
def __init__(self, ServingInput, model_server=None, concurrent=1):
"""ServingInput is a 4 item tuple.
0: Model Name
1: Model Path
2: Signature Name
3: Predict_Input_fn
"""
self.Predict_Input_fn = ServingInput[3]
self.model_name = ServingInput[0]
self.model_path = ServingInput[1]
self.sig_name = ServingInput[2]
self.server_proc = None
self.concurrent = concurrent
if (model_server != None):
self.binary = model_server
if (not os.path.isfile(self.binary)):
print("Can't find Tensorflow Serving Binary at %s please point to TFS binary" % (self.binary))
exit(1)
else:
self.binary = None
self.open_procs = []
def TerminateProcs(self):
"""Terminate all processes."""
print('Terminating all processes...')
if self.server_proc is not None:
print(self.server_proc)
self.output_file.close()
self.server_proc.terminate()
def RunServer(self,
port,
model_name,
model_path,
batching_parameters_file='',
grpc_channel_arguments='',
wait_for_server_ready=True):
"""Run tensorflow_model_server using test config."""
print('Starting test server...')
command = 'docker' if processor == 'cpu' else 'nvidia-docker'
env_command = ' -e TENSORFLOW_INTER_OP_PARALLELISM=2 -e TENSORFLOW_INTRA_OP_PARALLELISM=72 -e KMP_AFFINITY=\'granularity=fine,verbose,compact,1,0\' -e OMP_NUM_THREADS=36 -e TENSORFLOW_SESSION_PARALLELISM=9 -e KMP_BLOCKTIME=1 -e KMP_SETTINGS=0 ' if processor == 'cpu' else ''
command += ' run --rm --name ' + DOCKER_NAME + env_command + ' -p 8500:8500 -v /home/ubuntu/src/container_tests:/test --mount type=bind,source=' + model_path + ',target=/models/' + model_name + ' -e MODEL_NAME=' + model_name + ' -itd ' + docker_image_name
port = 8500
print_helper(command)
my_env = os.environ.copy()
        self.output_file = open(SERVING_OUTPUT_LOG, 'w')
self.server_proc = subprocess.Popen(shlex.split(command), env=my_env, stdout=self.output_file)
self.open_procs.append(self.server_proc)
print('Server started')
if wait_for_server_ready:
WaitForServerReady(port)
return 'localhost:' + str(port)
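    # Rough sketch of the docker command RunServer assembles on a CPU host; the
    # model name, path, and image below are placeholders, not values from this repo:
    #   docker run --rm --name tensorflow_inference_container <env flags> -p 8500:8500 \
    #       -v /home/ubuntu/src/container_tests:/test \
    #       --mount type=bind,source=<model_path>,target=/models/<model_name> \
    #       -e MODEL_NAME=<model_name> -itd <docker_image_name>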
def _Predict(self, model_server_address, request_timeout=30, iterations=NUM_PREDICTIONS):
"""Helper method to call predict on models we want to test
input_fn: This will return 4 lists
[input_names], [input_data], [input_shapes], [input_types]
model_name = name of model testing
signature_name = default to SERVING
"""
print("Sending Predict request...")
host, port = model_server_address.split(':')
request = predict_pb2.PredictRequest()
request.model_spec.name = self.model_name
request.model_spec.signature_name = self.sig_name
input_names, input_data, input_shapes, input_types = self.Predict_Input_fn()
# input_names, input_data, input_shapes, input_types = self.convert_to_proto_artifacts(self.Predict_Input_fn())
        for ii in range(len(input_names)):  # governed by input_names
print(input_shapes)
if (input_types[ii] != None):
request.inputs[input_names[ii]].CopyFrom(
tf.contrib.util.make_tensor_proto(input_data[ii], shape=input_shapes[ii], dtype=input_types[ii]))
else:
request.inputs[input_names[ii]].CopyFrom(
tf.contrib.util.make_tensor_proto(input_data[ii], shape=input_shapes[ii]))
# Create the stub and channel
channel = implementations.insecure_channel(host, int(port))
timing = []
stub = prediction_service_pb2.beta_create_PredictionService_stub(
channel)
p = Pool(self.concurrent)
def do_pred(x):
start = time.time()
result = stub.Predict(request, request_timeout)
end = time.time()
return (result, end - start)
for each in range(iterations):
res = p.map(do_pred, range(self.concurrent))
result = res[-1][0]
times = [x[1] for x in res]
timing.append((np.min(times), np.mean(times), np.max(times)))
results = {}
for output in result.outputs:
results[output] = (tf.contrib.util.make_ndarray(
result.outputs[output]))
return results, timing
def testServing(self, iterations=2):
"""ServingInput is a 4 item tuple.
0: Model Name
1: Model Path
2: Signature Name
3: Predict_Input_fn
"""
# atexit.register(self.TerminateProcs)
model_server_address = self.RunServer(
PickUnusedPort(),
self.model_name,
self.model_path,
)
result = self._Predict(model_server_address, RPC_TIMEOUT, iterations)
print("Terminating Proc")
self.server_proc.terminate()
self.server_proc.wait()
os.system("docker rm -f {}".format(DOCKER_NAME))
return result
def test_serving(ServingTests, binary=None, concurrent=1, exclude_list=[], iterations=2):
results = {}
for each in ServingTests:
if (type(each) == list):
if (each[0] not in exclude_list):
results[each[0]] = {}
tester = TensorflowModelServerTester(each, binary, concurrent)
res = tester.testServing(iterations=iterations)
results[each[0]]["values"] = res[0]
results[each[0]]["timing"] = res[1]
else:
if (ServingTests[0] not in exclude_list):
results[ServingTests[0]] = {}
tester = TensorflowModelServerTester(ServingTests, binary, concurrent)
res = tester.testServing(iterations=iterations)
results[ServingTests[0]]["values"] = res[0]
results[ServingTests[0]]["timing"] = res[1]
break
return results
def download_file(bucket_name, s3, key):
working_dir = "/tmp/test_eia_serving_"
if (not os.path.isdir(working_dir)):
os.mkdir(working_dir)
fname = key + ".zip"
working_dir = working_dir + "/" + key # WORKING PLUS KEY
print(key)
if (os.path.isdir(working_dir)):
shutil.rmtree(working_dir)
os.mkdir(working_dir)
destfile = working_dir + "/" + fname
print("destfile", destfile)
try:
s3.Bucket(bucket_name).download_file(fname, destfile)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("Object does not exist")
else:
raise
if destfile.endswith(".zip"):
os.system("unzip " + destfile + " -d /tmp/test_eia_serving_/")
with open("%s/metadata.pkl" % (working_dir), "rb") as f2:
metadata_read = f2.read()
# First check python version
if sys.version_info >= (3, 0):
metadata = pickle.loads(metadata_read, encoding='latin1')
else:
metadata = pickle.loads(metadata_read)
# Process input fn
from types import FunctionType
input_fn = metadata["input_fn"]
exec(input_fn[0])
id2 = eval(input_fn[1])
metadata["input_fn"] = id2
model_path = ''
if os.path.isdir(working_dir + "/" + key):
model_path = working_dir + "/" + key
else:
model_path = working_dir + "/" + key + ".pb"
if not model_path:
sys.exit("No model found in directory")
output = [metadata['Test name'], model_path,
metadata['sig_name'], metadata['input_fn']]
return output
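# For orientation: download_file expects the zip to contain a metadata.pkl written by
# upload_file below, roughly of the form (keys from this file, values hypothetical):
#   {'Test name': 'resnet', 'sig_name': 'serving_default',
#    'input_fn': ['<source of the input fn>', 'Resnet_input_fn']}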
def upload_files(ServingTests, bucket_name):
for each in ServingTests:
print(each)
upload_file(each, bucket_name)
def upload_file(ServingTest, bucket_name):
# holds metadata i.e. classes, input names, output names
working_dir = "/tmp/test_eia_serving_"
# If clean up hasn't happened
if (os.path.isdir(working_dir)):
shutil.rmtree(working_dir)
os.mkdir(working_dir)
pickle_dict = {}
zip_filename = ServingTest[0]
model_file = ServingTest[1]
working_dir = working_dir + "/" + zip_filename
os.mkdir(working_dir)
def is_pb_in_dir(dir_name):
for versions in os.listdir(dir_name):
for fname in os.listdir(dir_name + "/" + versions):
if fname.endswith(".pb"):
print("Found pb")
return (True and "variables" in os.listdir(dir_name + "/" + versions))
return False
if not os.path.isfile(model_file):
if (not (os.path.isdir(model_file) or is_pb_in_dir(model_file))):
sys.exit("Invalid model file name")
input_fn = ServingTest[3]
pickle_dict["Test name"] = ServingTest[0]
pickle_dict["sig_name"] = ServingTest[2]
# Need to modify the pickling of a function.
# Process input_fn
import inspect
funcdetails = [
inspect.getsource(input_fn),
input_fn.__name__,
]
input_fn = funcdetails
pickle_dict['input_fn'] = input_fn
def copyfile(file_or_dir):
if (os.path.isdir(working_dir)):
shutil.copytree(file_or_dir, working_dir + "/" + zip_filename)
else:
shutil.copyfile(file_or_dir, working_dir + "/" + zip_filename + ".pb")
# Copy in the model file or directory
if (model_file.endswith("/")):
model_file = model_file[:-1]
copyfile(model_file)
pickle.dump(
pickle_dict, open(working_dir + "/metadata.pkl", "wb"), 2)
# zips file together
os.chdir("/tmp/test_eia_serving_")
os.system("zip -r " + zip_filename + " " + zip_filename)
# uploads zip file to s3
os.system("aws s3 cp " + zip_filename + ".zip" + " s3://" + bucket_name)
def mnist_input_fn():
input_names = ["images"]
np.random.seed(0)
input_data = np.random.rand(1, 784)
input_shapes = [[1, 784]]
input_types = [(tf.float32)]
return input_names, input_data, input_shapes, input_types
def saved_model_half_plus_two_input_fn():
input_names = ["x"]
np.random.seed(0)
input_data = np.random.rand(1, 1)
input_shapes = [[1, 1]]
input_types = [(tf.float32)]
return input_names, input_data, input_shapes, input_types
def Inception_input_fn():
f = open("/tmp/data/angeldog.jpg", 'rb')
input_names = ['images']
input_shapes = [[1]]
input_data = [f.read()]
input_types = [None]
f.close()
return input_names, input_data, input_shapes, input_types
def Resnet_50_v1_input_fn():
input_names = ['input']
np.random.seed(0)
input_data = np.random.rand(1, 224, 224, 3)
input_shapes = [(1, 224, 224, 3)]
input_types = [tf.float32]
return input_names, input_data, input_shapes, input_types
def Resnet_input_fn():
input_names = ['input']
np.random.seed(0)
input_data = np.random.rand(128, 224, 224, 3)
input_shapes = [(128, 224, 224, 3)]
input_types = [tf.float32]
return input_names, input_data, input_shapes, input_types
def Resnet101_input_fn():
input_names = ['inputs']
np.random.seed(0)
input_data = np.random.rand(1, 224, 224, 3)
input_shapes = [(1, 224, 224, 3)]
input_types = [tf.uint8]
return input_names, input_data, input_shapes, input_types
def resnet50v2_taehoon_input_fn():
input_names = ['Placeholder:0']
np.random.seed(0)
input_data = np.random.rand(1, 224, 224, 3)
input_shapes = [(1, 224, 224, 3)]
input_types = [tf.float32]
return input_names, | |
# File: algorithm/operators.py
##############
# Efficient operators like SMA, EMA etc
# those operators are all sub-classes of a base operator
# which defines basic storage structure to enable efficient calculation of those indicators
# 1, initialize.
# to construct an operator, we need to initialize basic parameters that define this operator
# 2, internal storage
# stock indicators usually rely on price history, therefore we allow customized storage of price history
# for each operator.
# 3, online calculation
# calculation of indicators should be efficient: i.e. it only needs input of price of the current
# time stamp, while utilizing its internal storage for necessary modifications. If necessary, a
# @memorized or @lazy_evaluate might be used.
#############
from backtest.BackExchange import BackExchange
from core.Ticker import TickerFields
from collections import deque
import math
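# Minimal usage sketch of the online-calculation pattern described above (the exchange
# object and ticker name are placeholders, not defined in this module):
#   sma = SMA(exchange, 'BTC/USDT', window_size=20, field=TickerFields.Close)
#   # once per timestamp of the backtest loop:
#   value = sma.get()   # returns None until the window has filled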
class OperatorsBase(object):
def __init__(self, exchange: BackExchange):
self.exchange = exchange
self.last_timestamp = 0
self.operator_name = ""
pass
def get(self):
pass
"""
This is something that should only be used internally,
but support is added here nevertheless to facilitate
highly customized indicator of indicators, like RSI.
in get_feed, every time stamp value is sent in directly
rather than read from the exchange
"""
def __get_feed(self, value):
pass
"""
Exponential moving average
"""
class EMA(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str, window_size: int, field: TickerFields):
super(EMA, self).__init__(exchange)
self.ticker_name = ticker_name
self.window_size = window_size
self.price_queue = deque(maxlen=window_size + 1)
self.field = field
self.ema = None
self.multiplier = 2 / (1 + window_size)
self.operator_name = "EMA(" + str(window_size) + ")" + " of " + ticker_name
def get(self):
current_price = self.exchange.fetch_ticker(self.ticker_name)[self.field]
return self.__get_feed(current_price)
def __get_feed(self, value):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.ema
self.price_queue.append(value)
if len(self.price_queue) < self.window_size:
return self.ema
elif len(self.price_queue) == self.window_size:
self.ema = sum(self.price_queue) / self.window_size
else:
            # standard EMA update: move the previous EMA toward the new value by the smoothing factor
            self.ema += (value - self.ema) * self.multiplier
self.last_timestamp = self.exchange.__time
return self.ema
"""
Simple moving average
"""
class SMA(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str, window_size: int, field: TickerFields):
super(SMA, self).__init__(exchange)
self.ticker_name = ticker_name
self.window_size = window_size
self.price_queue = deque(maxlen=window_size + 1)
self.field = field
self.sma = None
self.operator_name = "SMA(" + str(window_size) + ")" + " of " + ticker_name
def get(self):
current_price = self.exchange.fetch_ticker(self.ticker_name)[self.field]
return self.__get_feed(current_price)
def __get_feed(self, value):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.sma
self.price_queue.append(value)
if len(self.price_queue) < self.window_size:
return self.sma
elif len(self.price_queue) == self.window_size:
self.sma = sum(self.price_queue) / self.window_size
else:
self.sma += (value - self.price_queue.popleft()) / self.window_size
self.last_timestamp = self.exchange.__time
return self.sma
def get_feed_extern(self, value):
return self.__get_feed(value)
"""
SMMA, the smoothed moving average
This indicator is used to facilitate standard RSI calculations.
"""
class SMMA(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str, window_size: int, field: TickerFields):
super(SMMA, self).__init__(exchange)
self.ticker_name = ticker_name
self.window_size = window_size
self.field = field
self.smma = None
self.operator_name = "SMMA(" + str(window_size) + ")" + " of " + ticker_name
def get(self):
current_price = self.exchange.fetch_ticker(self.ticker_name)[self.field]
return self.__get_feed(current_price)
def __get_feed(self, value):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.smma
if self.smma is None:
self.smma = value
else:
self.smma = (self.smma * (self.window_size - 1) + value) / self.window_size
self.last_timestamp = self.exchange.__time
return self.smma
"""
expose __get_feed to external use by choice
"""
def get_feed_extern(self, value):
return self.__get_feed(value)
"""
Standard Deviation indicator
this class calculates the standard deviation
of a commodity
"""
class Sigma(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str, window_size: int, field: TickerFields):
super(Sigma, self).__init__(exchange)
self.ticker_name = ticker_name
self.window_size = window_size
self.price_queue = deque(maxlen=window_size + 1)
self.price_queue_sq = deque(maxlen=window_size + 1)
self.price_sum = None
self.price_sum_sq = None
self.field = field
self.sigma = None
self.operator_name = "Sigma(" + str(window_size) + ")" + " of " + ticker_name
def get(self):
current_price = self.exchange.fetch_ticker(self.ticker_name)[self.field]
return self.__get_feed(current_price)
def __get_feed(self, value):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.sigma
self.last_timestamp = self.exchange.__time
self.price_queue.append(value)
self.price_queue_sq.append(value ** 2)
if len(self.price_queue) != len(self.price_queue_sq):
print("internal error: price_queue and price_queue_sq must have the same length")
return self.sigma
if len(self.price_queue) < self.window_size:
return self.sigma
elif len(self.price_queue) == self.window_size:
self.price_sum = sum(self.price_queue)
self.price_sum_sq = sum(self.price_queue_sq)
else:
self.price_sum += (value - self.price_queue.popleft())
self.price_sum_sq += (value ** 2 - self.price_queue_sq.popleft())
        # sample standard deviation from running sums: sqrt((sum(x^2) - (sum(x))^2 / n) / (n - 1))
        self.sigma = math.sqrt((self.price_sum_sq - self.price_sum ** 2 / self.window_size) / (self.window_size - 1))
return self.sigma
def get_feed_extern(self, value):
return self.__get_feed(value)
"""
MACD is an indicator of indicators (EMA)
"""
class MACD(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str, field: TickerFields):
super(MACD, self).__init__(exchange)
self.ticker_name = ticker_name
self.ema_26 = EMA(exchange, ticker_name, 26, field)
self.ema_12 = EMA(exchange, ticker_name, 12, field)
self.macd = None
self.operator_name = "SMA" + " of " + ticker_name
def get(self):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.macd
ema_12 = self.ema_12.get()
ema_26 = self.ema_26.get()
if ema_12 is None or ema_26 is None:
self.macd = None
else:
self.macd = ema_12 - ema_26
self.last_timestamp = self.exchange.__time
return self.macd
"""
Stochastic Oscillator
it returns both %K and %D, while oscillator is commonly
used to check if %K crossed %D
"""
class StochasticOscillator(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str):
super(StochasticOscillator, self).__init__(exchange)
self.ticker_name = ticker_name
self.low_14 = None
self.high_14 = None
self.price_queue = deque(maxlen=14)
self.percent_k = None
self.past_oscillator = deque(maxlen=3)
self.percent_d = None
self.operator_name = "StochasticOscillator" + " of " + ticker_name
def get(self):
current_close = self.exchange.fetch_ticker(self.ticker_name)[TickerFields.Close]
return self.__get_feed(current_close)
def __get_feed(self, value):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.percent_k, self.percent_d
if len(self.price_queue) < 14:
self.price_queue.append(value)
else:
self.low_14 = min(self.price_queue)
self.high_14 = max(self.price_queue)
self.price_queue.append(value)
            self.percent_k = round((value - self.low_14) / (self.high_14 - self.low_14) * 100, 2)
            self.past_oscillator.append(self.percent_k)
self.last_timestamp = self.exchange.__time
if len(self.past_oscillator) == 3:
self.percent_d = round(sum(self.past_oscillator) / 3, 2)
self.last_timestamp = self.exchange.__time
return self.percent_k, self.percent_d
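# For reference, the quantities computed by StochasticOscillator follow the usual
# definitions (stated here for clarity, not taken from the original comments):
#   %K = 100 * (close - low_14) / (high_14 - low_14)
#   %D = 3-period simple moving average of %K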
"""
RSI Index
it returns the RSI index calculated with smoothed SMA for ups and downs.
it is also an indicator of indicators
"""
class RSI(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str, window_size: int = 14):
super(RSI, self).__init__(exchange)
self.ticker_name = ticker_name
self.window_size = window_size
self.smma_up = SMMA(exchange, ticker_name, window_size, TickerFields.Close)
self.smma_down = SMMA(exchange, ticker_name, window_size, TickerFields.Close)
self.rsi = None
self.close_prev = None
self.operator_name = "RSI(" + str(self.window_size) + ")" + " of " + ticker_name
def get(self):
current_close = self.exchange.fetch_ticker(self.ticker_name)[TickerFields.Close]
return self.__get_feed(current_close)
def __get_feed(self, value):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.rsi
self.last_timestamp = self.exchange.__time
if self.close_prev is None:
return self.rsi
up_price = max(0, value - self.close_prev)
down_price = max(0, self.close_prev - value)
smma_u = self.smma_up.get_feed_extern(up_price)
smma_d = self.smma_down.get_feed_extern(down_price)
if smma_u is None or smma_d is None:
return self.rsi
self.rsi = 100 - 100 / (1 + smma_u / smma_d)
return self.rsi
"""
Commodity Channel Index
it returns the CCI index calculated with SMA and typical prices
see https://en.wikipedia.org/wiki/Commodity_channel_index
it is also an indicator of indicators
It uses standard deviation.
"""
class CCI(OperatorsBase):
def __init__(self, exchange: BackExchange, ticker_name: str, window_size: int = 20):
super(CCI, self).__init__(exchange)
self.ticker_name = ticker_name
self.window_size = window_size
# store price as a list
self.sigma = Sigma(exchange, ticker_name, window_size, TickerFields.Close)
self.sma = SMA(exchange, ticker_name, window_size, TickerFields.Close)
self.cci = None
self.operator_name = "CCI(" + str(self.window_size) + ")" + " of " + ticker_name
def get(self):
current_close = self.exchange.fetch_ticker(self.ticker_name)[TickerFields.Close]
current_high = self.exchange.fetch_ticker(self.ticker_name)[TickerFields.High]
current_low = self.exchange.fetch_ticker(self.ticker_name)[TickerFields.Low]
typical_price = (current_close + current_high + current_low) / 3
return self.__get_feed(typical_price)
def __get_feed(self, value):
if self.last_timestamp == self.exchange.__time:
print("You attempt to calculate {} twice at ts={}".format(self.operator_name, self.last_timestamp))
print("Please save it to a local variable and reuse it elsewhere, now using calculated value.")
return self.cci
self.last_timestamp = self.exchange.__time
sma = self.sma.get_feed_extern(value)
sigma = self.sigma.get_feed_extern(value)
if sma is None or sigma is None:
return self.cci
        # CCI = (typical price - SMA) / (0.015 * sigma)
        self.cci = (value - sma) / (0.015 * sigma)
        return self.cci
from PyQt5.QtCore import QCoreApplication, Qt
import random
import numpy as np
from scipy.sparse import csr_matrix
from GUI_classes.utils_gui import choose_dataset, pause_execution
from GUI_classes.generic_gui import StartingGui
from base import appctxt
class PAM_class(StartingGui):
def __init__(self):
super(PAM_class, self).__init__(
name="PAM",
twinx=False,
first_plot=True,
second_plot=False,
function=self.start_PAM,
extract=False,
stretch_plot=False,
)
def start_PAM(self):
self.ax1.cla()
self.log.clear()
self.log.appendPlainText("{} LOG".format(self.name))
QCoreApplication.processEvents()
self.verify_input_parameters()
if self.param_check is False:
return
self.n_medoids = int(self.line_edit_n_medoids.text())
self.n_points = int(self.line_edit_np.text())
self.X = choose_dataset(self.combobox.currentText(), self.n_points)
self.button_run.setEnabled(False)
self.checkbox_saveimg.setEnabled(False)
self.button_delete_pics.setEnabled(False)
self.slider.setEnabled(False)
if self.first_run_occurred is True:
self.ind_run += 1
self.ind_extr_fig = 0
if self.save_plots is True:
self.checkBoxChangedAction(self.checkbox_saveimg.checkState())
else:
if Qt.Checked == self.checkbox_saveimg.checkState():
self.first_run_occurred = True
self.checkBoxChangedAction(self.checkbox_saveimg.checkState())
self.checkbox_gif.setEnabled(False)
PAM = KMedoids_gui(
n_cluster=self.n_medoids,
log=self.log,
ax=self.ax1,
canvas=self.canvas_up,
save_fig=self.save_plots,
ind_run=self.ind_run,
delay=self.delay,
)
PAM.fit(self.X.tolist())
if (self.make_gif is True) and (self.save_plots is True):
self.generate_GIF()
self.button_run.setEnabled(True)
self.checkbox_saveimg.setEnabled(True)
if self.checkbox_saveimg.isChecked() is True:
self.checkbox_gif.setEnabled(True)
self.button_delete_pics.setEnabled(True)
self.slider.setEnabled(True)
class KMedoids_gui:
def __init__(
self,
n_cluster,
log,
ax,
canvas,
save_fig,
ind_run,
delay,
max_iter=10,
tol=0.001,
start_prob=0.8,
end_prob=0.99,
random_state=42,
):
""" Kmedoids constructor called """
if (
start_prob < 0
or start_prob >= 1
or end_prob < 0
or end_prob >= 1
or start_prob > end_prob
):
raise ValueError("Invalid input")
self.n_cluster = n_cluster
self.log = log
self.ax = ax
self.canvas = canvas
self.save_fig = save_fig
self.ind_run = ind_run
self.delay = delay
self.max_iter = max_iter
self.tol = tol
self.start_prob = start_prob
self.end_prob = end_prob
        self.medoids = []  # empty medoids
self.clusters = {} # empty clusters
self.tol_reached = float("inf")
self.current_distance = 0
self.__data = None
self.__is_csr = None
self.__rows = 0
self.__columns = 0
self.cluster_distances = {}
self.__random_state = random_state
def fit(self, data):
self.log.appendPlainText("")
self.log.appendPlainText("fitting")
self.__data = data
self.__set_data_type()
self.__start_algo()
return self
def __start_algo(self):
self.log.appendPlainText("starting algorithm")
self.__initialize_medoids() # choosing initial medoids
# computing clusters and cluster_distances
self.clusters, self.cluster_distances = self.__calculate_clusters(self.medoids)
# print cluster and cluster_distances
self.log.appendPlainText("clusters: {}".format(self.clusters))
self.log.appendPlainText(
"clusters_distances: {}".format(self.cluster_distances)
)
if self.delay != 0:
pause_execution(self.delay)
self.plot_pam_gui(
data=self.__data,
cl=self.clusters,
ax=self.ax,
canvas=self.canvas,
ind_run=self.ind_run,
ind_fig=0,
save_plots=self.save_fig,
)
self.__update_clusters()
def __update_clusters(self):
for i in range(
self.max_iter
        ):  # to stop if convergence isn't reached within max_iter iterations
self.log.appendPlainText("")
self.log.appendPlainText("iteration n°: {}".format(i + 1))
# compute distance obtained by swapping medoids in the clusters
cluster_dist_with_new_medoids = self.__swap_and_recalculate_clusters()
# if the new sum of cluster_distances is smaller than the old one
if self.__is_new_cluster_dist_small(cluster_dist_with_new_medoids) is True:
self.log.appendPlainText("new is smaller")
# compute clusters and cluster_distance with new medoids
self.clusters, self.cluster_distances = self.__calculate_clusters(
self.medoids
)
self.log.appendPlainText("clusters: {}".format(self.clusters))
if self.delay != 0:
pause_execution(self.delay)
self.plot_pam_gui(
data=self.__data,
cl=self.clusters,
ax=self.ax,
canvas=self.canvas,
ind_run=self.ind_run,
ind_fig=i + 1,
save_plots=self.save_fig,
)
# print("clusters_distances: ", self.cluster_distances)
else:
# if the sum of cluster_distances doesn't improve, terminate the algorithm
self.log.appendPlainText("termination")
break
def __is_new_cluster_dist_small(self, cluster_dist_with_new_medoids):
"""returns True if the new sum of cluster_distances is smaller than the previous one, and updates the
medoids, else returns False """
# compute the existing sum of cluster_distances
        existence_dist = self.calculate_distance_of_clusters()
        self.log.appendPlainText("present dist: {}".format(existence_dist))
        # computes the new sum of cluster_distances
        new_dist = self.calculate_distance_of_clusters(cluster_dist_with_new_medoids)
        self.log.appendPlainText("new dist: {}".format(new_dist))
        # if it is better, substitute the old medoids with the new ones and return True, else return False
        if existence_dist > new_dist and (existence_dist - new_dist) > self.tol:
self.medoids = cluster_dist_with_new_medoids.keys()
return True
return False
def calculate_distance_of_clusters(self, cluster_dist=None):
"""if no argument is provided, just sum the distances of the existing cluster_distances, else sum the distances
of the input cluster_distances """
if cluster_dist is None:
cluster_dist = self.cluster_distances
dist = 0
for medoid in cluster_dist.keys():
dist += cluster_dist[medoid]
return dist
def __swap_and_recalculate_clusters(self):
# http://www.math.le.ac.uk/people/ag153/homepage/KmeansKmedoids/Kmeans_Kmedoids.html
"""returns dictionary of new cluster_distances obtained by swapping medoids in each cluster"""
self.log.appendPlainText("swap and recompute")
cluster_dist = {}
for medoid in self.medoids: # for each medoid
is_shortest_medoid_found = False
for data_index in self.clusters[
medoid
]: # for each point in the current medoid's cluster
if data_index != medoid: # exclude the medoid itself
# create a list of the elements of the cluster
cluster_list = list(self.clusters[medoid])
# make the current point the temporary medoid
cluster_list[self.clusters[medoid].index(data_index)] = medoid
# compute new cluster distance obtained by swapping the medoid
new_distance = self.calculate_inter_cluster_distance(
data_index, cluster_list
)
if (
new_distance < self.cluster_distances[medoid]
): # if this new distance is smaller than the previous one
self.log.appendPlainText(
"new better medoid: {}".format(data_index)
)
cluster_dist[data_index] = new_distance
is_shortest_medoid_found = True
break # exit for loop for this medoid, since a better one has been found
# if no better medoid has been found, keep the current one
if is_shortest_medoid_found is False:
self.log.appendPlainText(
"no better medoids found, keep: {}".format(medoid)
)
cluster_dist[medoid] = self.cluster_distances[medoid]
self.log.appendPlainText("cluster_dist: {}".format(cluster_dist))
return cluster_dist
def calculate_inter_cluster_distance(self, medoid, cluster_list):
"""computes the average distance of points in a cluster from their medoid"""
distance = 0
for data_index in cluster_list:
distance += self.__get_distance(medoid, data_index)
return distance / len(cluster_list)
def __calculate_clusters(self, medoids):
"""returns the clusters and the relative distances (average distance of each element of the cluster from the
medoid) """
clusters = (
{}
) # it will be of the form {medoid0: [elements of cluster0], medoid1: [elements of cluster1], ...}
cluster_distances = {}
# initialize empty clusters and cluster_distances
for medoid in medoids:
clusters[medoid] = []
cluster_distances[medoid] = 0
for row in range(self.__rows): # for every row of input data
# compute nearest medoid and relative distance from row
nearest_medoid, nearest_distance = self.__get_shortest_distance_to_medoid(
row, medoids
)
# add this distance to the distances relative to the nearest_medoid cluster
cluster_distances[nearest_medoid] += nearest_distance
# add the row to the nearest_medoid cluster
clusters[nearest_medoid].append(row)
# divide each cluster_distance for the number of element in its corresponding cluster, to obtain the average
# distance
for medoid in medoids:
cluster_distances[medoid] /= len(clusters[medoid])
return clusters, cluster_distances
def __get_shortest_distance_to_medoid(self, row_index, medoids):
"""returns closest medoid and relative distance from the input row (point)"""
min_distance = float("inf")
current_medoid = None
for medoid in medoids:
current_distance = self.__get_distance(
medoid, row_index
) # compute distance from input row to medoid
if (
current_distance < min_distance
): # if it happens to be shorter than all previously computed distances
min_distance = current_distance # save it as min_distance
current_medoid = medoid # choose this medoid as the closest one
return current_medoid, min_distance
def __initialize_medoids(self):
"""Kmeans++ initialisation"""
self.log.appendPlainText("initializing medoids with kmeans++")
random.seed(self.__random_state)
self.medoids.append(
random.randint(0, self.__rows - 1)
) # choosing a random row from data
while (
len(self.medoids) != self.n_cluster
): # until the number of medoids reaches the number of clusters
self.medoids.append(
self.__find_distant_medoid()
) # choose as next medoid the most distant from the previously chosen ones
def __find_distant_medoid(self):
"""returns a row corresponding to a point which is considerably distant from its closest medoid"""
distances = []
indices = []
for row in range(self.__rows): # for every row in data
indices.append(row)
distances.append(
self.__get_shortest_distance_to_medoid(row, self.medoids)[1]
) # shortest distance from row to its closest medoid
distances_index = np.argsort(distances) # the sorted indices of the distances
        chosen_dist = self.__select_distant_medoid(
            distances_index
        )  # the index corresponding to the distance chosen
        return indices[chosen_dist]  # row corresponding to the chosen distance
def __select_distant_medoid(self, distances_index):
"""returns a random index of the distances_index between start and end"""
start_index = round(
self.start_prob * len(distances_index)
) # normally 0.8*len(dist)
end_index = round(
self.end_prob * (len(distances_index) - 1)
) # normally 0.99*len(dist)
# returns a random index corresponding to a row which is distant from its closest medoid, but not necessarily
# the row with the maximum distance from its medoid
return distances_index[random.randint(start_index, end_index)]
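    # Worked example for the selection above (hypothetical numbers, default probabilities):
    # with 100 rows, start_index = round(0.8 * 100) = 80 and end_index = round(0.99 * 99) = 98,
    # so the new medoid is drawn from the ~20% of rows farthest from their closest medoid.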
def __get_distance(self, x1, x2):
"""computes euclidean distance, with an initial transformation based on input data"""
a = (
self.__data[x1].toarray()
if self.__is_csr == True
else np.array(self.__data[x1])
)
b = (
self.__data[x2].toarray()
if self.__is_csr == True
else np.array(self.__data[x2])
)
return np.linalg.norm(a - b)
def __set_data_type(self):
"""to check whether the given input is of type list or csr """
# print("setting data type")
if isinstance(self.__data, csr_matrix):
self.__is_csr = True
self.__rows = self.__data.shape[0]
self.__columns = self.__data.shape[1]
elif isinstance(self.__data, list):
self.__is_csr = False
self.__rows = len(self.__data)
self.__columns = len(self.__data[0])
else:
raise ValueError("Invalid input")
def plot_pam_gui(
self, data, ax, canvas, cl, ind_run, ind_fig, name="PAM", save_plots=False
):
"""
Scatterplot of data points, with colors according to cluster labels.
Centers of mass of the clusters are marked with an X.
:param data: input data sample | |
import numpy as np
import xarray as xr
import pandas as pd
import multiprocessing as mp
class PreprocessData:
"""Class instantiation of PreprocessData:
Here we will be preprocessing data for deep learning model training.
Attributes:
working_directory (str): The path to the directory where the deep learning preprocessing files will be saved and worked from.
stormpatch_path (str): Where the storm patch files were saved.
climate (str): The climate period to derive deep learning data for. Options are ``current`` or ``future``.
threshold1 (int): The UH threshold to use. This value will delineate some form of ``severe`` and ``non-severe`` storm patches.
mask (boolean): Whether the threshold will be applied within the storm patch mask or within the full storm patch. Defaults to ``False``.
num_cpus (int): Number of CPUs to use in a node for parallelizing extractions. Defaults to 36 (Cheyenne compute nodes contain 36).
"""
def __init__(self, working_directory, stormpatch_path, climate, threshold1, mask=False, num_cpus=36):
# class attributes
self.working_directory=working_directory
self.stormpatch_path=stormpatch_path
# sanity check
if climate!='current' and climate!='future':
raise Exception("Please enter current or future for climate option.")
else:
self.climate=climate
# class attributes
self.threshold1=threshold1
# string help
self.mask=mask
if not self.mask:
self.mask_str='nomask'
if self.mask:
self.mask_str='mask'
# cpus for parallelizing
self.num_cpus=num_cpus
def generate_time_full(self):
"""Creation of the full time period that will be looped through for extracting storm patch information.
Only considering December-May months due to warm season bias over the central CONUS. The CONUS1 simulations
were run for 2000-2013.
Returns:
Pandas date range (DatetimeIndex).
"""
        dates = pd.date_range('2000-10-01', '2013-09-30', freq='MS')
        return dates[dates.month.isin([12, 1, 2, 3, 4, 5])]
def create_data_indices(self, time):
"""Split the loaded data into categories based on the UH threshold chosen and save the first intermediary files. Here we create
the indices of the storm patches that satisfy UH criteria for later use.
Args:
time (DatetimeIndex): Time object from pandas date range.
"""
if not self.mask:
data=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_assemble=xr.Dataset({'grid':(['x'], np.argwhere(data.uh25_grid.values.max(axis=(1,2)) > self.threshold1)[:,0])})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc")
if self.mask:
data=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_assemble=xr.Dataset({'grid':(['x'], np.argwhere(data.uh25_grid.where(data.mask).max(axis=(1,2), skipna=True).values > self.threshold1)[:,0])})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc")
def parallelizing_indxs(self):
"""Activate the multiprocessing function to parallelize the functions.
"""
print(f"Starting jobs...")
timearray=self.generate_time_full()
pool1=mp.Pool(self.num_cpus)
for time in timearray:
print(f"Extracting {time.strftime('%Y-%m')} indices...")
pool1.apply_async(self.create_data_indices, args=([time]))
pool1.close()
pool1.join()
print(f"Completed the jobs.")
def generate_time_month(self, month_int):
"""Creation of the time array that will be looped through for extracting storm patch information.
Args:
month_int (int): The month being used for the time array (2000-2013 years).
Returns:
Pandas date range (DatetimeIndex) for the respective month.
"""
return pd.date_range('2000-10-01','2013-09-30',freq='MS')[(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==month_int)]
def apply_exceed_mask(self, data_var, data_mask, level):
"""Function to retain the patches that exceeded the threshold.
Args:
data_var (Xarray data array): The variable's data.
data_mask (Xarray data array): The storm patch mask.
level (int): The dataset level coordinate. This could be 0, 1, 2, or 3.
Returns:
Xarray data array of the variable for the storm patches that exceeded the UH threshold.
"""
return data_var.var_grid.sel(levels=level)[data_mask.grid.values,:,:]
def apply_notexceed_mask(self, data_var, data_mask, level):
"""Function to retain the patches that did not exceed the threshold.
Args:
data_var (Xarray data array): The variable's data.
data_mask (Xarray data array): The storm patch mask.
level (int): The dataset level coordinate. This could be 0, 1, 2, or 3.
Returns:
Numpy array of the variable for the storm patches that did not exceed the UH threshold.
"""
return np.delete(data_var.var_grid.sel(levels=level).values, data_mask.grid.values, axis=0)
def flatten_list(self, array):
"""Function to flatten the created list of Xarray data arrays.
Args:
array (list): The list of Xarray data arrays.
Returns:
Flattened list of Xarray data arrays.
"""
return [j for i in array for j in i.values]
def flatten_arraylist(self, array):
"""Function to flatten the created list of numpy arrays.
Args:
array (list): The list of numpy arrays.
Returns:
Flattened list of numpy arrays.
"""
return [j for i in array for j in i]
def month_translate(self, num):
"""Convert integer month to string month.
Args:
num (int): Input month.
Returns:
out (str): Input month as string.
Raises:
ValueError: If the month is not within the study's range (Dec-May).
"""
var={12:'December',
1:'January',
2:'February',
3:'March',
4:'April',
5:'May'}
        try:
            out = var[num]
            return out
        except KeyError:
            raise ValueError("Please enter month integer from Dec-May.")
def run_months(self, months=np.array([12,1,2,3,4,5]), uh=True, nouh=True):
"""Function to automate and parallelize the creation of the exceedance/nonexceedance files.
Args:
months (int array): Months to iterate through.
uh (boolean): Whether to compute analysis for threshold exceedances. Defaults to ``True``.
nouh(boolean): Whether to compute analysis for threshold non-exceedances. Defaults to ``True``.
"""
pool2=mp.Pool(self.num_cpus)
for mo in months:
if uh:
print(f"Creating {self.month_translate(mo)} patches of threshold exceedances...")
pool2.apply_async(self.create_files_exceed_threshold, args=([mo]))
if nouh:
print(f"Creating {self.month_translate(mo)} patches of threshold non-exceedances...")
pool2.apply_async(self.create_files_notexceed_threshold, args=([mo]))
pool2.close()
pool2.join()
print(f"Completed the jobs.")
def create_files_exceed_threshold(self, month_int):
"""Create and save files containing the environment patches for storms that exceeded the threshold.
Data files being opened contain the storm patches, not the full CONUS WRF domain.
Args:
month_int (int): Month for analysis.
"""
time_temp=self.generate_time_month(month_int)
data_temp_sev_1=[]; data_temp_sev_3=[]; data_temp_sev_5=[]; data_temp_sev_7=[]; data_evwd_sev_1=[]; data_evwd_sev_3=[]
data_euwd_sev_1=[]; data_euwd_sev_3=[]; data_euwd_sev_5=[]; data_euwd_sev_7=[]; data_evwd_sev_5=[]; data_evwd_sev_7=[]
data_qvap_sev_1=[]; data_qvap_sev_3=[]; data_qvap_sev_5=[]; data_qvap_sev_7=[]; data_dbzs_sev_1=[]; data_maxw_sev_1=[]
data_pres_sev_1=[]; data_pres_sev_3=[]; data_pres_sev_5=[]; data_pres_sev_7=[]; data_ctts_sev_1=[]; data_mask_sev_1=[]
data_wwnd_sev_1=[]; data_wwnd_sev_3=[]; data_wwnd_sev_5=[]; data_wwnd_sev_7=[]; data_uh25_sev_1=[]; data_uh03_sev_1=[]
for time in time_temp:
print(f"opening files for {time.strftime('%Y')}{time.strftime('%m')}")
data_mask=xr.open_mfdataset(
f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc",
combine='by_coords')
data_temp=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_tk_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_evwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_ev_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_euwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_eu_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_qvap=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_qvapor_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_pres=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_p_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_wwnd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_w_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_maxw=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_maxw_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_gen =xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_temp_sev_1.append(self.apply_exceed_mask(data_temp, data_mask, 0))
data_temp_sev_3.append(self.apply_exceed_mask(data_temp, data_mask, 1))
data_temp_sev_5.append(self.apply_exceed_mask(data_temp, data_mask, 2))
data_temp_sev_7.append(self.apply_exceed_mask(data_temp, data_mask, 3))
data_evwd_sev_1.append(self.apply_exceed_mask(data_evwd, data_mask, 0))
data_evwd_sev_3.append(self.apply_exceed_mask(data_evwd, data_mask, 1))
data_evwd_sev_5.append(self.apply_exceed_mask(data_evwd, data_mask, 2))
data_evwd_sev_7.append(self.apply_exceed_mask(data_evwd, data_mask, 3))
data_euwd_sev_1.append(self.apply_exceed_mask(data_euwd, data_mask, 0))
data_euwd_sev_3.append(self.apply_exceed_mask(data_euwd, data_mask, 1))
data_euwd_sev_5.append(self.apply_exceed_mask(data_euwd, data_mask, 2))
data_euwd_sev_7.append(self.apply_exceed_mask(data_euwd, data_mask, 3))
data_qvap_sev_1.append(self.apply_exceed_mask(data_qvap, data_mask, 0))
data_qvap_sev_3.append(self.apply_exceed_mask(data_qvap, data_mask, 1))
data_qvap_sev_5.append(self.apply_exceed_mask(data_qvap, data_mask, 2))
data_qvap_sev_7.append(self.apply_exceed_mask(data_qvap, data_mask, 3))
data_pres_sev_1.append(self.apply_exceed_mask(data_pres, data_mask, 0))
data_pres_sev_3.append(self.apply_exceed_mask(data_pres, data_mask, 1))
data_pres_sev_5.append(self.apply_exceed_mask(data_pres, data_mask, 2))
data_pres_sev_7.append(self.apply_exceed_mask(data_pres, data_mask, 3))
data_wwnd_sev_1.append(self.apply_exceed_mask(data_wwnd, data_mask, 0))
data_wwnd_sev_3.append(self.apply_exceed_mask(data_wwnd, data_mask, 1))
data_wwnd_sev_5.append(self.apply_exceed_mask(data_wwnd, data_mask, 2))
data_wwnd_sev_7.append(self.apply_exceed_mask(data_wwnd, data_mask, 3))
data_maxw_sev_1.append(data_maxw.var_grid[data_mask.grid.values,:,:])
data_dbzs_sev_1.append(data_gen.dbz_grid[data_mask.grid.values,:,:])
data_ctts_sev_1.append(data_gen.ctt_grid[data_mask.grid.values,:,:])
data_uh25_sev_1.append(data_gen.uh25_grid[data_mask.grid.values,:,:])
data_uh03_sev_1.append(data_gen.uh03_grid[data_mask.grid.values,:,:])
data_mask_sev_1.append(data_gen.mask[data_mask.grid.values,:,:])
data_temp_sev_1_patches=self.flatten_list(data_temp_sev_1)
data_temp_sev_3_patches=self.flatten_list(data_temp_sev_3)
data_temp_sev_5_patches=self.flatten_list(data_temp_sev_5)
data_temp_sev_7_patches=self.flatten_list(data_temp_sev_7)
data_evwd_sev_1_patches=self.flatten_list(data_evwd_sev_1)
data_evwd_sev_3_patches=self.flatten_list(data_evwd_sev_3)
data_evwd_sev_5_patches=self.flatten_list(data_evwd_sev_5)
data_evwd_sev_7_patches=self.flatten_list(data_evwd_sev_7)
data_euwd_sev_1_patches=self.flatten_list(data_euwd_sev_1)
data_euwd_sev_3_patches=self.flatten_list(data_euwd_sev_3)
data_euwd_sev_5_patches=self.flatten_list(data_euwd_sev_5)
data_euwd_sev_7_patches=self.flatten_list(data_euwd_sev_7)
data_qvap_sev_1_patches=self.flatten_list(data_qvap_sev_1)
data_qvap_sev_3_patches=self.flatten_list(data_qvap_sev_3)
data_qvap_sev_5_patches=self.flatten_list(data_qvap_sev_5)
data_qvap_sev_7_patches=self.flatten_list(data_qvap_sev_7)
data_pres_sev_1_patches=self.flatten_list(data_pres_sev_1)
data_pres_sev_3_patches=self.flatten_list(data_pres_sev_3)
data_pres_sev_5_patches=self.flatten_list(data_pres_sev_5)
data_pres_sev_7_patches=self.flatten_list(data_pres_sev_7)
data_wwnd_sev_1_patches=self.flatten_list(data_wwnd_sev_1)
data_wwnd_sev_3_patches=self.flatten_list(data_wwnd_sev_3)
data_wwnd_sev_5_patches=self.flatten_list(data_wwnd_sev_5)
data_wwnd_sev_7_patches=self.flatten_list(data_wwnd_sev_7)
data_maxw_sev_1_patches=self.flatten_list(data_maxw_sev_1)
data_dbzs_sev_1_patches=self.flatten_list(data_dbzs_sev_1)
data_ctts_sev_1_patches=self.flatten_list(data_ctts_sev_1)
data_uh25_sev_1_patches=self.flatten_list(data_uh25_sev_1)
data_uh03_sev_1_patches=self.flatten_list(data_uh03_sev_1)
data_mask_sev_1_patches=self.flatten_list(data_mask_sev_1)
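		# Stack the flattened patch lists into a single (patch, y, x) xarray Dataset
		# and write one netCDF file for the month.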
data_assemble=xr.Dataset({
'temp_sev_1':(['patch','y','x'], np.array(data_temp_sev_1_patches)), 'temp_sev_3':(['patch','y','x'], np.array(data_temp_sev_3_patches)),
'temp_sev_5':(['patch','y','x'], np.array(data_temp_sev_5_patches)), 'temp_sev_7':(['patch','y','x'], np.array(data_temp_sev_7_patches)),
'evwd_sev_1':(['patch','y','x'], np.array(data_evwd_sev_1_patches)), 'evwd_sev_3':(['patch','y','x'], np.array(data_evwd_sev_3_patches)),
'evwd_sev_5':(['patch','y','x'], np.array(data_evwd_sev_5_patches)), 'evwd_sev_7':(['patch','y','x'], np.array(data_evwd_sev_7_patches)),
'euwd_sev_1':(['patch','y','x'], np.array(data_euwd_sev_1_patches)), 'euwd_sev_3':(['patch','y','x'], np.array(data_euwd_sev_3_patches)),
'euwd_sev_5':(['patch','y','x'], np.array(data_euwd_sev_5_patches)), 'euwd_sev_7':(['patch','y','x'], np.array(data_euwd_sev_7_patches)),
'qvap_sev_1':(['patch','y','x'], np.array(data_qvap_sev_1_patches)), 'qvap_sev_3':(['patch','y','x'], np.array(data_qvap_sev_3_patches)),
'qvap_sev_5':(['patch','y','x'], np.array(data_qvap_sev_5_patches)), 'qvap_sev_7':(['patch','y','x'], np.array(data_qvap_sev_7_patches)),
'pres_sev_1':(['patch','y','x'], np.array(data_pres_sev_1_patches)), 'pres_sev_3':(['patch','y','x'], np.array(data_pres_sev_3_patches)),
'pres_sev_5':(['patch','y','x'], np.array(data_pres_sev_5_patches)), 'pres_sev_7':(['patch','y','x'], np.array(data_pres_sev_7_patches)),
'wwnd_sev_1':(['patch','y','x'], np.array(data_wwnd_sev_1_patches)), 'wwnd_sev_3':(['patch','y','x'], np.array(data_wwnd_sev_3_patches)),
'wwnd_sev_5':(['patch','y','x'], np.array(data_wwnd_sev_5_patches)), 'wwnd_sev_7':(['patch','y','x'], np.array(data_wwnd_sev_7_patches)),
'maxw_sev_1':(['patch','y','x'], np.array(data_maxw_sev_1_patches)), 'dbzs_sev_1':(['patch','y','x'], np.array(data_dbzs_sev_1_patches)),
'ctts_sev_1':(['patch','y','x'], np.array(data_ctts_sev_1_patches)), 'uh25_sev_1':(['patch','y','x'], np.array(data_uh25_sev_1_patches)),
'uh03_sev_1':(['patch','y','x'], np.array(data_uh03_sev_1_patches)), 'mask_sev_1':(['patch','y','x'], np.array(data_mask_sev_1_patches))})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_{time.strftime('%m')}.nc")
print(f"Exceedances for {time.strftime('%m')} complete...")
def create_files_notexceed_threshold(self, month_int):
"""Create files containing environment patches for storms that did not exceed the threshold.
Data files being opened contain the storm patches, not the full CONUS WRF domain.
Args:
month_int (int): Month for analysis.
"""
time_temp=self.generate_time_month(month_int)
data_temp_sev_1=[]; data_temp_sev_3=[]; data_temp_sev_5=[]; data_temp_sev_7=[]; data_evwd_sev_1=[]; data_evwd_sev_3=[]
data_euwd_sev_1=[]; data_euwd_sev_3=[]; data_euwd_sev_5=[]; data_euwd_sev_7=[]; data_evwd_sev_5=[]; data_evwd_sev_7=[]
data_qvap_sev_1=[]; data_qvap_sev_3=[]; data_qvap_sev_5=[]; data_qvap_sev_7=[]; data_dbzs_sev_1=[]; data_maxw_sev_1=[]
data_pres_sev_1=[]; data_pres_sev_3=[]; data_pres_sev_5=[]; data_pres_sev_7=[]; data_ctts_sev_1=[]; data_mask_sev_1=[]
data_wwnd_sev_1=[]; data_wwnd_sev_3=[]; data_wwnd_sev_5=[]; data_wwnd_sev_7=[]; data_uh25_sev_1=[]; data_uh03_sev_1=[]
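		# Same monthly loop as the exceedance case above, except that np.delete()
		# drops the masked (exceeding) patch indices, leaving only the storms that
		# stayed below the threshold.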
for time in time_temp:
print(f"opening files for {time.strftime('%Y')}{time.strftime('%m')}")
data_mask=xr.open_mfdataset(
f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc",
combine='by_coords')
data_temp=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_tk_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_evwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_ev_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_euwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_eu_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_qvap=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_qvapor_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_pres=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_p_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_wwnd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_w_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_maxw=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_maxw_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_gen =xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_temp_sev_1.append(self.apply_notexceed_mask(data_temp, data_mask, 0))
data_temp_sev_3.append(self.apply_notexceed_mask(data_temp, data_mask, 1))
data_temp_sev_5.append(self.apply_notexceed_mask(data_temp, data_mask, 2))
data_temp_sev_7.append(self.apply_notexceed_mask(data_temp, data_mask, 3))
data_evwd_sev_1.append(self.apply_notexceed_mask(data_evwd, data_mask, 0))
data_evwd_sev_3.append(self.apply_notexceed_mask(data_evwd, data_mask, 1))
data_evwd_sev_5.append(self.apply_notexceed_mask(data_evwd, data_mask, 2))
data_evwd_sev_7.append(self.apply_notexceed_mask(data_evwd, data_mask, 3))
data_euwd_sev_1.append(self.apply_notexceed_mask(data_euwd, data_mask, 0))
data_euwd_sev_3.append(self.apply_notexceed_mask(data_euwd, data_mask, 1))
data_euwd_sev_5.append(self.apply_notexceed_mask(data_euwd, data_mask, 2))
data_euwd_sev_7.append(self.apply_notexceed_mask(data_euwd, data_mask, 3))
data_qvap_sev_1.append(self.apply_notexceed_mask(data_qvap, data_mask, 0))
data_qvap_sev_3.append(self.apply_notexceed_mask(data_qvap, data_mask, 1))
data_qvap_sev_5.append(self.apply_notexceed_mask(data_qvap, data_mask, 2))
data_qvap_sev_7.append(self.apply_notexceed_mask(data_qvap, data_mask, 3))
data_pres_sev_1.append(self.apply_notexceed_mask(data_pres, data_mask, 0))
data_pres_sev_3.append(self.apply_notexceed_mask(data_pres, data_mask, 1))
data_pres_sev_5.append(self.apply_notexceed_mask(data_pres, data_mask, 2))
data_pres_sev_7.append(self.apply_notexceed_mask(data_pres, data_mask, 3))
data_wwnd_sev_1.append(self.apply_notexceed_mask(data_wwnd, data_mask, 0))
data_wwnd_sev_3.append(self.apply_notexceed_mask(data_wwnd, data_mask, 1))
data_wwnd_sev_5.append(self.apply_notexceed_mask(data_wwnd, data_mask, 2))
data_wwnd_sev_7.append(self.apply_notexceed_mask(data_wwnd, data_mask, 3))
data_maxw_sev_1.append(np.delete(data_maxw.var_grid.values, data_mask.grid.values, axis=0))
data_dbzs_sev_1.append(np.delete(data_gen.dbz_grid.values, data_mask.grid.values, axis=0))
data_ctts_sev_1.append(np.delete(data_gen.ctt_grid.values, data_mask.grid.values, axis=0))
data_uh25_sev_1.append(np.delete(data_gen.uh25_grid.values, data_mask.grid.values, axis=0))
data_uh03_sev_1.append(np.delete(data_gen.uh03_grid.values, data_mask.grid.values, axis=0))
data_mask_sev_1.append(np.delete(data_gen.mask.values, data_mask.grid.values, axis=0))
data_temp_sev_1_patches=self.flatten_arraylist(data_temp_sev_1)
data_temp_sev_3_patches=self.flatten_arraylist(data_temp_sev_3)
data_temp_sev_5_patches=self.flatten_arraylist(data_temp_sev_5)
data_temp_sev_7_patches=self.flatten_arraylist(data_temp_sev_7)
data_evwd_sev_1_patches=self.flatten_arraylist(data_evwd_sev_1)
data_evwd_sev_3_patches=self.flatten_arraylist(data_evwd_sev_3)
data_evwd_sev_5_patches=self.flatten_arraylist(data_evwd_sev_5)
data_evwd_sev_7_patches=self.flatten_arraylist(data_evwd_sev_7)
data_euwd_sev_1_patches=self.flatten_arraylist(data_euwd_sev_1)
data_euwd_sev_3_patches=self.flatten_arraylist(data_euwd_sev_3)
data_euwd_sev_5_patches=self.flatten_arraylist(data_euwd_sev_5)
data_euwd_sev_7_patches=self.flatten_arraylist(data_euwd_sev_7)
data_qvap_sev_1_patches=self.flatten_arraylist(data_qvap_sev_1)
data_qvap_sev_3_patches=self.flatten_arraylist(data_qvap_sev_3)
data_qvap_sev_5_patches=self.flatten_arraylist(data_qvap_sev_5)
data_qvap_sev_7_patches=self.flatten_arraylist(data_qvap_sev_7)
data_pres_sev_1_patches=self.flatten_arraylist(data_pres_sev_1)
data_pres_sev_3_patches=self.flatten_arraylist(data_pres_sev_3)
data_pres_sev_5_patches=self.flatten_arraylist(data_pres_sev_5)
data_pres_sev_7_patches=self.flatten_arraylist(data_pres_sev_7)
data_wwnd_sev_1_patches=self.flatten_arraylist(data_wwnd_sev_1)
data_wwnd_sev_3_patches=self.flatten_arraylist(data_wwnd_sev_3)
data_wwnd_sev_5_patches=self.flatten_arraylist(data_wwnd_sev_5)
		# handshake complete
self._trace_logger.debug("handshake complete")
self._extra.update(
peercert=self._tls_conn.get_peer_certificate()
)
if self._tls_post_handshake_callback:
self._trace_logger.debug("post handshake scheduled via callback")
task = asyncio.ensure_future(
self._tls_post_handshake_callback(self)
)
task.add_done_callback(self._tls_post_handshake_done)
self._chained_pending.add(task)
self._tls_post_handshake_callback = None
else:
self._tls_post_handshake(None)
def _tls_post_handshake_done(
self,
task: asyncio.Future,
) -> None:
self._chained_pending.discard(task)
try:
task.result()
except asyncio.CancelledError:
# canceled due to closure or something similar
pass
except BaseException as err:
self._tls_post_handshake(err)
else:
self._tls_post_handshake(None)
def _tls_post_handshake(
self,
exc: typing.Optional[BaseException],
) -> None:
self._trace_logger.debug("_tls_post_handshake called")
if exc is not None:
if self._waiter is not None and not self._waiter.done():
self._waiter.set_exception(exc)
self._fatal_error(exc, "Fatal error on post-handshake callback")
return
self._tls_read_wants_write = False
self._tls_write_wants_read = False
self._state = _State.TLS_OPEN
self._loop.add_reader(self._raw_fd, self._read_ready)
if not self._tls_was_starttls:
self._loop.call_soon(self._protocol.connection_made, self)
if self._waiter is not None:
self._loop.call_soon(self._waiter.set_result, None)
def _tls_do_shutdown(self) -> None:
self._trace_logger.debug("_tls_do_shutdown called")
if self._state != _State.TLS_SHUTTING_DOWN:
raise self._invalid_state("_tls_do_shutdown called")
assert isinstance(self._sock, OpenSSL.SSL.Connection)
try:
self._sock.shutdown()
except OpenSSL.SSL.WantReadError:
self._trace_logger.debug("registering reader for _tls_shutdown")
self._loop.add_reader(self._raw_fd, self._tls_shutdown)
return
except OpenSSL.SSL.WantWriteError:
self._trace_logger.debug("registering writer for _tls_shutdown")
self._loop.add_writer(self._raw_fd, self._tls_shutdown)
return
except Exception as exc:
# force_close will take care of removing rw handlers
self._fatal_error(exc, "Fatal error on tls shutdown")
return
except BaseException:
self._remove_rw()
raise
self._remove_rw()
self._state = _State.TLS_SHUT_DOWN
# continue to raw shut down
self._raw_shutdown()
def _tls_shutdown(self) -> None:
self._state = _State.TLS_SHUTTING_DOWN
self._tls_do_shutdown()
def _raw_shutdown(self) -> None:
self._remove_rw()
try:
self._rawsock.shutdown(socket.SHUT_RDWR)
except OSError:
# we cannot do anything anyway if this fails
pass
self._force_close(None)
def _read_ready(self) -> None:
assert self._state is not None
if self._state.tls_started and self._tls_write_wants_read:
self._tls_write_wants_read = False
self._write_ready()
if self._buffer:
self._trace_logger.debug("_read_ready: add writer for more"
" data")
self._loop.add_writer(self._raw_fd, self._write_ready)
if self._state.eof_received:
# no further reading
return
try:
data = self._sock.recv(self.MAX_SIZE)
except (BlockingIOError, InterruptedError, OpenSSL.SSL.WantReadError):
pass
except OpenSSL.SSL.WantWriteError:
assert self._state.tls_started
self._tls_read_wants_write = True
self._trace_logger.debug("_read_ready: swap reader for writer")
self._loop.remove_reader(self._raw_fd)
self._loop.add_writer(self._raw_fd, self._write_ready)
except OpenSSL.SSL.SysCallError as exc:
if self._state in (_State.TLS_SHUT_DOWN,
_State.TLS_SHUTTING_DOWN,
_State.CLOSED):
self._trace_logger.debug(
"_read_ready: ignoring syscall exception during shutdown: "
"%s",
exc,
)
else:
self._fatal_error(exc,
"Fatal read error on STARTTLS transport")
except Exception as err:
self._fatal_error(err, "Fatal read error on STARTTLS transport")
return
else:
if data:
self._protocol.data_received(data)
else:
keep_open = False
try:
keep_open = bool(self._protocol.eof_received())
finally:
self._eof_received(keep_open)
def _write_ready(self) -> None:
assert self._state is not None
if self._tls_read_wants_write:
self._tls_read_wants_write = False
self._read_ready()
if not self._paused and not self._state.eof_received:
self._trace_logger.debug("_write_ready: add reader for more"
" data")
self._loop.add_reader(self._raw_fd, self._read_ready)
# do not send data during handshake!
if self._buffer and self._state != _State.TLS_HANDSHAKING:
try:
nsent = self._send_wrap.send(self._buffer)
except (BlockingIOError, InterruptedError,
OpenSSL.SSL.WantWriteError):
nsent = 0
except OpenSSL.SSL.WantReadError:
nsent = 0
assert self._state.tls_started
self._tls_write_wants_read = True
self._trace_logger.debug(
"_write_ready: swap writer for reader")
self._loop.remove_writer(self._raw_fd)
self._loop.add_reader(self._raw_fd, self._read_ready)
except OpenSSL.SSL.SysCallError as exc:
if self._state in (_State.TLS_SHUT_DOWN,
_State.TLS_SHUTTING_DOWN,
_State.CLOSED):
self._trace_logger.debug(
"_write_ready: ignoring syscall exception during "
"shutdown: %s",
exc,
)
else:
self._fatal_error(exc,
"Fatal write error on STARTTLS "
"transport")
except Exception as err:
self._fatal_error(err,
"Fatal write error on STARTTLS "
"transport")
return
if nsent:
del self._buffer[:nsent]
if not self._buffer:
if not self._tls_read_wants_write:
self._trace_logger.debug("_write_ready: nothing more to write,"
" removing writer")
self._loop.remove_writer(self._raw_fd)
if self._closing:
if self._state.tls_started:
self._tls_shutdown()
else:
self._raw_shutdown()
def _eof_received(self, keep_open: bool) -> None:
assert self._state is not None
self._trace_logger.debug("_eof_received: removing reader")
self._loop.remove_reader(self._raw_fd)
if self._state.tls_started:
assert self._tls_conn is not None
if self._tls_conn.get_shutdown() & OpenSSL.SSL.RECEIVED_SHUTDOWN:
# proper TLS shutdown going on
if keep_open:
self._state = _State.TLS_EOF_RECEIVED
else:
self._tls_shutdown()
else:
if keep_open:
self._trace_logger.warning(
"result of eof_received() ignored as shut down is"
" improper",
)
self._fatal_error(
ConnectionError("Underlying transport closed"),
"unexpected eof_received"
)
else:
if keep_open:
self._state = _State.RAW_EOF_RECEIVED
else:
self._raw_shutdown()
# public API
def abort(self) -> None:
"""
Immediately close the stream, without sending remaining buffers or
performing a proper shutdown.
"""
if self._state == _State.CLOSED:
self._invalid_state("abort() called")
return
self._force_close(None)
def can_write_eof(self) -> bool:
"""
Return :data:`False`.
.. note::
Writing of EOF (i.e. closing the sending direction of the stream) is
theoretically possible. However, it was deemed by the author that
the case is rare enough to neglect it for the sake of implementation
simplicity.
"""
return False
def close(self) -> None:
"""
Close the stream. This performs a proper stream shutdown, except if the
stream is currently performing a TLS handshake. In that case, calling
:meth:`close` is equivalent to calling :meth:`abort`.
Otherwise, the transport waits until all buffers are transmitted.
"""
if self._state == _State.CLOSED:
self._invalid_state("close() called")
return
if self._state == _State.TLS_HANDSHAKING:
# hard-close
self._force_close(None)
elif self._state == _State.TLS_SHUTTING_DOWN:
# shut down in progress, nothing to do
pass
elif self._buffer:
# there is data to be send left, first wait for it to transmit ...
self._closing = True
elif self._state is not None and self._state.tls_started:
# normal TLS state, nothing left to transmit, shut down
self._tls_shutdown()
else:
# normal non-TLS state, nothing left to transmit, close
self._raw_shutdown()
def get_extra_info(
self,
name: str,
default: typing.Optional[typing.Any] = None,
) -> typing.Any:
"""
The following extra information is available:
* ``socket``: the underlying :mod:`socket` object
* ``sslcontext``: the :class:`OpenSSL.SSL.Context` object to use (this
may be :data:`None` until :meth:`starttls` has been called)
* ``ssl_object``: :class:`OpenSSL.SSL.Connection` object (:data:`None`
if TLS is not enabled (yet))
* ``peername``: return value of :meth:`socket.Socket.getpeername`
* ``peer_hostname``: The `peer_hostname` value passed to the
constructor.
* ``server_hostname``: The `server_hostname` value passed to the
constructor.
"""
return self._extra.get(name, default)
async def starttls(
self,
ssl_context: typing.Optional[OpenSSL.SSL.Context] = None,
post_handshake_callback: typing.Optional[
PostHandshakeCallback
] = None,
) -> None:
"""
Start a TLS stream on top of the socket. This is an invalid operation
if the stream is not in RAW_OPEN state.
If `ssl_context` is set, it overrides the `ssl_context` passed to the
constructor. If `post_handshake_callback` is set, it overrides the
`post_handshake_callback` passed to the constructor.
.. versionchanged:: 0.4
This method is now a barrier with respect to reads and writes:
before the handshake is completed (including the post handshake
callback, if any), no data is received or sent.
"""
if self._state != _State.RAW_OPEN or self._closing:
raise self._invalid_state("starttls() called")
if ssl_context is not None:
self._ssl_context = ssl_context
self._extra.update(
sslcontext=ssl_context
)
else:
assert self._ssl_context_factory is not None
self._ssl_context = self._ssl_context_factory(self)
if post_handshake_callback is not None:
self._tls_post_handshake_callback = post_handshake_callback
self._waiter = asyncio.Future()
self._waiter.add_done_callback(self._waiter_done)
self._initiate_tls()
try:
await self._waiter
finally:
self._waiter = None
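	# Hedged usage sketch (not part of the original source); assumes a transport
	# created with use_starttls=True and an OpenSSL context ``ctx``:
	#
	#     await transport.starttls(ssl_context=ctx)
	#
	# starttls() acts as a barrier: no application data is received or sent until
	# the handshake and the optional post-handshake callback have finished.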
def write(self, data: typing.Union[bytes, bytearray, memoryview]) -> None:
"""
Write data to the transport. This is an invalid operation if the stream
is not writable, that is, if it is closed. During TLS negotiation, the
data is buffered.
"""
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be byte-ish (%r)',
type(data))
if (self._state is None or
not self._state.is_writable or
self._closing):
raise self._invalid_state("write() called")
if not data:
return
if not self._buffer:
self._loop.add_writer(self._raw_fd, self._write_ready)
self._buffer.extend(data)
def write_eof(self) -> None:
"""
Writing the EOF has not been implemented, for the sake of simplicity.
"""
raise NotImplementedError("Cannot write_eof() on STARTTLS transport")
def can_starttls(self) -> bool:
"""
Return :data:`True`.
"""
return True
def is_closing(self) -> bool:
return (self._state == _State.TLS_SHUTTING_DOWN or
self._state == _State.CLOSED)
async def create_starttls_connection(
loop: asyncio.BaseEventLoop,
protocol_factory: typing.Callable[[], asyncio.Protocol],
host: typing.Optional[str] = None,
port: typing.Optional[int] = None,
*,
sock: typing.Optional[socket.socket] = None,
ssl_context_factory: typing.Optional[SSLContextFactory] = None,
use_starttls: bool = False,
local_addr: typing.Any = None,
**kwargs # type: typing.Any
) -> typing.Tuple[asyncio.Transport, asyncio.Protocol]:
"""
Create a connection which can later be upgraded to use TLS.
.. versionchanged:: 0.4
The `local_addr` argument was added.
:param loop: The event loop to use.
:type loop: :class:`asyncio.BaseEventLoop`
:param protocol_factory: Factory for the protocol for the connection
:param host: The host name or address to connect to
:type host: :class:`str` or :data:`None`
:param port: The port to connect to
:type port: :class:`int` or :data:`None`
:param sock: A socket to wrap (conflicts with `host` and `port`)
:type sock: :class:`socket.socket`
:param ssl_context_factory: Function which returns a
:class:`OpenSSL.SSL.Context` to use for TLS operations
:param use_starttls: Flag to control whether TLS is negotiated right away
or deferredly.
:type use_starttls: :class:`bool`
:param local_addr: Address to bind to
This is roughly a copy of the asyncio implementation of
:meth:`asyncio.BaseEventLoop.create_connection`. It returns a pair
``(transport, protocol)``, where `transport` is a newly created
:class:`STARTTLSTransport` instance. Further keyword arguments are
forwarded to the constructor of :class:`STARTTLSTransport`.
`loop` must be a :class:`asyncio.BaseEventLoop`, with support for
	:meth:`asyncio.BaseEventLoop.add_reader` and the corresponding writer
	methods.
	"""
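# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): drives the STARTTLS
# flow defined above.  It relies on the module-level asyncio and OpenSSL
# imports the original file performs; ``ExampleProtocol``, the host name, the
# port and the TLS method constant are illustrative assumptions only.
async def _example_starttls_usage() -> None:
	class ExampleProtocol(asyncio.Protocol):
		def data_received(self, data: bytes) -> None:
			print("received", len(data), "bytes")

	loop = asyncio.get_event_loop()
	transport, protocol = await create_starttls_connection(
		loop,
		ExampleProtocol,
		"example.invalid",   # hypothetical host
		5222,                # hypothetical port
		use_starttls=True,   # defer TLS until starttls() is awaited
	)
	# ... the application-level STARTTLS negotiation would happen here ...
	ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
	# the returned transport is a STARTTLSTransport, so starttls() is available
	await transport.starttls(ssl_context=ctx)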
# encoding: UTF-8
print(u'启动load vtEngine.py')
import shelve
from collections import OrderedDict
import os,sys
import copy
from pymongo import MongoClient, ASCENDING
from pymongo.errors import ConnectionFailure,AutoReconnect
#import vnpy.trader.mongo_proxy
from vnpy.trader.vtEvent import Event as vn_event
from vnpy.trader.language import text
#from vnpy.trader.app.ctaStrategy.ctaEngine import CtaEngine
#from vnpy.trader.app.dataRecorder.drEngine import DrEngine
#from vnpy.trader.app.riskManager.rmEngine import RmEngine
from vnpy.trader.vtFunction import loadMongoSetting, getTempPath
from vnpy.trader.vtGateway import *
from vnpy.trader.app import (ctaStrategy,cmaStrategy, riskManager)
from vnpy.trader.setup_logger import setup_logger
import traceback
import psutil
try:
from .util_mail import *
except:
print('import util_mail fail',file=sys.stderr)
try:
from .util_wechat import *
except:
print('import util_wechat fail',file=sys.stderr)
LOG_DB_NAME = 'vt_logger'
########################################################################
class MainEngine(object):
"""主引擎"""
#----------------------------------------------------------------------
def __init__(self, eventEngine):
"""Constructor"""
		# Record today's date
self.todayDate = datetime.now().strftime('%Y%m%d')
		# Create the event engine
self.eventEngine = eventEngine
self.eventEngine.start()
		# Create the data engine
self.dataEngine = DataEngine(self, self.eventEngine)
		# MongoDB related members
		self.dbClient = None    # MongoDB client object
self.db_has_connected = False
		# Gateway instances
self.gatewayDict = OrderedDict()
self.gatewayDetailList = []
		# Application module instances
self.appDict = OrderedDict()
self.appDetailList = []
		# Extension modules
		self.ctaEngine = None   # CtaEngine(self, self.eventEngine)  # CTA strategy engine
		self.drEngine = None    # DrEngine(self, self.eventEngine)   # data recording engine
		self.rmEngine = None    # RmEngine(self, self.eventEngine)   # risk management engine
		self.cmaEngine = None   # cross-market arbitrage engine
self.connected_gw_names = []
self.save_contract_counter = 0
self.logger = None
self.createLogger()
# ----------------------------------------------------------------------
def addGateway(self, gatewayModule,gateway_name=EMPTY_STRING):
"""添加底层接口"""
# 是否使用指定的gateway_name
if gateway_name==EMPTY_STRING:
gatewayName = gatewayModule.gatewayName
else:
gatewayName = gateway_name
self.writeLog(u'add gateway:{}'.format(gateway_name))
		# Create the gateway instance
self.gatewayDict[gatewayName] = gatewayModule.gatewayClass(self.eventEngine,
gatewayName)
		# Configure gateway query polling
if gatewayModule.gatewayQryEnabled:
self.gatewayDict[gatewayName].setQryEnabled(gatewayModule.gatewayQryEnabled)
		# Save the gateway details
d = {
'gatewayName': gatewayName, #gatewayModule.gatewayName,
'gatewayDisplayName': gatewayName, #gatewayModule.gatewayDisplayName,
'gatewayType': gatewayModule.gatewayType
}
self.gatewayDetailList.append(d)
# ----------------------------------------------------------------------
def addApp(self, appModule):
"""添加上层应用"""
appName = appModule.appName
		# Create the application engine instance
self.appDict[appName] = appModule.appEngine(self, self.eventEngine)
		# Attach the app engine instance as an attribute of the main engine
self.__dict__[appName] = self.appDict[appName]
		# Keep the old self.ctaEngine/self.rmEngine attributes working
if appName == ctaStrategy.appName:
self.ctaEngine = self.appDict[appName]
elif appName == riskManager.appName:
self.rmEngine = self.appDict[appName]
elif appName == cmaStrategy.appName:
self.cmaEngine = self.appDict[appName]
		# Save the application info
d = {
'appName': appModule.appName,
'appDisplayName': appModule.appDisplayName,
'appWidget': appModule.appWidget,
'appIco': appModule.appIco
}
self.appDetailList.append(d)
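	# Hedged bootstrap sketch (not part of the original source); the event engine
	# and gateway module names below are hypothetical:
	#
	#     me = MainEngine(EventEngine())
	#     me.addGateway(ctpGateway)      # hypothetical gateway module
	#     me.addApp(ctaStrategy)
	#     me.addApp(riskManager)
	#     me.connect('CTP')
	#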
# ----------------------------------------------------------------------
def connect(self, gatewayName):
"""连接特定名称的接口"""
if gatewayName in self.gatewayDict:
			self.writeLog(u'vtEngine connect :{}'.format(gatewayName))
gateway = self.gatewayDict[gatewayName]
gateway.connect()
if gatewayName not in self.connected_gw_names:
self.connected_gw_names.append(gatewayName)
			# Automatically connect to the database once a gateway is connected
self.dbConnect()
return True
else:
self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
return False
def checkGatewayStatus(self,gatewayName):
"""check gateway connect status"""
		# Piggyback on the gateway status check to persist contract data
self.save_contract_counter += 1
if self.save_contract_counter > 60 and self.dataEngine is not None:
self.writeLog(u'保存持久化合约数据')
self.dataEngine.saveContracts()
self.save_contract_counter = 0
if gatewayName in self.gatewayDict:
gateway = self.gatewayDict[gatewayName]
return gateway.checkStatus()
else:
self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
return False
def qryStatus(self):
"""查询Status"""
status_dict = OrderedDict()
# gateway Status
gw_status_dict = OrderedDict()
for k,g in self.gatewayDict.items():
gw_status_dict[k] = g.checkStatus()
status_dict['gateways']=gw_status_dict
# ctaEngine Status
if self.ctaEngine:
tick_dict,strategy_dict = self.ctaEngine.qryStatus()
status_dict['ticks']=tick_dict
status_dict['strategies'] = strategy_dict
# cpu/mem status
cpuPercent = psutil.cpu_percent()
memoryPercent = psutil.virtual_memory().percent
server_info_dict = {'cpu':cpuPercent, 'mem':memoryPercent}
status_dict['server'] = server_info_dict
#
event = vn_event(type_=EVENT_STATUS)
event.dict_['data']= status_dict
self.eventEngine.put(event)
return True
# ----------------------------------------------------------------------
def subscribe(self, subscribeReq, gatewayName):
"""订阅特定接口的行情"""
# 处理没有输入gatewayName的情况
if gatewayName is None or len(gatewayName) == 0:
if len(self.connected_gw_names) == 0:
self.writeError(u'vtEngine.subscribe, no connected gateway')
return
for gw_name in self.connected_gw_names:
gateway = self.gatewayDict[gw_name]
gateway.subscribe(subscribeReq)
return
if gatewayName in self.gatewayDict:
gateway = self.gatewayDict[gatewayName]
gateway.subscribe(subscribeReq)
else:
self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
# ----------------------------------------------------------------------
def sendOrder(self, orderReq, gatewayName):
"""对特定接口发单"""
# 如果风控检查失败则不发单
if self.rmEngine and not self.rmEngine.checkRisk(orderReq):
self.writeCritical(u'风控检查不通过,gw:{},{} {} {} p:{} v:{}'.format(gatewayName, orderReq.direction, orderReq.offset, orderReq.symbol, orderReq.price, orderReq.volume))
return ''
if self.rmEngine and self.rmEngine.active\
and self.dataEngine and \
self.dataEngine.check_self_trade_risk(vtSymbol=orderReq.symbol,direction=orderReq.direction, price=orderReq.price, gatewayName=gatewayName):
self.writeCritical(
u'自成交检查不通过,gw:{},{} {} {} p:{} v:{}'.format(gatewayName, orderReq.direction, orderReq.offset,
orderReq.symbol, orderReq.price, orderReq.volume))
return ''
if gatewayName in self.gatewayDict:
gateway = self.gatewayDict[gatewayName]
return gateway.sendOrder(orderReq)
else:
self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
# ----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq, gatewayName):
"""对特定接口撤单"""
if gatewayName in self.gatewayDict:
gateway = self.gatewayDict[gatewayName]
gateway.cancelOrder(cancelOrderReq)
else:
self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
# ----------------------------------------------------------------------
def qryAccount(self, gatewayName):
"""查询特定接口的账户"""
if gatewayName in self.gatewayDict:
gateway = self.gatewayDict[gatewayName]
gateway.qryAccount()
else:
self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
def getAccountInfo(self,gatewayName=None):
"""读取风控的账号与仓位数据
# Added by IncenseLee
仅支持一个账号。不支持多账号
以后支持跨市场套利才更新吧。
return 当前账号的权益、可用资金、当前仓位比例, 投资仓位比例上限
"""
if gatewayName is None and len(self.connected_gw_names) > 1:
raise ValueError(u'多个网关连接,须指明gateway_name参数')
if self.rmEngine:
return self.rmEngine.getAccountInfo()
else:
return 0, 0, 0, 0
def qryAccountNo(self,gatewayName):
"""
		Return the account ID for the given gateway name.
:param gatewayName:
:return:
"""
if gatewayName in self.gatewayDict:
gateway = self.gatewayDict[gatewayName]
if gateway.accountID:
return copy.copy(gateway.accountID)
return gatewayName
# ----------------------------------------------------------------------
def qryPosition(self, gatewayName):
"""查询特定接口的持仓"""
if gatewayName in self.gatewayDict:
gateway = self.gatewayDict[gatewayName]
gateway.qryPosition()
else:
self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
# ----------------------------------------------------------------------
def exit(self):
"""退出程序前调用,保证正常退出"""
# 安全关闭所有接口
for gateway in list(self.gatewayDict.values()):
self.writeLog(u'vtEngine退出,关闭接口')
gateway.close()
		# Stop the event engine
self.eventEngine.stop()
		# Stop the data recording engine
if self.drEngine:
self.drEngine.stop()
		# Save the data engine's contract data to disk
self.dataEngine.saveContracts()
def disconnect(self, gateway_name=EMPTY_STRING):
"""断开底层gateway的连接"""
try:
			# Only disconnect the specified gateway
if gateway_name != EMPTY_STRING:
if gateway_name in self.gatewayDict:
self.writeLog(u'获取{} gateway'.format(gateway_name))
gateway = self.gatewayDict[gateway_name]
gateway.close()
if gateway_name in self.connected_gw_names:
self.writeLog(u'移除connected_gw_names[{}]'.format(gateway_name))
self.connected_gw_names.remove(gateway_name)
return
else:
self.writeLog(u'gateway接口不存在:%s' % gateway_name)
			# Disconnect all gateways
for gateway in list(self.gatewayDict.values()):
self.writeLog(u'vtEngine.disconnect(),断开所有的gateway')
gateway.close()
return True
except Exception as ex:
print( u'vtEngine.disconnect Exception:{0} '.format(str(ex)))
return False
# ----------------------------------------------------------------------
def writeLog(self, content):
"""快速发出日志事件"""
log = VtLogData()
log.logContent = content
event = vn_event(type_ = EVENT_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
		# Also write to the local log file
if self.logger is not None:
self.logger.info(content)
else:
self.createLogger()
def createLogger(self):
"""
		Create the logger.
:return:
"""
currentFolder = os.path.abspath(os.path.join(os.getcwd(), 'logs'))
if os.path.isdir(currentFolder):
			# If a 'logs' subdirectory exists under the working directory, use it
path = currentFolder
else:
			# Otherwise, use the default save directory vnpy/trader/app/ctaStrategy/data
path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'logs'))
filename = os.path.abspath(os.path.join(path, 'vnpy'))
print( u'create logger:{}'.format(filename))
self.logger = setup_logger(filename=filename, name='vnpy', debug=True)
# ----------------------------------------------------------------------
def writeError(self, content):
"""快速发出错误日志事件"""
log = VtErrorData()
log.errorMsg = content
event = vn_event(type_=EVENT_ERROR)
event.dict_['data'] = log
self.eventEngine.put(event)
		# Also write to the local log file
if self.logger is not None:
self.logger.error(content)
else:
self.createLogger()
		# Send email/WeChat notifications
#try:
# if len(self.gatewayDetailList) > 0:
# target = self.gatewayDetailList[0]['gatewayName']
# else:
# target = WECHAT_GROUP["DEBUG_01"]
# sendWeChatMsg(content, target=target, level=WECHAT_LEVEL_ERROR)
#except Exception as ex:
# print(u'send wechat exception:{}'.format(str(ex)),file=sys.stderr)
# ----------------------------------------------------------------------
def writeWarning(self, content):
"""快速发出告警日志事件"""
log = VtLogData()
log.logContent = content
event = vn_event(type_=EVENT_WARNING)
event.dict_['data'] = log
self.eventEngine.put(event)
print('{}'.format(datetime.now()), file=sys.stderr)
print(content, file=sys.stderr)
		# Also write to the local log file
if self.logger is not None:
self.logger.warning(content)
else:
print(content,file=sys.stderr)
self.createLogger()
		# Send email notification
try:
sendmail(subject=u'{0} Warning'.format('_'.join(self.connected_gw_names)), msgcontent=content)
except:
pass
		# Send WeChat notification
#try:
# if len(self.gatewayDetailList) > 0:
# target = self.gatewayDetailList[0]['gatewayName']
# else:
# target = WECHAT_GROUP["DEBUG_01"]
# sendWeChatMsg(content, target=target, level=WECHAT_LEVEL_WARNING)
#except Exception as ex:
# print(u'send wechat exception:{}'.format(str(ex)), file=sys.stderr)
# ----------------------------------------------------------------------
def writeNotification(self, content):
"""快速发出通知日志事件"""
log = VtLogData()
log.logContent = content
event = vn_event(type_=EVENT_NOTIFICATION)
event.dict_['data'] = log
self.eventEngine.put(event)
		# Send email notification
try:
sendmail(subject=u'{0} Notification'.format('_'.join(self.connected_gw_names)), msgcontent=content)
except:
pass
		# Send WeChat notification
# try:
# if len(self.gatewayDetailList) > 0:
# target = self.gatewayDetailList[0]['gatewayName']
# else:
# target = WECHAT_GROUP["DEBUG_01"]
# sendWeChatMsg(content, target=target, level=WECHAT_LEVEL_INFO)
# except Exception as ex:
# print(u'send wechat exception:{}'.format(str(ex)), file=sys.stderr)
# ----------------------------------------------------------------------
def writeCritical(self, content):
"""快速发出严重错误日志事件"""
log = VtLogData()
log.logContent = content
event = vn_event(type_=EVENT_CRITICAL)
event.dict_['data'] = log
self.eventEngine.put(event)
print('{}'.format(datetime.now()), file=sys.stderr)
print(content, file=sys.stderr)
		# Also write to the local log file
if self.logger:
self.logger.critical(content)
else:
self.createLogger()
		# Send email notification
try:
sendmail(subject=u'{0} Critical'.format('_'.join(self.connected_gw_names)), msgcontent=content)
except:
pass
		## Send WeChat notification
#try:
# # if len(self.gatewayDetailList) > 0:
# target = self.gatewayDetailList[0]['gatewayName']
# else:
# target = WECHAT_GROUP["DEBUG_01"]
# sendWeChatMsg(content, target=target, level=WECHAT_LEVEL_FATAL)
#except:
# pass
#
# ----------------------------------------------------------------------
def dbConnect(self):
"""连接MongoDB数据库"""
if not self.dbClient:
			# Read the MongoDB settings
host, port, logging = loadMongoSetting()
try:
				# Set the MongoDB connection timeout to 0.5 seconds
self.dbClient = MongoClient(host, port, connectTimeoutMS=500)
				# Call server_info to check server status, so a connection that did not actually succeed is caught
self.dbClient.server_info()
self.writeLog(text.DATABASE_CONNECTING_COMPLETED)
self.db_has_connected = True
				# If log recording is enabled, register the log event listener
#if logging:
# self.eventEngine.register(EVENT_LOG, self.dbLogging)
except ConnectionFailure:
self.dbClient = None
self.writeError(text.DATABASE_CONNECTING_FAILED)
self.db_has_connected = False
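	# Hedged note (not part of the original source): the dbInsert/dbQuery helpers
	# below also call dbConnect() lazily whenever the client is missing, so an
	# explicit call is mainly useful to fail fast at startup.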
# ----------------------------------------------------------------------
def dbInsert(self, dbName, collectionName, d):
"""向MongoDB中插入数据,d是具体数据"""
try:
if self.dbClient:
db = self.dbClient[dbName]
collection = db[collectionName]
collection.insert_one(d)
else:
self.writeLog(text.DATA_INSERT_FAILED)
if self.db_has_connected:
self.writeLog(u'重新尝试连接数据库')
self.dbConnect()
except AutoReconnect as ex:
self.writeError(u'数据库连接断开重连:{}'.format(str(ex)))
time.sleep(1)
except ConnectionFailure:
self.dbClient = None
self.writeError(u'数据库连接断开')
if self.db_has_connected:
self.writeLog(u'重新尝试连接数据库')
self.dbConnect()
except Exception as ex:
self.writeError(u'dbInsert exception:{}'.format(str(ex)))
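	# Hedged usage sketch (not part of the original source); the database name,
	# collection name and ``bar`` object below are hypothetical:
	#
	#     mainEngine.dbInsert('VnTrader_1Min_Db', 'rb1905', bar.__dict__)
	#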
def dbInsertMany(self,dbName, collectionName, data_list,ordered=True):
"""
		Insert multiple documents into MongoDB; data_list is a list of documents.
:param dbName:
:param collectionName:
:param data_list:
		:param ordered: whether to ignore insert errors
:return:
"""
if not isinstance(data_list,list):
self.writeLog(text.DATA_INSERT_FAILED)
return
try:
if self.dbClient:
db = self.dbClient[dbName]
collection = db[collectionName]
collection.insert_many(data_list, ordered = ordered)
else:
self.writeLog(text.DATA_INSERT_FAILED)
if self.db_has_connected:
self.writeLog(u'重新尝试连接数据库')
self.dbConnect()
except AutoReconnect as ex:
self.writeError(u'数据库连接断开重连:{}'.format(str(ex)))
time.sleep(1)
except ConnectionFailure:
self.dbClient = None
self.writeError(u'数据库连接断开')
if self.db_has_connected:
self.writeLog(u'重新尝试连接数据库')
self.dbConnect()
except Exception as ex:
self.writeError(u'dbInsertMany exception:{}'.format(str(ex)))
# ----------------------------------------------------------------------
def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING):
"""从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针"""
try:
if self.dbClient:
db = self.dbClient[dbName]
collection = db[collectionName]
if sortKey:
					cursor = collection.find(d).sort(sortKey, sortDirection)    # sort the query results
else:
cursor = collection.find(d)
if cursor:
return list(cursor)
else:
return []
else:
self.writeLog(text.DATA_QUERY_FAILED)
if self.db_has_connected:
self.writeLog(u'重新尝试连接数据库')
self.dbConnect()
except AutoReconnect as ex:
self.writeError(u'数据库连接断开重连:{}'.format(str(ex)))
time.sleep(1)
except ConnectionFailure:
self.dbClient = None
self.writeError(u'数据库连接断开')
if self.db_has_connected:
self.writeLog(u'重新尝试连接数据库')
self.dbConnect()
except Exception as ex:
self.writeError(u'dbQuery exception:{}'.format(str(ex)))
return []
def dbQueryBySort(self, dbName, collectionName, d, sortName, sortType, limitNum=0):
"""从MongoDB中读取数据,d是查询要求,sortName是排序的字段,sortType是排序类型
返回的是数据库查询的指针"""
try:
if self.dbClient:
db = self.dbClient[dbName]
collection = db[collectionName]
if limitNum > 0:
cursor = collection.find(d).sort(sortName, sortType).limit(limitNum)
else:
cursor = collection.find(d).sort(sortName, sortType)
if cursor:
return list(cursor)
else:
return []
else:
self.writeLog(text.DATA_QUERY_FAILED)
if self.db_has_connected:
self.writeLog(u'重新尝试连接数据库')
					self.dbConnect()
		except Exception as ex:
			self.writeError(u'dbQueryBySort exception:{}'.format(str(ex)))
			return []
		if self.start_process is not None:
oprot.writeFieldBegin('start_process', TType.BOOL, 1)
oprot.writeBool(self.start_process)
oprot.writeFieldEnd()
if self.cli_args is not None:
oprot.writeFieldBegin('cli_args', TType.STRUCT, 2)
self.cli_args.write(oprot)
oprot.writeFieldEnd()
if self.server_path is not None:
oprot.writeFieldBegin('server_path', TType.STRING, 3)
oprot.writeString(self.server_path.encode('utf-8') if sys.version_info[0] == 2 else self.server_path)
oprot.writeFieldEnd()
if self.log_path is not None:
oprot.writeFieldBegin('log_path', TType.STRING, 4)
oprot.writeString(self.log_path.encode('utf-8') if sys.version_info[0] == 2 else self.log_path)
oprot.writeFieldEnd()
if self.check_process_delay is not None:
oprot.writeFieldBegin('check_process_delay', TType.BYTE, 5)
oprot.writeByte(self.check_process_delay)
oprot.writeFieldEnd()
if self.max_status_poll_tries is not None:
oprot.writeFieldBegin('max_status_poll_tries', TType.BYTE, 6)
oprot.writeByte(self.max_status_poll_tries)
oprot.writeFieldEnd()
if self.status_poll_interval is not None:
oprot.writeFieldBegin('status_poll_interval', TType.I16, 7)
oprot.writeI16(self.status_poll_interval)
oprot.writeFieldEnd()
if self.process_create_timeout is not None:
oprot.writeFieldBegin('process_create_timeout', TType.I32, 8)
oprot.writeI32(self.process_create_timeout)
oprot.writeFieldEnd()
if self.timeout_options is not None:
oprot.writeFieldBegin('timeout_options', TType.STRUCT, 9)
self.timeout_options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.start_process is None:
raise TProtocolException(message='Required field start_process is unset!')
if self.cli_args is None:
raise TProtocolException(message='Required field cli_args is unset!')
if self.log_path is None:
raise TProtocolException(message='Required field log_path is unset!')
if self.check_process_delay is None:
raise TProtocolException(message='Required field check_process_delay is unset!')
if self.max_status_poll_tries is None:
raise TProtocolException(message='Required field max_status_poll_tries is unset!')
if self.status_poll_interval is None:
raise TProtocolException(message='Required field status_poll_interval is unset!')
if self.process_create_timeout is None:
raise TProtocolException(message='Required field process_create_timeout is unset!')
if self.timeout_options is None:
raise TProtocolException(message='Required field timeout_options is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DesiredCapabilities(object):
"""
Attributes:
- browserName
"""
def __init__(self, browserName=None,):
self.browserName = browserName
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.browserName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('DesiredCapabilities')
if self.browserName is not None:
oprot.writeFieldBegin('browserName', TType.STRING, 1)
oprot.writeString(self.browserName.encode('utf-8') if sys.version_info[0] == 2 else self.browserName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.browserName is None:
raise TProtocolException(message='Required field browserName is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
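# ---------------------------------------------------------------------------
# Hedged example (not part of the generated module): round-trips a
# DesiredCapabilities struct through an in-memory binary protocol.  It assumes
# the rest of the generated file is present, including its usual
# ``from thrift.transport import TTransport`` import.
def _example_roundtrip_desired_capabilities():
	from thrift.protocol import TBinaryProtocol

	write_buf = TTransport.TMemoryBuffer()
	DesiredCapabilities(browserName='chrome').write(
		TBinaryProtocol.TBinaryProtocol(write_buf))

	read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
	decoded = DesiredCapabilities()
	decoded.read(TBinaryProtocol.TBinaryProtocol(read_buf))
	decoded.validate()
	return decoded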
class Main(object):
"""
Attributes:
- custom_commands_path
- custom_assertions_path
- page_objects_path
- globals_path
- globals
- dotenv
- persist_globals
- output_folder
- src_folders
- live_output
- disable_colors
- parallel_process_delay
- selenium
- start_session
- end_session_on_fail
- test_workers
- test_runner
- webdriver
- test_settings
- launch_url
- silent
- output
- detailed_output
- output_timestamp
- disable_error_log
- screenshots
- log_screenshot_data
- desiredCapabilities
- exclude
- filter
- skipgroup
- sync_test_names
- skiptags
- use_xpath
- parallel_mode
- report_prefix
- unit_tests_mode
- default_reporter
"""
def __init__(self, custom_commands_path=None, custom_assertions_path=None, page_objects_path=None, globals_path=None, globals=None, dotenv=None, persist_globals=None, output_folder=None, src_folders=None, live_output=None, disable_colors=None, parallel_process_delay=None, selenium=None, start_session=None, end_session_on_fail=None, test_workers=None, test_runner=None, webdriver=None, test_settings=None, launch_url=None, silent=None, output=None, detailed_output=None, output_timestamp=None, disable_error_log=None, screenshots=None, log_screenshot_data=None, desiredCapabilities=None, exclude=None, filter=None, skipgroup=None, sync_test_names=None, skiptags=None, use_xpath=None, parallel_mode=None, report_prefix=None, unit_tests_mode=None, default_reporter=None,):
self.custom_commands_path = custom_commands_path
self.custom_assertions_path = custom_assertions_path
self.page_objects_path = page_objects_path
self.globals_path = globals_path
self.globals = globals
self.dotenv = dotenv
self.persist_globals = persist_globals
self.output_folder = output_folder
self.src_folders = src_folders
self.live_output = live_output
self.disable_colors = disable_colors
self.parallel_process_delay = parallel_process_delay
self.selenium = selenium
self.start_session = start_session
self.end_session_on_fail = end_session_on_fail
self.test_workers = test_workers
self.test_runner = test_runner
self.webdriver = webdriver
self.test_settings = test_settings
self.launch_url = launch_url
self.silent = silent
self.output = output
self.detailed_output = detailed_output
self.output_timestamp = output_timestamp
self.disable_error_log = disable_error_log
self.screenshots = screenshots
self.log_screenshot_data = log_screenshot_data
self.desiredCapabilities = desiredCapabilities
self.exclude = exclude
self.filter = filter
self.skipgroup = skipgroup
self.sync_test_names = sync_test_names
self.skiptags = skiptags
self.use_xpath = use_xpath
self.parallel_mode = parallel_mode
self.report_prefix = report_prefix
self.unit_tests_mode = unit_tests_mode
self.default_reporter = default_reporter
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.custom_commands_path = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.custom_assertions_path = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.page_objects_path = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.globals_path = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.globals = Globals()
self.globals.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.dotenv = Empty()
self.dotenv.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.persist_globals = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.output_folder = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.src_folders = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.BOOL:
self.live_output = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.BOOL:
self.disable_colors = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.BYTE:
self.parallel_process_delay = iprot.readByte()
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.STRUCT:
self.selenium = Selenium()
self.selenium.read(iprot)
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.BOOL:
self.start_session = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 15:
if ftype == TType.BOOL:
self.end_session_on_fail = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 16:
if ftype == TType.BOOL:
self.test_workers = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 17:
if ftype == TType.STRING:
self.test_runner = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 18:
if ftype == TType.STRUCT:
self.webdriver = WebDriver()
self.webdriver.read(iprot)
else:
iprot.skip(ftype)
elif fid == 19:
if ftype == TType.STRUCT:
self.test_settings = Empty()
self.test_settings.read(iprot)
else:
iprot.skip(ftype)
elif fid == 20:
if ftype == TType.STRING:
self.launch_url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 21:
if ftype == TType.BOOL:
self.silent = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 22:
if ftype == TType.BOOL:
self.output = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 23:
if ftype == TType.BOOL:
self.detailed_output = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 24:
if ftype == TType.BOOL:
self.output_timestamp = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 25:
if ftype == TType.BOOL:
self.disable_error_log = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 26:
if ftype == TType.BOOL:
self.screenshots = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 27:
if ftype == TType.BOOL:
self.log_screenshot_data = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 28:
if ftype == TType.STRUCT:
self.desiredCapabilities = DesiredCapabilities()
self.desiredCapabilities.read(iprot)
else:
iprot.skip(ftype)
elif fid == 29:
if ftype == TType.STRING:
self.exclude = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 30:
if ftype == TType.STRING:
self.filter = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 31:
if ftype == TType.STRING:
self.skipgroup = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 32:
if ftype == TType.BOOL:
self.sync_test_names = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 33:
if ftype == TType.STRING:
self.skiptags = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 34:
if ftype == TType.BOOL:
self.use_xpath = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 35:
if ftype == TType.BOOL:
self.parallel_mode = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 36:
if ftype == TType.STRING:
self.report_prefix = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 37:
if ftype == TType.BOOL:
self.unit_tests_mode = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 38:
if ftype == TType.STRING:
self.default_reporter = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Main')
if self.custom_commands_path is not None:
oprot.writeFieldBegin('custom_commands_path', TType.STRING, 1)
			oprot.writeString(self.custom_commands_path.encode('utf-8') if sys.version_info[0] == 2 else self.custom_commands_path)
def get_feature_type(shapefile):
	shp = ogr.Open(shapefile)
lyr = shp.GetLayer()
featList = list(range(lyr.GetFeatureCount()))
feat = lyr.GetFeature(featList[0])
geom = feat.GetGeometryRef()
return geom.GetGeometryName()
def compute_area(shapefile):
shp = ogr.Open(shapefile)
lyr = shp.GetLayer()
featList = list(range(lyr.GetFeatureCount()))
area_list = []
#perim_list = []
for FID in featList:
feat = lyr.GetFeature(FID)
# Get extent of feat
geom = feat.GetGeometryRef()
area_list.append(geom.GetArea())
#perim_list.append(geom.Length())
return area_list#, perim_list
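# Hedged usage sketch (not part of the original source); the shapefile path is
# hypothetical:
#
#     areas = compute_area('plots.shp')
#     print('total polygon area:', sum(areas))
#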
def xy_poly_coords(shapefile,skipFactor = 1):
shp = ogr.Open(shapefile)
lyr = shp.GetLayer()
featList = list(range(lyr.GetFeatureCount()))
ftl = []
area_list = []
#statDict = {}
out_list = []
for FID in featList:
## print 'Extracting fid', FID
feat = lyr.GetFeature(FID)
# Get extent of feat
geom = feat.GetGeometryRef()
## area_list.append(geom.GetArea())
## print FID,geom.GetGeometryName()
olt = []
if (geom.GetGeometryName() == 'MULTIPOLYGON'):
ftl.append('MULTIPOLYGON')
count = 0
#pointsX = []; pointsY = []
for polygon in geom:
oltt = []
geomInner = geom.GetGeometryRef(count)
ring = geomInner.GetGeometryRef(0)
numpoints = ring.GetPointCount()
for p in range(numpoints):
lon, lat, z = ring.GetPoint(p)
#pointsX.append(lon)
#pointsY.append(lat)
oltt.append([lon,lat])
olt.append(oltt)
count += 1
elif (geom.GetGeometryName() == 'POLYGON'):
ftl.append('POLYGON')
ring = geom.GetGeometryRef(0)
numpoints = ring.GetPointCount()
#pointsX = []; pointsY = []
pts = list(range(0,numpoints,skipFactor))
if len(pts) < 25:
pts = list(range(0,numpoints))
for p in pts:
lon, lat, z = ring.GetPoint(p)
olt.append([lon,lat])
#pointsX.append(lon)
#pointsY.append(lat)
out_list.append(olt)
## print 'Area list', area_list
return out_list, ftl
#######################################################
#Takes a point shapefile and creates a plot kml and shp with radius specified
def point_shp_to_plot_kml(shp,kml,radius,idField = 'plotid'):
proj4 = shape_info(shp)['proj4']
xys = xy_coords(shp, write_csv = False)
ids = multiple_field_extraction(shp,[idField])
## ids = map(lambda i: i[0],ids)
print(ids)
out=[]
for i in range(0,len(xys)):
id= ids[i]
xy = xys[i]
x = xy[0]
y = xy[1]
t = [[x-radius,y-radius],
[x-radius,y+radius],
[x+radius,y+radius],
[x+radius,y-radius]]
out.append(t)
tShp = os.path.splitext(kml)[0] + '_s.shp'
if os.path.exists(kml) == False:
list_to_polygon_shapefile(out, tShp, proj4)
shape_to_kml(tShp, kml,idField)
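# Hedged usage sketch (not part of the original source); the paths, radius and
# id field below are hypothetical:
#
#     point_shp_to_plot_kml('plots.shp', 'plots.kml', radius=30, idField='plotid')
#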
##################################
##shapefile = r'R:\NAFD3\timesync_setup\test_sampled_new_sample3\p035r032_1999_2009_union_lfd_use_sampled.shp'
##ftl,xyss = xy_poly_coords(shapefile)
##i = 0
##for xys in xyss:
##
## print ftl[i],numpy.shape(numpy.array(xys)), len(xys),numpy.array(xys).ndim
## i += 1
##print len(xyss)
## olt = []
## geom = feat.GetGeometryRef()
## ring = geom.GetGeometryRef(0)
## points = ring.GetPointCount()
## for p in xrange(points):
## x,y,z = ring.GetPoint(p)
## olt.append([x,y])
## out_list.append(olt)
##
## return out_list
## shapeData = ogr.Open(shapefile)
## layer = shapeData.GetLayer()
## i = 1
## out_list = []
## for feat in layer:
## olt = []
## geom = feat.GetGeometryRef()
## ring = geom.GetGeometryRef(0)
## points = ring.GetPointCount()
## for p in xrange(points):
## x,y,z = ring.GetPoint(p)
## olt.append([x,y])
## out_list.append(olt)
##
## return out_list
def get_coords(shapefile):
ft= get_feature_type(shapefile)
if ft == 'POINT':
xys = xy_coords(shapefile, False)
xyss = xys
polygon = False
ftl = ['POINT'] * len(xys)
else:
polygon = True
xyss, ftl = xy_poly_coords(shapefile)
return xyss,polygon, ftl
def coords_to_wkt(in_coords, geom_type):
wkt = 'POLYGON '#geom_type
for fid in range(len(in_coords)):
wkt += '('
polys = in_coords[fid]
pi = 1
for poly in polys[-3:-1]:
print('Converting poly no:', pi)
wkt += '('
for xy in poly:
wkt += str(xy[0]) + ' ' + str(xy[1]) + ','
#print wkt
wkt = wkt[:-1] + ')'
pi += 1
wkt += ')'
return wkt
##coords, is_poly, geom_type = get_coords(poly)
##wkt = coords_to_wkt(coords,geom_type[0])
##
##pt_driver = ogr.GetDriverByName("ESRI Shapefile")
##pt_dataSource = pt_driver.Open(points, 0)
##pt_layer = pt_dataSource.GetLayer()
##print pt_layer.GetFeatureCount()
##pt_layer.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))
##print pt_layer.GetFeatureCount()
##
##
##poly_shp = ogr.Open(poly)
##lyr = poly_shp.GetLayer()
##
##pt_layer.SetSpatialFilter(lyr)
##pt_coords, types,ftl_pt = get_coords(points)
##print pt_coords
##shp= '//172.16.31.10/glrc_vct/NAFD3/timesync_setup/test_sampled_new_sample3/p035r032_1999_2009_union_lfd_use_sampled.shp'
##print get_coords(shp)
###get_feature_type(shp)
##raw_input()
def point_feature_to_array_location(xy,coords,res):
return proj_coord_to_array_coord(xy, coords, res)
def poly_feature_to_array_location(xyss,coords,res):
out_list = []
#for xys in xyss:
#print xys
out_listx = []
out_listy = []
for xy in xyss:
t= proj_coord_to_array_coord(xy, coords, res)
out_list.append(t)
## out_listx.append(t[0])
## out_listy.append(t[1])
## out_list.append([out_listx,out_listy])
out_list = transpose(out_list)
return out_list
def multi_poly_feature_to_array_location(xysss,coords,res):
out_list = []
for xyss in xysss:
out_list.append(poly_feature_to_array_location(xyss,coords,res))
return out_list
def coord_to_array_location(xyss, raster, ftl = ''):
info = raster_info(raster)
coords = info['coords']
res = info['res']
out_list = []
if ftl == '':
ftl = ['POINT'] * len(xyss)
i = 0
for xys in xyss:
ft= ftl[i]
if ft == 'POINT':
out_list.append(point_feature_to_array_location(xys,coords,res))
elif ft == 'POLYGON':
out_list.append(poly_feature_to_array_location(xys,coords,res))
elif ft == 'MULTIPOLYGON':
out_list.append(multi_poly_feature_to_array_location(xys,coords,res))
i += 1
return out_list
## if polygon:
##
## for xys in xyss:
##
## out_listx = []
## out_listy = []
## for xy in xys:
## t= proj_coord_to_array_coord(xy, coords, res)
##
##
## out_listx.append(t[0])
## out_listy.append(t[1])
## out_list.append([out_listx,out_listy])
##
## else:
##
## for xy in xyss:
## t= proj_coord_to_array_coord(xy, coords, res)
##
##
## out_list.append(t)
## return out_list
def proj_coord_to_array_location(shapefile, raster):
xyss, polygon, ftl = get_coords(shapefile)
array_xyss = coord_to_array_location(xyss,raster,ftl)
return xyss,array_xyss,polygon,ftl
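# Hedged example of using proj_coord_to_array_location (paths are hypothetical). Assuming
# proj_coord_to_array_coord returns [column, row] pixel offsets, the array locations can be
# used to index a band read from the raster.
## xyss, array_xyss, is_poly, ftl = proj_coord_to_array_location('C:/data/sample_pts.shp', 'C:/data/comp.img')
## band = gdal.Open('C:/data/comp.img').GetRasterBand(1).ReadAsArray()
## for col, row in array_xyss:      # point features: one [x, y] pair per feature
##     print(band[row, col])        # numpy indexing is [row, col]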
##
##default_sample = 'R:/NAFD3/timesync_setup/test_sampled_new_sample3/p035r032_1999_2009_union_lfd_use_sampled_pts.shp'
##image = 'R:/NAFD3/timesync_setup/imagery/3532/refls/p35r32_2009cmp_ev_cloudfill.img'
##print proj_coord_to_array_location(default_sample,image)
##shp= 'R:/NAFD/Landtrendr/timesync2/test_data/sample/test_poly_sample.shp'
##shpp = 'R:/NAFD/Landtrendr/timesync2/test_data/sample/test_sample.shp'
##rast = 'R:/NAFD/Landtrendr/timesync2/test_data/images/p034r032_distbYear_flt_20.img'
###xy_poly_coords(shp)
##xyss = proj_coord_to_array_location(shp,rast)
##for xy in xyss:
## print xy
#Converts a raster to a shapefile using the FWTools gdal_polygonize.py script
def raster_to_shapefile(raster, output, overwrite = False, gdal_dir = gdal_dir):
statement = 'gdal_polygonize.py ' + raster + ' -f "ESRI Shapefile" ' + output + ' ' + os.path.splitext(os.path.basename(output))[0]
orig_dir = os.getcwd()
os.chdir(gdal_dir)
bat_filename = os.path.dirname(raster) + '/gdal_polygonize.bat'
open_bat = open(bat_filename, 'w')
open_bat.writelines(statement)
open_bat.close()
if os.path.exists(output) == False or overwrite == True:
call = subprocess.Popen(bat_filename)
call.wait()
os.chdir(orig_dir)
try:
os.remove(bat_filename)
except:
pass  # ignore failure to remove the temporary batch file
#r = cwd +'test.jpg'
#shp = cwd + 'test.shp'
#raster_to_shapefile(r,shp)
########################################################################################################################
##############################################################
#Function to apply a mmu and convert categorized outputs to polygons for ease of use
def apply_mmu_and_convert_to_poly(in_raster, mmu):
#Set up names
in_raster_mmu = os.path.splitext(in_raster)[0] +'_mmu_' + str(mmu) + os.path.splitext(in_raster)[1]
in_raster_mmu_poly = os.path.splitext(in_raster_mmu)[0] + '_poly.shp'
#Apply mmu
if os.path.exists(in_raster_mmu) == False:
print('Creating', os.path.basename(in_raster_mmu))
gdal_sieve(in_raster, in_raster_mmu, mmu)
else:
print('Already created:',os.path.basename(in_raster_mmu))
#Convert to polygons using raster_to_shapefile function (built on gdal_polygonize)
if os.path.exists(in_raster_mmu_poly) == False:
print('Converting', os.path.basename(in_raster_mmu) , 'to polygons')
raster_to_shapefile(in_raster_mmu, in_raster_mmu_poly)
else:
print('Already created:',os.path.basename(in_raster_mmu_poly))
#Find the feature count, add the UNID field, and update the values
si = shape_info(in_raster_mmu_poly)
num_features = si['feature_count']
print('There are', num_features, 'features in', os.path.basename(in_raster_mmu_poly))
update_field(in_raster_mmu_poly,'UNID', list(range(1,num_features + 1)))
update_field(in_raster_mmu_poly, 'Area',compute_area(in_raster_mmu_poly))
#area = compute_area(in_raster_mmu_poly)
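# Hedged usage sketch (the raster path and the 11-pixel minimum mapping unit are hypothetical):
## apply_mmu_and_convert_to_poly('C:/data/change_map.img', 11)
## # Produces change_map_mmu_11.img (sieved raster) and change_map_mmu_11_poly.shp
## # with UNID and Area fields populated.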
########################################################################################################################
#Converts a shapefile to a raster using the r package "raster" polygonize function
#An optional snap raster can be provided if available; otherwise the conversion is performed with the output raster snapped to the nearest coordinate from the shapefile's ulx,uly
def shapefile_to_raster(shapefile, output, snap_raster = '', resolution = '10'):
start = time.time()
resolution = str(resolution)
try:
r('library(raster)')
except:
r_library_installer(['raster'])
r('library(raster)')
if snap_raster == '':
layer = os.path.splitext(shapefile)[0].split('/')[-1]
try:
r('library(raster)')
except:
r_library_installer(['raster'])
r('library(raster)')
try:
r('library(rgdal)')
except:
r_library_installer(['rgdal'])
r('library(rgdal)')
r('shp = readOGR("' + shapefile + '", "' + layer + '")')
r('projection = OGRSpatialRef("' + shapefile + '", "' + layer + '")')
r('info = ogrInfo("' + shapefile + '", "' + layer + '")')
r('extent =as.matrix((bbox(shp)))')
r('print(extent[1,])')
r('xmaxmin = extent[1,]')
r('ymaxmin = extent[2,]')
r('ncol = as.numeric((xmaxmin[2] - xmaxmin[1])/' + resolution + ')')
r('nrow = as.numeric((ymaxmin[2] - ymaxmin[1])/' + resolution + ')')
r('ncol = ceiling(ncol)')
r('nrow = ceiling(nrow)')
r('print(ncol)')
r('print(nrow)')
r('dtype = "INT1U"')
r('extent[1,2] = extent[1,1] + (ncol * ' + resolution + ')')
r('extent[2,2] = extent[2,1] + (nrow * ' + resolution + ')')
r('resolution = '+resolution)
else:
try:
r('temp = raster("' + snap_raster + '")')
except:
r('library(raster)')
r('temp = raster("' + snap_raster + '")')
r('projection = projection(temp)')
r('extent = extent(temp)')
r('ncol = ncol(temp)')
r('nrow = nrow(temp)')
r('dtype = dataType(temp)')
r('r = raster(ncol = ncol, nrow = nrow)')
r('projection(r) = projection')
r('extent(r) = extent')
r('r[] = 0')
r('print(r)')
try:
r('poly = readShapePoly("'+shapefile+'", proj4string = CRS(projection(r)), delete_null_obj = TRUE)')
except:
try:
r('library(maptools)')
except:
r_library_installer(['maptools'])
r('library(maptools)')
r('poly = readShapePoly("'+shapefile+'", proj4string = CRS(projection(r)), delete_null_obj = TRUE)')
r('r = rasterize(poly, r, progress = "text", na.rm = TRUE, update = TRUE)')
r('writeRaster(r, "'+output+'", datatype = dtype, format = "HFA", overwrite = TRUE)')
end = time.time()
elapsed_time = end - start
print("Took: ", elapsed_time, "seconds to complete")
##############################################################################################
def new_shapefile_to_raster(in_shp,out_rast, res = 30, no_data = ''):
try:
out_format = format_dict[os.path.splitext(out_rast)[1]]
except:
out_format = 'HFA'
# Open the data source and read in the extent
source_ds = ogr.Open(in_shp)
source_layer = source_ds.GetLayer()
source_srs = source_layer.GetSpatialRef()
x_min, x_max, y_min, y_max = source_layer.GetExtent()
# Create the destination data source
x_res = int((x_max - x_min) / res)
y_res = int((y_max - y_min) / res)
print('Initializing output raster:', out_rast)
target_ds = gdal.GetDriverByName(out_format).Create(out_rast, x_res, y_res, 1, gdal.GDT_Byte)
target_ds.SetGeoTransform((x_min, res, 0, y_max, 0, -res))
#target_ds.SetProjection(projection)
target_ds.SetProjection(source_srs.ExportToWkt())
band = target_ds.GetRasterBand(1)
if no_data != '' and no_data != None:
band.SetNoDataValue(int(no_data))
gdal.RasterizeLayer(target_ds, [1], source_layer, None, None, [1], ['ALL_TOUCHED=TRUE'])#burn_values=[0])
print('Closing raster')
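# Hedged usage sketch (paths hypothetical): rasterize polygons at 30 m, burning 1 into touched
# cells and tagging 0 as NoData.
## new_shapefile_to_raster('C:/data/fire_perimeters.shp', 'C:/data/fire_perimeters.img', res=30, no_data=0)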
def grow_raster(in_raster, out_raster,no_pixels = 100):
ri = raster_info(in_raster)
coords = ri['coords']
res = ri['res']
offset = res * no_pixels
out_width = ri['width'] + 2*no_pixels
out_height = ri['height'] + 2*no_pixels
out_coords = [coords[0] - offset,coords[1] - offset,coords[2] + offset, coords[3] + offset]
out_transform = [out_coords[0], res, 0, out_coords[-1], 0, -res]
ti = tiled_image(out_raster, template_image = '', width = out_width, height = out_height, bands = ri['bands'], dt = ri['dt'], transform = out_transform, projection = ri['projection'],out_no_data = ri['no_data'])
ti.add_tile(brick(in_raster),no_pixels,no_pixels)
ti.rm()
##############################################################################################
#Will merge a list of shapefiles to a single shapefile using ogr2ogr.exe
#All shapefiles must be of the | |
number of intervals (numAPs - 1) to get mean ISI
inter_spike_interval = time_diff/(numAPs-1)
return inter_spike_interval
def absmax(i):
"""
Returns the largest absolute value present in an array in its raw form
(e.g. in [-2, 0, 1] it returns -2, in [-2,0,3] it returns 3.)
"""
# Use the absolute largest value in its raw form
if max(i) > abs(min(i)):
return max(i)
elif abs(min(i)) >= max(i):
return min(i)
else:
raise ValueError()
# ---- Calculating biomarkers over multiple traces ----
def calculate_rmp(traces):
RMPVals = []
for i,v in enumerate(traces['v']):
RMPValue, RMPIdx = RMP(v)
RMPVals.append(RMPValue)
return RMPVals
CalculateRMP = calculate_rmp # Alias
def calculate_input_res(traces):
input_res_vals = []
for i,v in enumerate(traces['v']):
input_res_vals.append(input_res(v))
return input_res_vals
CalculateInputRes = calculate_input_res # Alias
def calculate_ramp_ap():
"""
Can't remember what this biomarker was supposed to do?
We just run ramp simulations and calculate biomarkers on those now.
"""
# TODO
return 0
CalculateRampAP = calculate_ramp_ap # Alias
def calculate_rheobase(cell_model, amp_step=0.1, amp_max=5., make_plot=False, sim_kwargs=None, search='simple'):
" Run a series of simulations to calculate rheobase"
" Rheobase is defined as the threshold current for an infinite duration pulse "
" We'll try 2 seconds "
# Fill out sim_kwargs with defaults if needed
if sim_kwargs is None:
sim_kwargs = {}
default_kwargs = {'dur':500., 'delay':1000., 'interval':0., 'num_stims':1, 't_stop':1500.,
'mechanisms':None, 'make_plot':False, 'plot_type':'default', 'model':cell_model}
for kwarg in default_kwargs.keys():
if kwarg in sim_kwargs.keys():
pass
else:
sim_kwargs[kwarg] = default_kwargs[kwarg]
def rheobase_simulation(amp):
# Returns simulation amplitude if an AP is found, otherwise returns RHEO_FAIL if no APs found
sim_kwargs['amp'] = amp
output = sh.simulation(**sim_kwargs)
t = output['t']; v = output['v'];
# Look for an AP, after throwing away the delay period, leave a 1 ms run up to catch the start
run_up = 1.
delay = sim_kwargs['delay']
stim_period_indices = (t >= (delay-run_up)) # TODO - why did I put tuple here earlier?
t = t[stim_period_indices]
v = v[stim_period_indices]
traces = split_trace_into_aps(t,v,threshold=0.,time_threshold=5.)
if traces['numAPs'] > 0: # rheobase found
if make_plot:
plot_traces(traces)
rheobase = amp
return rheobase
else:
return RHEO_FAIL
amp_min = 0.
amps = np.arange(amp_min, amp_max, amp_step) # (nA)
# Two search modes
# 1. simple starts from amp_min and works up until it finds an AP
# 2. divide starts from the middle and does a binary search
# simple should be quicker when rheobase is usually very low and very few models have no rheobase
# divide should be quicker if rheobase is distributed any other way
if search == 'simple':
for amp in amps:
rheobase = rheobase_simulation(amp)
if rheobase is not RHEO_FAIL: # 'is not' is used because np.nan == np.nan returns False
return rheobase
return RHEO_FAIL
elif search == 'divide':
# Divide and conquer algorithm using a binary search
idx0 = 0
idxn = len(amps) - 1
rheobases = np.empty(len(amps))
rheobases[:] = None
while idx0 <= idxn:
midval = (idx0 + idxn)// 2
rheobase = rheobase_simulation(amps[midval])
rheobases[midval] = rheobase
if rheobase is not RHEO_FAIL: # 'is not' is used because np.nan == np.nan returns False
if midval == 0:
# Rheobase is minimum
return amps[0]
elif rheobases[midval-1] is RHEO_FAIL: # the amp one step below failed to fire
# Found minimal amp for an AP - return rheobase
return amps[midval]
else:
# AP found but not definitely lowest amp so lower idxn
idxn = midval - 1
elif rheobase is RHEO_FAIL: # no AP was found at this amplitude
if midval == (len(amps) - 1):
# No rheobase for highest amp
return RHEO_FAIL
elif rheobases[midval+1] is not None and rheobases[midval+1] is not RHEO_FAIL:
# We've found highest amp with no AP, so one up is rheobase
return amps[midval+1]
else:
# No AP found but not definitely highest amp so raise idx0
idx0 = midval + 1
else:
raise Exception('Rheobase not accepted value.' )
raise Exception('No rheobase found')
elif search == 'smart':
# Simple search but after first two searches upwards we check the max value to check for
# no rheobase. If the first 5? searches fail we switch to binary.
# TODO
pass
CalculateRheobase = calculate_rheobase # Alias for compatibility
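# Hedged usage sketch: `cell_model` stands in for whatever model object sh.simulation expects,
# and the amplitude step/maximum shown are illustrative rather than prescribed values.
## rheo = calculate_rheobase(cell_model, amp_step=0.05, amp_max=2.0, search='simple')
## if rheo is RHEO_FAIL:
##     print('No rheobase found up to 2.0 nA')
## else:
##     print('Rheobase: {} nA'.format(rheo))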
def calculate_threshold(traces, dvdt_threshold=5.):
thresholds = []
for t,v in zip(traces['t'], traces['v']):
thresholds.append(threshold(t, v, dvdt_threshold=dvdt_threshold,))
return thresholds
def calculate_ap_peak(traces):
ap_peak_vals = []
for _,v in zip(range(len(traces['t'])),traces['v']):
ap_peak_vals.append(ap_peak(v)[0])
return ap_peak_vals
CalculateAPPeak = calculate_ap_peak # Alias
def calculate_ap_rise_time(traces,dvdtthreshold=5.):
ap_rise_time_vals = []
for t,v in zip(traces['t'],traces['v']):
ap_rise_time_vals.append(ap_rise_time(t,v,dvdtthreshold))
return ap_rise_time_vals
CalculateAPRiseTime = calculate_ap_rise_time # Alias
def calculate_ap_slope_min_max(traces):
ap_slope_min_vals = []
ap_slope_max_vals = []
for t,v in zip(traces['t'],traces['v']):
dvdt = np.gradient(v,t)
ap_slope_min_vals.append(min(dvdt))
ap_slope_max_vals.append(max(dvdt))
return ap_slope_min_vals, ap_slope_max_vals
CalculateAPSlopeMinMax = calculate_ap_slope_min_max # Alias
def calculate_ap_width(traces, alpha, threshold=0, method='voltage'):
ap_width_vals = []
for t,v in zip(traces['t'],traces['v']):
ap_width_vals.append(ap_width(t,v,alpha,threshold,method))
return ap_width_vals
def calculate_ap_half_width(traces, threshold=0, method='voltage'):
alpha = 0.5
ap_half_width_vals = calculate_ap_width(traces,alpha,threshold,method)
return ap_half_width_vals
def calculate_ap_full_width(traces,threshold=0, method='voltage'):
alpha = 0.0 # Calculate at the threshold so set alpha = 0
ap_full_width_vals = calculate_ap_width(traces,alpha,threshold,method)
return ap_full_width_vals
CalculateAPFullWidth = calculate_ap_full_width # Alias
def calculate_ahp_amp(traces,dvdt_threshold=5):
ahp_amp_vals = []
if traces['numAPs'] > 1:
for i in range(traces['numAPs']-1):
t = traces['t'][i]
v = traces['v'][i]
t2 = traces['t'][i+1]
v2 = traces['v'][i+1]
amp, tau, trough = fit_afterhyperpolarization(t,v,t2,v2,dvdt_threshold)
ahp_amp_vals.append(amp)
elif traces['numAPs'] == 1:
v = traces['v'][0]
max_idx = np.argmax(v)
working_voltage = v[max_idx:]### join
amp = min(working_voltage)
ahp_amp_vals.append(amp)
return ahp_amp_vals
CalculateAHPAmp = calculate_ahp_amp # Alias
def calculate_ahp_tau():
# TODO
return 0
CalculateAHPTau = calculate_ahp_tau # Alias
# -- Firing Patterns --
# See Balachandar and Prescott 2018 for algorithms
# TODO: Find algorithms for phasic and burst patterns
def determine_firing_pattern(traces, stim_start, stim_end):
"""
Define firing pattern of traces as one or more of n types:
1. Reluctant
2. Single
3. Tonic
4. Delayed
5. Gap
6. Phasic - multi-AP firing that ends before end of stimulus
7. Burst firing
8. Wide
9. Repolarisation failure
"""
def first_spike_delay(traces, stim_start):
# Find delay between stim start and first spike
first_spike_v = traces['v'][0]
first_spike_t = traces['t'][0]
single_spike_index = ap_peak(first_spike_v)[1]
single_spike_time = first_spike_t[single_spike_index]
delay = single_spike_time - stim_start
#print("delay = {}".format(delay))
return delay
def first_two_spikes_isi(traces):
# Find delay between first and second spikes
spike_times = []
for i in [0,1]:
spike_idx = ap_peak(traces['v'][i])[1]
spike_times.append(traces['t'][i][spike_idx])
delay = spike_times[1] - spike_times[0]
return delay
def second_third_spikes_isi(traces):
# Find delay between second and third spikes
spike_times = []
for i in [1,2]:
spike_idx = ap_peak(traces['v'][i])[1]
spike_times.append(traces['t'][i][spike_idx])
delay = spike_times[1] - spike_times[0]
return delay
def check_delayed(traces):
# Check if firing pattern is delayed
delayed = False
num_aps = traces['numAPs']
# Delayed firing pattern criterion for 1 spike:
# Delay from stim start to first spike is > 100 ms
if num_aps == 1:
if first_spike_delay(traces, stim_start) > 100.0:
delayed = True
# Delayed firing pattern criterion for > 1 spike:
# Delay between stimulus start and firing first spike is > 1.5
# times the ISI between spikes 1 and 2.
elif num_aps > 1:
if first_spike_delay(traces, stim_start) > 1.5*first_two_spikes_isi(traces):
delayed = True
return delayed
def check_gap(traces):
gap = False
num_aps = traces['numAPs']
# Gap firing criteria:
# Number of spikes > 2
# ISI between spikes 1 and 2 > 1.5 times ISI between spikes 2 and 3
if num_aps > 2:
if first_two_spikes_isi(traces) > 1.5*second_third_spikes_isi(traces):
gap = True
return gap
def check_phasic(traces, stim_end, ratio_threshold=0.25):
"""
Phasic - firing of multiple APs followed by a period of quiescence.
Cases:
1. Use the ratio of (time from last spike to stimulus end) to (time from first to last spike);
the firing is classed as phasic if the ratio is above some threshold.
2. Simply compare the time from the last peak to the end of the stimulus against a threshold.
"""
phasic = False
# Characterisation cases
case1 = True
case2 = False
# First, check we have multiple APs
# We will class single spikes as single spikes, not phasic.
num_aps = traces['numAPs']
if num_aps < 2:
return False
spike_times = []
for i in range(num_aps):
spike_idx = ap_peak(traces['v'][i])[1]
spike_times.append(traces['t'][i][spike_idx])
# Case 1
if case1:
last_spike_to_stim_end = stim_end - spike_times[-1]
# check stimulus ended before last spike, if not can't be | |
''' main '''
import argparse
import json
import logging
import os
import pkgutil
import re
import sys
import time
import uuid
from httplib import HTTPConnection
from ..common import status
from heron.common.src.python.utils import log
# The location of default configure file
DEFAULT_TEST_CONF_FILE = "integration_test/src/python/test_runner/resources/test.json"
RETRY_ATTEMPTS = 15
#seconds
RETRY_INTERVAL = 10
class FileBasedExpectedResultsHandler(object):
def __init__(self, file_path):
self.file_path = file_path
def fetch_results(self):
# Read expected result from the expected result file
try:
if not os.path.exists(self.file_path):
raise status.TestFailure("Expected results file %s does not exist" % self.file_path)
else:
with open(self.file_path, "r") as expected_result_file:
return expected_result_file.read().rstrip()
except Exception as e:
raise status.TestFailure("Failed to read expected result file %s" % self.file_path, e)
class HttpBasedExpectedResultsHandler(object):
def __init__(self, server_host_port, topology_name, task_count):
self.server_host_port = server_host_port
self.topology_name = topology_name
self.task_count = task_count
# pylint: disable=unnecessary-lambda
def fetch_results(self):
try:
result = []
decoder = json.JSONDecoder(strict=False)
for i in range(0, self.task_count):
task_result = fetch_from_server(self.server_host_port, self.topology_name,
'expected results',
'/state/%s_tuples_emitted_%d' % (self.topology_name, i))
json_result = decoder.decode(task_result)
logging.info("Found %d tuples emitted from spout task %d", len(json_result), i)
result = result + json_result
if len(result) == 0:
raise status.TestFailure(
"Expected result set is empty for topology %s" % self.topology_name)
# need to convert from a list of json objects to a string of a python list,
# without the unicode using double quotes, not single quotes.
return str(map(lambda x: str(x), result)).replace("'", '"')
except Exception as e:
raise status.TestFailure(
"Fetching expected result failed for %s topology" % self.topology_name, e)
class HttpBasedActualResultsHandler(object):
def __init__(self, server_host_port, topology_name):
self.server_host_port = server_host_port
self.topology_name = topology_name
def fetch_results(self):
try:
return fetch_from_server(self.server_host_port, self.topology_name,
'results', '/results/%s' % self.topology_name)
except Exception as e:
raise status.TestFailure("Fetching result failed for %s topology" % self.topology_name, e)
# pylint: disable=unnecessary-lambda
class ExactlyOnceResultsChecker(object):
"""Compares what results we found against what was expected. Verifies and exact match"""
def __init__(self, topology_name, expected_results_handler, actual_results_handler):
self.topology_name = topology_name
self.expected_results_handler = expected_results_handler
self.actual_results_handler = actual_results_handler
def check_results(self):
""" Checks the topology results from the server with the expected results from the file """
actual_result = self.actual_results_handler.fetch_results()
expected_result = self.expected_results_handler.fetch_results()
# Build a new instance of json decoder since the default one could not ignore "\n"
decoder = json.JSONDecoder(strict=False)
# Heron doesn't guarantee the order of messages in any case, so we should sort the result.
# Notice: here we treat every data value as unique even if two are the same,
# since we cannot judge here whether two messages are duplicates or not.
# User may deal with emit message along with MESSAGE_ID or remove duplicates in topology.
actual_results = sorted(decoder.decode(actual_result))
expected_results = sorted(decoder.decode(expected_result))
return self._compare(expected_results, actual_results)
def _compare(self, expected_results, actual_results):
# Compare the actual and expected result
if actual_results == expected_results:
return status.TestSuccess(
"Topology %s result matches expected result: %s expected tuples found exactly once" %
(self.topology_name, len(expected_results)))
else:
failure = status.TestFailure("Actual result did not match expected result")
# lambda required below to remove the unicode 'u' from the output
logging.info("Actual result ---------- \n" + str(map(lambda x: str(x), actual_results)))
logging.info("Expected result ---------- \n" + str(map(lambda x: str(x), expected_results)))
raise failure
class AtLeastOnceResultsChecker(ExactlyOnceResultsChecker):
"""Compares what results we found against what was expected. Verifies and exact match"""
def _compare(self, expected_results, actual_results):
expected_counts = _frequency_dict(expected_results)
actual_counts = _frequency_dict(actual_results)
missed_counts = {}
for expected_value in expected_counts:
expected_count = expected_counts[expected_value]
if expected_value in actual_counts:
actual_count = actual_counts[expected_value]
if actual_count < expected_count:
missed_counts[expected_value] = expected_count
else:
missed_counts[expected_value] = expected_count
if len(missed_counts) == 0:
return status.TestSuccess(
"Topology %s result matches expected result: %s expected tuples found at least once" %
(self.topology_name, len(expected_counts)))
else:
failure = status.TestFailure("Actual result did not match expected result")
# lambda required below to remove the unicode 'u' from the output
logging.info("Actual value frequencies ---------- \n" + ', '.join(
map(lambda (k, v): "%s(%s)" % (str(k), v), actual_counts.iteritems())))
logging.info("Expected value frequencies ---------- \n" + ', '.join(
map(lambda (k, v): "%s(%s)" % (str(k), v), expected_counts.iteritems())))
raise failure
def _frequency_dict(values):
frequency = {}
for value in values:
count = 0
if value in frequency:
count = frequency[value]
frequency[value] = count + 1
return frequency
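# Illustrative example of the helper above (not part of the test runner):
## _frequency_dict(['a', 'b', 'a', 'a'])   # -> {'a': 3, 'b': 1}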
def run_test(topology_name, classpath, results_checker,
params, http_server_host_port, update_args, extra_topology_args):
''' Runs the test for one topology '''
#submit topology
try:
args = "-r http://%s/results -t %s %s" %\
(http_server_host_port, topology_name, extra_topology_args)
submit_topology(params.heron_cli_path, params.cli_config_path, params.cluster, params.role,
params.env, params.tests_bin_path, classpath,
params.release_package_uri, args)
except Exception as e:
raise status.TestFailure("Failed to submit %s topology" % topology_name, e)
logging.info("Successfully submitted %s topology", topology_name)
try:
if update_args:
# wait for the topology to be started before triggering an update
poll_state_server(http_server_host_port, topology_name, "topology_started")
logging.info("Verified topology successfully started, proceeding to update it")
update_topology(params.heron_cli_path, params.cli_config_path, params.cluster,
params.role, params.env, topology_name, update_args)
# update state server to trigger more emits from the topology
logging.info("Topology successfully updated, updating state server")
update_state_server(http_server_host_port, topology_name, "topology_updated", "true")
return results_checker.check_results()
except Exception as e:
raise status.TestFailure("Checking result failed for %s topology" % topology_name, e)
finally:
kill_topology(params.heron_cli_path, params.cli_config_path, params.cluster,
params.role, params.env, topology_name)
def poll_state_server(server_host_port, topology_name, key):
return fetch_from_server(
server_host_port, topology_name, key, '/state/%s_%s' % (topology_name, key))
def update_state_server(http_server_host_port, topology_name, key, value):
connection = HTTPConnection(http_server_host_port)
connection.request('POST', '/state/%s_%s' % (topology_name, key), '"%s"' % value)
response = connection.getresponse()
return response.status == 200
def fetch_from_server(server_host_port, topology_name, data_name, path):
''' Make a http get request to fetch actual results from http server '''
for i in range(0, RETRY_ATTEMPTS):
logging.info("Fetching %s for topology %s, retry count: %d", data_name, topology_name, i)
response = get_http_response(server_host_port, path)
if response.status == 200:
return response.read()
elif i != RETRY_ATTEMPTS:
logging.info("Fetching %s failed with status: %s; reason: %s; body: %s",
data_name, response.status, response.reason, response.read())
time.sleep(RETRY_INTERVAL)
raise status.TestFailure("Failed to fetch %s after %d attempts" % (data_name, RETRY_ATTEMPTS))
def get_http_response(server_host_port, path):
''' get HTTP response '''
for _ in range(0, RETRY_ATTEMPTS):
try:
connection = HTTPConnection(server_host_port)
connection.request('GET', path)
response = connection.getresponse()
return response
except Exception:
time.sleep(RETRY_INTERVAL)
continue
raise status.TestFailure("Failed to get HTTP Response after %d attempts" % RETRY_ATTEMPTS)
def cluster_token(cluster, role, env):
if cluster == "local":
return cluster
return "%s/%s/%s" % (cluster, role, env)
def submit_topology(heron_cli_path, cli_config_path, cluster, role,
env, jar_path, classpath, pkg_uri, args=None):
''' Submit topology using heron-cli '''
# Form the command to submit a topology.
# Note the single quote around the arg for heron.package.core.uri.
# This is needed to prevent shell expansion.
cmd = "%s submit --config-path=%s %s %s %s %s" %\
(heron_cli_path, cli_config_path, cluster_token(cluster, role, env),
jar_path, classpath, args)
if pkg_uri is not None:
cmd = "%s --config-property heron.package.core.uri='%s'" %(cmd, pkg_uri)
logging.info("Submitting topology: %s", cmd)
if os.system(cmd) != 0:
raise status.TestFailure("Unable to submit the topology")
def kill_topology(heron_cli_path, cli_config_path, cluster, role, env, topology_name):
''' Kill a topology using heron-cli '''
cmd = "%s kill --config-path=%s %s %s" %\
(heron_cli_path, cli_config_path, cluster_token(cluster, role, env), topology_name)
logging.info("Killing topology: %s", cmd)
if os.system(cmd) != 0:
raise status.TestFailure("Failed to kill topology %s" % topology_name)
logging.info("Successfully killed topology %s", topology_name)
def update_topology(heron_cli_path, cli_config_path, cluster,
role, env, topology_name, update_args):
cmd = "%s update --config-path=%s %s %s %s --verbose" %\
(heron_cli_path, cli_config_path,
cluster_token(cluster, role, env), update_args, topology_name)
logging.info("Update topology: %s", cmd)
if os.system(cmd) != 0:
raise status.TestFailure("Failed to update topology %s" % topology_name)
logging.info("Successfully updated topology %s", topology_name)
def filter_test_topologies(test_topologies, test_pattern):
initial_topologies = test_topologies
if test_pattern:
pattern = re.compile(test_pattern)
test_topologies = filter(lambda x: pattern.match(x['topologyName']), test_topologies)
if len(test_topologies) == 0:
logging.error("Test filter '%s' did not match any configured test names:\n%s",
test_pattern, '\n'.join(map(lambda x: x['topologyName'], initial_topologies)))
sys.exit(1)
return test_topologies
def run_tests(conf, args):
''' Run the test for each topology specified in the conf file '''
successes = []
failures = []
timestamp = time.strftime('%Y%m%d%H%M%S')
http_server_host_port = "%s:%d" % (args.http_server_hostname, args.http_server_port)
if args.tests_bin_path.endswith(".jar"):
test_topologies = filter_test_topologies(conf["javaTopologies"], args.test_topology_pattern)
topology_classpath_prefix = conf["topologyClasspathPrefix"]
extra_topology_args = "-s http://%s/state" % http_server_host_port
elif args.tests_bin_path.endswith(".pex"):
test_topologies = filter_test_topologies(conf["pythonTopologies"], args.test_topology_pattern)
topology_classpath_prefix = ""
extra_topology_args = ""
else:
raise ValueError("Unrecognized binary file type: %s" % args.tests_bin_path)
current = 1
for topology_conf in test_topologies:
topology_name = ("%s_%s_%s") % (timestamp, topology_conf["topologyName"], str(uuid.uuid4()))
classpath = topology_classpath_prefix + topology_conf["classPath"]
# if the test includes an update we need to pass that info to the topology so it can send
# data accordingly. This flag causes the test spout to emit, then check the state of this
# token, then emit more.
update_args = ""
topology_args = extra_topology_args
if "updateArgs" in topology_conf:
update_args = topology_conf["updateArgs"]
if "topologyArgs" in topology_conf:
if topology_conf["topologyArgs"] == "emit_util" and update_args == "":
| |
TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_deleted_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/deleted/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedProxy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_proxy_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_proxy_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedProxy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedProxy', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
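# Hedged usage sketch: `api` is assumed to be an already-constructed instance of this class with a
# configured ApiClient, and SortableSearchRequest comes from the same generated client package; the
# constructor arguments shown are illustrative only.
## body = SortableSearchRequest(limit=10)
## page = api.search_proxy_entities(body=body)                      # synchronous call
## thread = api.search_proxy_entities(body=body, async_req=True)    # asynchronous call
## page = thread.get()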
def search_proxy_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_proxy_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_proxy_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_proxy_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_proxy_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_proxy_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_registered_query_deleted_entities(self, **kwargs): # noqa: E501
"""Search over a customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_registered_query_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_registered_query_deleted_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_registered_query_deleted_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/derivedmetric/deleted', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedDerivedMetricDefinition', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_registered_query_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values | |
"""Archives test results to Google Storage."""
test_tarball = commands.ArchiveTestResults(
self._build_root, test_results_dir, prefix='')
# Wait for breakpad symbols. The archive path will be ready by the time
# the breakpad symbols are ready.
got_symbols = self._archive_stage.WaitForBreakpadSymbols()
archive_path = self._archive_stage.GetArchivePath()
upload_url = self._archive_stage.GetGSUploadLocation()
filenames = commands.GenerateStackTraces(
self._build_root, self._current_board, test_tarball, archive_path,
got_symbols)
filenames.append(commands.ArchiveFile(test_tarball, archive_path))
cros_build_lib.Info('Uploading artifacts to Google Storage...')
download_url = self._archive_stage.GetDownloadUrl()
for filename in filenames:
try:
commands.UploadArchivedFile(archive_path, upload_url, filename,
self._archive_stage.debug, update_list=True)
self.PrintBuildbotLink(download_url, filename)
except cros_build_lib.RunCommandError as e:
# Treat gsutil flake as a warning if it's the only problem.
self._HandleExceptionAsWarning(e)
def _PerformStage(self):
# These directories are used later to archive test artifacts.
test_results_dir = commands.CreateTestRoot(self._build_root)
try:
commands.RunTestSuite(self._build_root,
self._current_board,
self.GetImageDirSymlink(),
os.path.join(test_results_dir,
'test_harness'),
test_type=self._build_config['vm_tests'],
whitelist_chrome_crashes=self._chrome_rev is None,
archive_dir=self._archive_stage.bot_archive_root)
except Exception:
cros_build_lib.Error(_VM_TEST_ERROR_MSG)
raise
finally:
self._ArchiveTestResults(test_results_dir)
class TestTimeoutException(Exception):
"""Raised when a critical test times out."""
pass
class InvalidTestConditionException(Exception):
"""Raised when pre-conditions for a test aren't met."""
pass
class HWTestStage(BoardSpecificBuilderStage):
"""Stage that runs tests in the Autotest lab."""
option_name = 'tests'
config_name = 'hw_tests'
PERF_RESULTS_EXTENSION = 'results'
def __init__(self, options, build_config, board, archive_stage, suite):
super(HWTestStage, self).__init__(options, build_config, board,
suffix=' [%s]' % suite)
self._archive_stage = archive_stage
self._suite = suite
# Bind this early so derived classes can override it.
self._timeout = build_config['hw_tests_timeout']
self.wait_for_results = True
def _PrintFile(self, filename):
with open(filename) as f:
print f.read()
def _SendPerfResults(self):
"""Sends the perf results from the test to the perf dashboard."""
result_file_name = '%s.%s' % (self._suite,
HWTestStage.PERF_RESULTS_EXTENSION)
gs_results_file = '/'.join([self._archive_stage.GetGSUploadLocation(),
result_file_name])
gs_context = gs.GSContext()
gs_context.Copy(gs_results_file, self._options.log_dir)
# Prints out the actual result from gs_context.Copy.
logging.info('Copy of %s completed. Printing below:', result_file_name)
self._PrintFile(os.path.join(self._options.log_dir, result_file_name))
# Disable use of calling parents HandleStageException class.
# pylint: disable=W0212
def _HandleStageException(self, exception):
"""Override and don't set status to FAIL but FORGIVEN instead."""
if (isinstance(exception, cros_build_lib.RunCommandError) and
exception.result.returncode == 2 and
not self._build_config['hw_tests_critical']):
return self._HandleExceptionAsWarning(exception)
else:
return super(HWTestStage, self)._HandleStageException(exception)
def DealWithTimeout(self, exception):
if not self._build_config['hw_tests_critical']:
return self._HandleExceptionAsWarning(exception)
return super(HWTestStage, self)._HandleStageException(exception)
def _PerformStage(self):
if not self._archive_stage.WaitForHWTestUploads():
raise InvalidTestConditionException('Missing uploads.')
build = '/'.join([self._bot_id, self._archive_stage.GetVersion()])
if self._options.remote_trybot and self._options.hwtest:
debug = self._options.debug_forced
else:
debug = self._options.debug
try:
with cros_build_lib.SubCommandTimeout(self._timeout):
commands.RunHWTestSuite(build, self._suite, self._current_board,
self._build_config['hw_tests_pool'],
self._build_config['hw_tests_num'],
self._build_config['hw_tests_file_bugs'],
self.wait_for_results,
debug)
if self._build_config['hw_copy_perf_results']:
self._SendPerfResults()
except cros_build_lib.TimeoutError as exception:
return self.DealWithTimeout(exception)
class ASyncHWTestStage(HWTestStage, BoardSpecificBuilderStage,
ForgivingBuilderStage):
"""Stage that fires and forgets hw test suites to the Autotest lab."""
def __init__(self, options, build_config, board, archive_stage, suite):
super(ASyncHWTestStage, self).__init__(options, build_config, board,
archive_stage, suite)
self.wait_for_results = False
class SDKPackageStage(bs.BuilderStage):
"""Stage that performs preparing and packaging SDK files"""
# Version of the Manifest file being generated. Should be incremented for
# Major format changes.
MANIFEST_VERSION = '1'
_EXCLUDED_PATHS = ('usr/lib/debug', 'usr/local/autotest', 'packages', 'tmp')
def _PerformStage(self):
tarball_name = 'built-sdk.tar.xz'
tarball_location = os.path.join(self._build_root, tarball_name)
chroot_location = os.path.join(self._build_root,
constants.DEFAULT_CHROOT_DIR)
board_location = os.path.join(chroot_location, 'build/amd64-host')
manifest_location = os.path.join(self._build_root,
'%s.Manifest' % tarball_name)
# Create a tarball of the latest SDK.
self.CreateSDKTarball(chroot_location, board_location, tarball_location)
# Create a package manifest for the tarball.
self.CreateManifestFromSDK(board_location, manifest_location)
# Create toolchain packages.
self.CreateRedistributableToolchains(chroot_location)
# Make sure the regular user has the permission to read.
cmd = ['chmod', 'a+r', tarball_location]
cros_build_lib.SudoRunCommand(cmd, cwd=board_location)
def CreateRedistributableToolchains(self, chroot_location):
osutils.RmDir(os.path.join(chroot_location,
constants.SDK_TOOLCHAINS_OUTPUT),
ignore_missing=True)
cros_build_lib.RunCommand(
['cros_setup_toolchains', '--create-packages',
'--output-dir', os.path.join('/', constants.SDK_TOOLCHAINS_OUTPUT)],
enter_chroot=True)
def CreateSDKTarball(self, _chroot, sdk_path, dest_tarball):
"""Creates an SDK tarball from a given source chroot.
Args:
chroot: A chroot used for finding compression tool.
sdk_path: Path to the root of newly generated SDK image.
dest_tarball: Path of the tarball that should be created.
"""
# TODO(zbehan): We cannot use xz from the chroot unless it's
# statically linked.
extra_args = ['--exclude=%s/*' % path for path in self._EXCLUDED_PATHS]
# Options for maximum compression.
extra_env = { 'XZ_OPT' : '-e9' }
cros_build_lib.CreateTarball(
dest_tarball, sdk_path, sudo=True, extra_args=extra_args,
extra_env=extra_env)
def CreateManifestFromSDK(self, sdk_path, dest_manifest):
"""Creates a manifest from a given source chroot.
Args:
sdk_path: Path to the root of the SDK to describe.
dest_manifest: Path to the manifest that should be generated.
"""
package_data = {}
for key, version in portage_utilities.ListInstalledPackages(sdk_path):
package_data.setdefault(key, []).append((version, {}))
self._WriteManifest(package_data, dest_manifest)
def _WriteManifest(self, data, manifest):
"""Encode manifest into a json file."""
json_input = dict(version=self.MANIFEST_VERSION, packages=data)
osutils.WriteFile(manifest, json.dumps(json_input))
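# Hedged illustration of the manifest layout written above (package names/versions are made up):
## self._WriteManifest({'sys-devel/gcc': [('4.9.2', {})]}, '/tmp/built-sdk.tar.xz.Manifest')
## # writes: {"version": "1", "packages": {"sys-devel/gcc": [["4.9.2", {}]]}}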
class SDKTestStage(bs.BuilderStage):
"""Stage that performs testing an SDK created in a previous stage"""
option_name = 'tests'
def _PerformStage(self):
tarball_location = os.path.join(self._build_root, 'built-sdk.tar.xz')
new_chroot_cmd = ['cros_sdk', '--chroot', 'new-sdk-chroot']
# Build a new SDK using the provided tarball.
cmd = new_chroot_cmd + ['--download', '--replace', '--nousepkg',
'--url', 'file://' + tarball_location]
cros_build_lib.RunCommand(cmd, cwd=self._build_root)
for board in self._boards:
cros_build_lib.PrintBuildbotStepText(board)
cmd = new_chroot_cmd + ['--', './setup_board',
'--board', board, '--skip_chroot_upgrade']
cros_build_lib.RunCommand(cmd, cwd=self._build_root)
cmd = new_chroot_cmd + ['--', './build_packages',
'--board', board, '--nousepkg', '--skip_chroot_upgrade']
cros_build_lib.RunCommand(cmd, cwd=self._build_root)
class NothingToArchiveException(Exception):
"""Thrown if ArchiveStage found nothing to archive."""
def __init__(self, message='No images found to archive.'):
super(NothingToArchiveException, self).__init__(message)
class ArchiveStage(BoardSpecificBuilderStage):
"""Archives build and test artifacts for developer consumption."""
option_name = 'archive'
_VERSION_NOT_SET = '_not_set_version_'
_BUILDBOT_ARCHIVE = 'buildbot_archive'
_TRYBOT_ARCHIVE = 'trybot_archive'
@classmethod
def GetArchiveRoot(cls, buildroot, trybot=False):
"""Return the location where trybot archive images are kept."""
archive_base = cls._TRYBOT_ARCHIVE if trybot else cls._BUILDBOT_ARCHIVE
return os.path.join(buildroot, archive_base)
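# Illustrative values (the buildroot path is hypothetical):
## ArchiveStage.GetArchiveRoot('/b/cbuild', trybot=True)   # -> '/b/cbuild/trybot_archive'
## ArchiveStage.GetArchiveRoot('/b/cbuild')                # -> '/b/cbuild/buildbot_archive'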
# This stage is intended to run in the background, in parallel with tests.
def __init__(self, options, build_config, board):
super(ArchiveStage, self).__init__(options, build_config, board)
# Set version is dependent on setting external to class. Do not use
# directly. Use GetVersion() instead.
self._set_version = ArchiveStage._VERSION_NOT_SET
self.prod_archive = self._options.buildbot and not self._options.debug
self._archive_root = self.GetArchiveRoot(
self._build_root, trybot=not self.prod_archive)
self.bot_archive_root = os.path.join(self._archive_root, self._bot_id)
if self._options.remote_trybot:
self.debug = self._options.debug_forced
else:
self.debug = self._options.debug
# Queues that are populated during the Archive stage.
self._breakpad_symbols_queue = multiprocessing.Queue()
self._hw_test_uploads_status_queue = multiprocessing.Queue()
self._recovery_image_status_queue = multiprocessing.Queue()
self._release_upload_queue = multiprocessing.Queue()
self._upload_queue = multiprocessing.Queue()
self._upload_symbols_queue = multiprocessing.Queue()
self._hw_test_upload_queue = multiprocessing.Queue()
# Queues that are populated by other stages.
self._version_queue = multiprocessing.Queue()
self._autotest_tarballs_queue = multiprocessing.Queue()
self._full_autotest_tarball_queue = multiprocessing.Queue()
# These variables will be initialized when the stage is run.
self._archive_path = None
self._pkg_dir = None
def SetVersion(self, path_to_image):
"""Sets the cros version for the given built path to an image.
This must be called in order for archive stage to finish.
Args:
path_to_image: Path to latest image.
"""
self._version_queue.put(path_to_image)
def AutotestTarballsReady(self, autotest_tarballs):
"""Tell Archive Stage that autotest tarball is ready.
This must be called in order for archive stage to finish.
Args:
autotest_tarballs: The paths of the autotest tarballs.
"""
self._autotest_tarballs_queue.put(autotest_tarballs)
def FullAutotestTarballReady(self, full_autotest_tarball):
"""Tell Archive Stage that full autotest tarball is ready.
This must be called in order for archive stage to finish when
chromeos_official is true.
Args:
full_autotest_tarball: The paths of the full autotest tarball.
"""
self._full_autotest_tarball_queue.put(full_autotest_tarball)
def GetVersion(self):
"""Gets the version for the archive stage."""
if self._set_version == ArchiveStage._VERSION_NOT_SET:
version = self._version_queue.get()
self._set_version = version
# Put the version right back on the queue in case anyone else is waiting.
self._version_queue.put(version)
return self._set_version
def WaitForHWTestUploads(self):
"""Waits until artifacts needed for HWTest stage are uploaded.
Returns:
True if artifacts uploaded successfully.
False otherwise.
"""
cros_build_lib.Info('Waiting for uploads...')
status = self._hw_test_uploads_status_queue.get()
# Put the status back so other HWTestStage instances don't starve.
self._hw_test_uploads_status_queue.put(status)
return status
def WaitForRecoveryImage(self):
"""Wait until artifacts needed by SignerTest stage are created.
Returns:
True if artifacts created successfully.
False otherwise.
"""
cros_build_lib.Info('Waiting for recovery image...')
status = self._recovery_image_status_queue.get()
# Put the status back so other SignerTestStage instances don't starve.
self._recovery_image_status_queue.put(status)
return status
def _BreakpadSymbolsGenerated(self, success):
"""Signal that breakpad symbols have been generated.
Arguments:
success: True to indicate the symbols were generated, else False.
"""
self._breakpad_symbols_queue.put(success)
def WaitForBreakpadSymbols(self):
"""Wait for the breakpad symbols to be generated.
Returns:
True if the breakpad symbols were generated.
False if the breakpad symbols were not generated within 20 mins.
"""
success = False
cros_build_lib.Info('Waiting for breakpad symbols...')
try:
# TODO: Clean this up so that we no longer rely on a timeout
success = self._breakpad_symbols_queue.get(True, 1200)
except Queue.Empty:
cros_build_lib.Warning(
'Breakpad symbols were not generated within timeout period.')
return success
def GetDownloadUrl(self):
"""Get the URL where we can download artifacts."""
version = self.GetVersion()
if not version:
return None
if self._options.buildbot or self._options.remote_trybot:
upload_location = self.GetGSUploadLocation()
url_prefix = 'https://sandbox.google.com/storage/'
return upload_location.replace('gs://', url_prefix)
else:
return self.GetArchivePath()
def _GetGSUtilArchiveDir(self):
if self._options.archive_base:
gs_base = self._options.archive_base
elif (self._options.remote_trybot or
self._build_config['gs_path'] == cbuildbot_config.GS_PATH_DEFAULT):
gs_base = constants.DEFAULT_ARCHIVE_BUCKET
else:
return self._build_config['gs_path']
return '%s/%s' % (gs_base, self._bot_id)
def GetGSUploadLocation(self):
"""Get | |
<filename>darch/datasets.py
import numpy as np
import scipy as sp
import tensorflow as tf
try:
import cPickle
except ImportError:
import pickle as cPickle
import gc
import os, sys, tarfile, urllib
import scipy.io as sio
from scipy.misc import *
import argparse
import glob
from PIL import Image
import random
class InMemoryDataset:
"""Wrapper around a dataset for iteration that allows cycling over the
dataset.
This functionality is especially useful for training. One can specify
whether the data is to be shuffled at the beginning of each epoch. It is
also possible to specify a transformation function to be applied to each
batch before it is returned by next_batch.
"""
def __init__(self, X, y, shuffle_at_epoch_begin, batch_transform_fn=None):
if X.shape[0] != y.shape[0]:
raise ValueError("X and y must have the same number of examples.")
self.X = X
self.y = y
self.shuffle_at_epoch_begin = shuffle_at_epoch_begin
self.batch_transform_fn = batch_transform_fn
self.iter_i = 0
def get_num_examples(self):
return self.X.shape[0]
def next_batch(self, batch_size):
"""Returns the next batch in the dataset.
If there are fewer than batch_size examples remaining before the end
of the epoch, next_batch returns only as many examples as are left in
the epoch.
"""
#print("Batch Size :",batch_size)
n = self.X.shape[0]
i = self.iter_i
# Shuffling step (done once at the beginning of each epoch).
if i == 0 and self.shuffle_at_epoch_begin:
inds = np.random.permutation(n)
# Collect garbage before reindexing the potentially large arrays.
gc.collect()
self.X = self.X[inds]
self.y = self.y[inds]
# getting the batch.
eff_batch_size = min(batch_size, n - i)
X_batch = self.X[i:i + eff_batch_size]
y_batch = self.y[i:i + eff_batch_size]
self.iter_i = (self.iter_i + eff_batch_size) % n
# transform if a transform function was defined.
if self.batch_transform_fn is not None:
X_batch_out, y_batch_out = self.batch_transform_fn(X_batch, y_batch)
else:
X_batch_out, y_batch_out = X_batch, y_batch
return (X_batch_out, y_batch_out)
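# Illustrative sketch (not part of the original module): a typical way to drive
# InMemoryDataset during training. The array shapes, batch size and the flip
# transform below are made up purely for demonstration.
def _example_inmemory_dataset_usage():
    X = np.random.rand(100, 32, 32, 3).astype('float32')
    y = np.random.randint(0, 10, size=(100,)).astype('float32')
    def flip_lr(X_batch, y_batch):
        # Example batch transform: flip images horizontally.
        return (X_batch[:, :, ::-1, :], y_batch)
    ds = InMemoryDataset(X, y, shuffle_at_epoch_begin=True,
                         batch_transform_fn=flip_lr)
    for _ in range(10):
        X_batch, y_batch = ds.next_batch(16)
        # Feed (X_batch, y_batch) to the training step here.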
def load_mnist(data_dir, flatten=False, one_hot=True, normalize_range=False,
whiten_pixels=False, border_pad_size=0):
from tensorflow.examples.tutorials.mnist import input_data
# print data_dir
mnist = input_data.read_data_sets(data_dir, one_hot=one_hot, reshape=flatten, validation_size=6000)
def _extract_fn(x):
X = x.images
y = x.labels
y = y.astype('float32')
if not normalize_range:
X *= 255.0
return (X, y)
Xtrain, ytrain = _extract_fn(mnist.train)
Xval, yval = _extract_fn(mnist.validation)
Xtest, ytest = _extract_fn(mnist.test)
print(Xtrain.shape)
if whiten_pixels:
mean = Xtrain.mean()
std = Xtrain.std()
print(mean,std)
Xtrain = (Xtrain - mean) / std
Xval = (Xval - mean) / std
Xtest = (Xtest - mean) / std
# NOTE: the zero padding is done after the potential whitening
if border_pad_size > 0:
Xtrain = zero_pad_border(Xtrain, border_pad_size)
Xval = zero_pad_border(Xval, border_pad_size)
Xtest = zero_pad_border(Xtest, border_pad_size)
return (Xtrain, ytrain, Xval, yval, Xtest, ytest)
def load_fashion(data_dir, flatten=False, one_hot=True, normalize_range=False,
whiten_pixels=False, border_pad_size=0):
from tensorflow.examples.tutorials.mnist import input_data
# print data_dir
mnist = input_data.read_data_sets(data_dir, one_hot=one_hot, reshape=flatten, validation_size=6000)
def _extract_fn(x):
X = x.images
y = x.labels
y = y.astype('float32')
if not normalize_range:
X *= 255.0
return (X, y)
Xtrain, ytrain = _extract_fn(mnist.train)
Xval, yval = _extract_fn(mnist.validation)
Xtest, ytest = _extract_fn(mnist.test)
print(Xtrain.shape)
if whiten_pixels:
mean = Xtrain.mean()
std = Xtrain.std()
print(mean,std)
Xtrain = (Xtrain - mean) / std
Xval = (Xval - mean) / std
Xtest = (Xtest - mean) / std
# NOTE: the zero padding is done after the potential whitening
if border_pad_size > 0:
Xtrain = zero_pad_border(Xtrain, border_pad_size)
Xval = zero_pad_border(Xval, border_pad_size)
Xtest = zero_pad_border(Xtest, border_pad_size)
return (Xtrain, ytrain, Xval, yval, Xtest, ytest)
def load_cifar10(data_dir, flatten=False, one_hot=True, normalize_range=False,
whiten_pixels=True, border_pad_size=0):
"""Loads all of CIFAR-10 in a numpy array.
Provides a few options for the output formats. For example,
normalize_range returns the output images with pixel values in [0.0, 1.0].
The other options are self-explanatory. Border padding corresponds to
enlarging the image by zero-padding its border.
"""
train_filenames = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4']
val_filenames = ['data_batch_5']
test_filenames = ['test_batch']
# NOTE: this function uses some arguments from the outer scope, namely
# flatten, one_hot, normalize_range, and possibly others once added.
def _load_data(fpath):
with open(fpath, 'rb') as f:
try:
d = cPickle.load(f)
except UnicodeDecodeError:
f.seek(0)
d = cPickle.load(f, encoding='bytes')
d = {k.decode(): v for k, v in d.items()} # change keys into strings
# for the data
X = d['data'].astype('float32')
# reshape the data to the format (num_images, height, width, depth)
num_images = X.shape[0]
num_classes = 10
X = X.reshape( (num_images, 3, 32, 32) )
X = X.transpose( (0,2,3,1) )
X = X.astype('float32')
# transformations based on the argument options.
if normalize_range:
X = X / 255.0
if flatten:
X = X.reshape( (num_images, -1) )
# for the labels
y = np.array(d['labels'])
if one_hot:
y_one_hot = np.zeros( (num_images, num_classes), dtype='float32')
y_one_hot[ np.arange(num_images), y ] = 1.0
y = y_one_hot
return (X, y)
# NOTE: this function uses some arguments from the outer scope.
def _load_data_multiple_files(fname_list):
X_parts = []
y_parts = []
for fname in fname_list:
fpath = os.path.join(data_dir, fname)
X, y = _load_data(fpath)
X_parts.append(X)
y_parts.append(y)
X_full = np.concatenate(X_parts, axis=0)
y_full = np.concatenate(y_parts, axis=0)
return (X_full, y_full)
Xtrain, ytrain = _load_data_multiple_files(train_filenames)
Xval, yval = _load_data_multiple_files(val_filenames)
print("Total Training size before",Xtrain.shape)
print("Validation set size before",Xval.shape)
A = np.vstack((Xtrain, Xval))
B = np.vstack((ytrain, yval))
split = int(np.floor(0.9 * A.shape[0]))
Xtrain = A[:split]
ytrain = B[:split]
Xval = A[split:A.shape[0]]
yval = B[split:B.shape[0]]
print("Total Training size after",Xtrain.shape)
print("Validation set size after",Xval.shape)
Xtest, ytest = _load_data_multiple_files(test_filenames)
if whiten_pixels:
mean = Xtrain.mean(axis=0)[None, :]
std = Xtrain.std(axis=0)[None, :]
Xtrain = (Xtrain - mean) / std
Xval = (Xval - mean) / std
Xtest = (Xtest - mean) / std
# NOTE: the zero padding is done after the potential whitening
if border_pad_size > 0:
Xtrain = zero_pad_border(Xtrain, border_pad_size)
Xval = zero_pad_border(Xval, border_pad_size)
Xtest = zero_pad_border(Xtest, border_pad_size)
return (Xtrain, ytrain, Xval, yval, Xtest, ytest)
def read_labels(path_to_labels):
"""
:param path_to_labels: path to the binary file containing labels from the STL-10 dataset
:return: an array containing the labels
taken from https://github.com/mttk/STL10/blob/master/stl10_input.py
"""
with open(path_to_labels, 'rb') as f:
labels = np.fromfile(f, dtype=np.uint8)
return labels
def read_all_images(path_to_data):
"""
:param path_to_data: the file containing the binary images from the STL-10 dataset
:return: an array containing all the images
taken from https://github.com/mttk/STL10/blob/master/stl10_input.py
"""
with open(path_to_data, 'rb') as f:
# read whole file in uint8 chunks
everything = np.fromfile(f, dtype=np.uint8)
# We force the data into 3x96x96 chunks, since the
# images are stored in "column-major order", meaning
# that "the first 96*96 values are the red channel,
# the next 96*96 are green, and the last are blue."
# The -1 is since the size of the pictures depends
# on the input file, and this way numpy determines
# the size on its own.
images = np.reshape(everything, (-1, 3, 96, 96))
# Now transpose the images into a standard image format
# readable by, for example, matplotlib.imshow
# You might want to comment this line or reverse the shuffle
# if you will use a learning algorithm like CNN, since they like
# their channels separated.
images = np.transpose(images, (0, 3, 2, 1))
return images
def read_single_image(image_file):
"""
CAREFUL! - this method uses a file as input instead of the path - so the
position of the reader will be remembered outside of context of this method.
:param image_file: the open file containing the images
:return: a single image
taken from https://github.com/mttk/STL10/blob/master/stl10_input.py
"""
# image shape
HEIGHT = 96
WIDTH = 96
DEPTH = 3
# size of a single image in bytes
SIZE = HEIGHT * WIDTH * DEPTH
# read a single image, count determines the number of uint8's to read
image = np.fromfile(image_file, dtype=np.uint8, count=SIZE)
# force into image matrix
image = np.reshape(image, (3, 96, 96))
# transpose to standard format
# You might want to comment this line or reverse the shuffle
# if you will use a learning algorithm like CNN, since they like
# their channels separated.
image = np.transpose(image, (2, 1, 0))
return image
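# Illustrative sketch (not part of the original module): reading the extracted
# STL-10 binaries with the helpers above. The directory layout and file names
# are assumed to match the extracted stl10_binary archive; adjust as needed.
def _example_load_stl10_arrays(data_dir):
    binary_dir = os.path.join(data_dir, 'stl10_binary')
    X_train = read_all_images(os.path.join(binary_dir, 'train_X.bin'))
    y_train = read_labels(os.path.join(binary_dir, 'train_y.bin'))
    X_test = read_all_images(os.path.join(binary_dir, 'test_X.bin'))
    y_test = read_labels(os.path.join(binary_dir, 'test_y.bin'))
    return (X_train, y_train, X_test, y_test)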
def download_and_extract(DATA_DIR):
"""
Download and extract the STL-10 dataset
:return: None
Taken from
https://github.com/mttk/STL10/blob/master/stl10_input.py
"""
DATA_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'
dest_directory = DATA_DIR
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\rDownloading %s %.2f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.urlretrieve(DATA_URL, filepath, reporthook=_progress)
print('Downloaded', filename)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def load_stl10(data_dir, flatten=False, one_hot=True, | |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Keyvault client - adapted from Bluehound code."""
import base64
import json
from datetime import datetime
from typing import Any, List
import keyring
import pandas.io.clipboard as pyperclip
from adal import AdalError, AuthenticationContext
from azure.core.exceptions import ResourceNotFoundError
from azure.keyvault.secrets import KeyVaultSecret, SecretClient
from azure.mgmt.keyvault import KeyVaultManagementClient
from azure.mgmt.keyvault.models import (
AccessPolicyEntry,
CertificatePermissions,
KeyPermissions,
Permissions,
SecretPermissions,
Sku,
Vault,
VaultCreateOrUpdateParameters,
VaultProperties,
)
from IPython.display import HTML, display
from keyring.errors import KeyringError
from msrestazure.azure_exceptions import CloudError
from .._version import VERSION
from .azure_auth_core import az_connect_core
from .exceptions import MsticpyKeyVaultConfigError, MsticpyKeyVaultMissingSecretError
from .keyvault_settings import KeyVaultSettings
from .utility import export, is_ipython
__version__ = VERSION
__author__ = "<NAME>, <NAME>"
# pylint: disable=too-many-instance-attributes
@export
class AuthClient:
"""Authentication class base."""
def __init__(
self,
tenant_id: str,
client_id: str,
client_uri: str,
name: str = None,
**kwargs,
):
"""
Initialize base authentication client for credential caching.
Parameters
----------
tenant_id : str
Tenant ID of Azure User
client_id : str
Client ID of application client
client_uri : str
[description]
name : str, optional
Name of the secret store, by default None
authority : str, optional
The AAD authority - one of 'global', 'usgov', 'de' or 'chi'
authority_uri : str, optional
The AAD authority URI - overrides `authority`
debug : bool, optional
Output debug information if True, by default False
Notes
-----
The parameter values can also be obtained from the
KeyVault section of msticpyconfig.yaml.
"""
self.name = name
self.debug = kwargs.pop("debug", False)
self.settings: KeyVaultSettings = (
kwargs.pop("settings", None) or KeyVaultSettings()
)
self.tenant_id = tenant_id or self.settings.get("tenantid")
if not self.tenant_id:
raise MsticpyKeyVaultConfigError(
"Could not get TenantId from function parameters or configuration.",
"Please add this to the KeyVault section of msticpyconfig.yaml",
title="missing tenant ID value.",
)
self.authority = kwargs.pop(
"authority", self.settings.get_tenant_authority_host(tenant_id)
)
self.client_id = client_id or self.settings.CLIENT_ID
self.client_uri = client_uri
self.authority_uri = self.settings.get_tenant_authority_uri(
authority_uri=kwargs.get("authority_uri"), tenant=self.tenant_id
)
if self.debug:
print("AuthClient for %s - %s" % (client_id, client_uri))
self._get_creds()
if self._expired_creds:
if self.debug:
print("expired creds")
try:
self._refresh_creds()
return
except AdalError:
if self.debug:
print("Token was no longer valid, forcing a new one.")
self._get_token()
def _get_token(self):
context = AuthenticationContext(self.authority_uri)
code = context.acquire_user_code(self.client_uri, self.client_id)
_prompt_for_code(code)
self.config_data = context.acquire_token_with_device_code(
self.client_uri, code, self.client_id
)
self._cache_creds()
def _get_creds(self):
self._get_token()
def _cache_creds(self):
pass
def _is_valid_config_data(self):
keys = ["accessToken", "refreshToken", "expiresOn"]
return (
all(key in self.config_data for key in keys)
and all(self.config_data.get(key) for key in keys)
and all(len(self.config_data.get(key)) > 0 for key in keys)
)
@property
def auth_id(self) -> str:
"""Return name or ID of client."""
return self.name if self.name is not None else self.client_id
@property
def user_oid(self) -> str:
"""
Return the user Object ID.
Returns
-------
str
User OID.
"""
data = self._get_parsed_token_data()
return data.get("oid")
def _get_parsed_token_data(self) -> Any:
tok_data = self.token
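# A JWT is three base64url-encoded segments separated by dots; the middle
# segment carries the claims (including "oid"). Base64 decoding requires the
# length to be a multiple of 4, hence the '=' padding added below.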
tok_data = tok_data.split(".")[1]
tok_data += "=" * ((4 - len(tok_data) % 4) % 4)
return json.loads(base64.b64decode(tok_data))
def _refresh_creds(self):
context = AuthenticationContext(self.authority_uri)
self.config_data = context.acquire_token_with_refresh_token(
self.config_data["refreshToken"], self.client_id, self.client_uri
)
if self.debug:
print(f"got new token expiring {self.config_data['expiresOn']}")
self._cache_creds()
@property
def _expired_creds(self) -> bool:
return self._expires_on < datetime.now()
@property
def _expires_on(self) -> datetime:
"""Return token expiry date as string."""
return datetime.strptime(self.config_data["expiresOn"], "%Y-%m-%d %H:%M:%S.%f")
@property
def token(self) -> str:
"""
Return the access token.
Returns
-------
str
Access Token
"""
if self._expired_creds:
try:
self._refresh_creds()
except AdalError:
self._get_token()
return self.config_data["accessToken"]
def _adal_callback(self, server: str, resource: str, scope: str, scheme: str):
"""
ADAL Callback for authentication.
Parameters
----------
server : str
Not used
resource : str
Not used
scope : str
Not used
scheme : str
Not used
Returns
-------
Tuple(str, str)
Bearer, Token
Notes
-----
None of the parameters are used in this function. However,
they are required because of the expected callback signature.
"""
del (server, resource, scope, scheme)
return "Bearer", self.token
# pylint: enable=too-many-instance-attributes
@export
class KeyringAuthClient(AuthClient):
"""
Key Authentication Client.
Handles management of authentication and refresh tokens
via keyring
"""
# pylint: disable=too-many-arguments
def __init__(
self,
tenant_id: str,
client_id: str,
client_url: str,
name: str = None,
debug: bool = False,
):
"""
Initialize KeyringAuthClient.
Parameters
----------
tenant_id : str
Tenant ID of Azure User
client_id : str
Client ID of application client
client_url : str
[description]
name : str, optional
Name of the secret store, by default None
debug : bool, optional
Output debug information if True, by default False
"""
self.name = name
self.keyring = self.auth_id
super().__init__(tenant_id, client_id, client_url, name=name, debug=debug)
# pylint: enable=too-many-arguments
def _get_creds(self):
if self.debug:
print("Fetching creds from keyring")
try:
access_token = (
keyring.get_password(self.keyring, "adal_context_1")
+ keyring.get_password(self.keyring, "adal_context_2")
+ keyring.get_password(self.keyring, "adal_context_3")
+ keyring.get_password(self.keyring, "adal_context_4")
)
refresh_token = (
keyring.get_password(self.keyring, "adal_context_5")
+ keyring.get_password(self.keyring, "adal_context_6")
+ keyring.get_password(self.keyring, "adal_context_7")
)
expires_on = keyring.get_password(self.keyring, "adal_context_8")
self.config_data = {
"accessToken": access_token,
"refreshToken": refresh_token,
"expiresOn": expires_on,
}
except (TypeError, KeyringError):
if self.debug:
print("No valid credentials in keyring %s" % self.keyring)
self._get_token()
if not self._is_valid_config_data():
if self.debug:
print("No valid authtoken config found in keyring")
self._get_token()
def _cache_creds(self):
if self.debug:
print("Saving config data to keyring %s" % self.keyring)
keyring.set_password(
self.keyring, "adal_context_1", self.config_data["accessToken"][:400]
)
keyring.set_password(
self.keyring, "adal_context_2", self.config_data["accessToken"][400:800]
)
keyring.set_password(
self.keyring, "adal_context_3", self.config_data["accessToken"][800:1200]
)
keyring.set_password(
self.keyring, "adal_context_4", self.config_data["accessToken"][1200:]
)
keyring.set_password(
self.keyring, "adal_context_5", self.config_data["refreshToken"][:400]
)
keyring.set_password(
self.keyring, "adal_context_6", self.config_data["refreshToken"][400:800]
)
keyring.set_password(
self.keyring, "adal_context_7", self.config_data["refreshToken"][800:]
)
keyring.set_password(
self.keyring, "adal_context_8", self.config_data["expiresOn"]
)
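# Note on the chunking above: the access and refresh tokens are split into
# 400-character pieces, presumably because some keyring backends (for example
# the Windows Credential Manager) cap the size of a single stored secret.
# _get_creds reassembles the pieces in the same order.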
# class KeyVaultAuthClient(AuthClient):
# """
# Keyvault Auth client.
# Handles management of authentication tokens in keyvault.
# """
# def __init__(
# self,
# tenant_id: str,
# client_id: str,
# client_url: str,
# secret_name: str,
# name: str = None,
# debug: bool = False,
# ):
# """
# Initialize KeyvaultAuthClient.
# Parameters
# ----------
# tenant_id : str
# Tenant ID of Azure User
# client_id : str
# Client ID of application client
# client_url : str
# [description]
# name : str, optional
# Name of the secret store, by default None
# debug : bool, optional
# Output debug information if True, by default False
# """
# self.secret_name = secret_name
# self._get_creds = self._get_keyvault_creds
# self._cache_creds = self._cache_creds_keyvault
# self.keyvault_client = BHKeyVaultClient(
# tenant_id=tenant_id, vault_uri=client_url
# )
# self.config_data: Any = None
# super().__init__(tenant_id, client_id, client_url, name=name, debug=debug)
# def _get_keyvault_creds(self):
# if self.debug:
# print("getting tokens from keyvault")
# try:
# self.config_data = json.loads(
# self.keyvault_client.get_secret(self.secret_name)
# )
# except KeyVaultMissingSecretException:
# if self.debug:
# print("missing secret from keyvault, fetching manually")
# self._get_token()
# except KeyVaultErrorException as err:
# if self.debug:
# print("bad creds in keyvault, you gotta getem")
# print("here is what went wrong: %s" % str(err))
# self._get_token()
# def _cache_creds_keyvault(self):
# self.keyvault_client.set_secret(self.secret_name, json.dumps(self.config_data))
@export
class BHKeyVaultClient:
"""Core KeyVault client."""
_KEYRING_NAME = "keyvault"
def __init__(
self,
tenant_id: str = None,
vault_uri: str = None,
vault_name: str = None,
settings: KeyVaultSettings = None,
**kwargs,
):
"""
Initialize the BHKeyVault client.
Parameters
----------
tenant_id : str
The tenant ID of the service
vault_uri : str, optional
The full URI of the keyvault, by default None
vault_name : str, optional
The name of the keyvault in the public cloud, by default None
authn_type : str, optional
Authentication mode, by default 'interactive'
Supported options are:
- 'device' for device code authentication
- 'interactive' for interactive browser authentication
authority : str, optional
The AAD authority - one of 'global', 'usgov', 'de' or 'chi'
authority_uri : str, optional
The AAD authority URI - overrides `authority`
settings : KeyVaultSettings
An instance of KeyVaultSettings containing KV parameters.
debug : bool, optional
[description], by default False
Raises
------
KeyVaultMissingVaultException
No Vault name or URI supplied.
Notes
-----
The parameter values can also be obtained from the
KeyVault section of msticpyconfig.yaml.
"""
self.debug = kwargs.pop("debug", False)
self.settings: KeyVaultSettings = settings or KeyVaultSettings()
self.tenant_id = tenant_id or self.settings.get("tenantid")
if not self.tenant_id:
raise MsticpyKeyVaultConfigError(
"Could not get TenantId from function parameters or configuration.",
"Please add this to the KeyVault section of msticpyconfig.yaml",
title="missing tenant ID value.",
)
self.authn_type = kwargs.pop(
"authn_type", self.settings.get("authntype", "interactive")
)
# for authority and authority_uri, any parameters take priority
# and fall back on settings if not specified.
if "authority" in kwargs:
self.settings["authority"] = kwargs.pop("authority")
self.authority_uri = self.settings.get_tenant_authority_host(
authority_uri=kwargs.get("authority_uri"), tenant=self.tenant_id
)
if not vault_uri and not vault_name:
if "vaultname" in self.settings:
vault_name = self.settings["vaultname"]
else:
raise MsticpyKeyVaultConfigError(
"Check that you have specified the right value for VaultName"
+ " in your configuration",
title="Key Vault vault name not found.",
)
if vault_uri:
self.vault_uri = vault_uri
else:
vault_uri = self.settings.keyvault_uri
if vault_uri:
self.vault_uri = vault_uri.format(vault=vault_name)
else:
cloud = self.settings.cloud
raise MsticpyKeyVaultConfigError(
f"Could not | |
<filename>Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py_bureau/utils_agg.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 19:27:07 2018
@author: kazuki.onodera
"""
stats = ['min', 'mean', 'max', 'var']
stats_sum = ['min', 'mean', 'max', 'var', 'sum']
# =============================================================================
# prev
# =============================================================================
prev_num_aggregations = {
# TODO: optimize stats
'AMT_ANNUITY': stats,
'AMT_APPLICATION': stats,
'AMT_CREDIT': stats,
'APP_CREDIT_PERC': stats,
'AMT_DOWN_PAYMENT': stats,
'AMT_GOODS_PRICE': stats,
'HOUR_APPR_PROCESS_START': stats,
'RATE_DOWN_PAYMENT': stats,
'DAYS_DECISION': stats,
'CNT_PAYMENT': stats,
'total_debt': stats,
'AMT_CREDIT-d-total_debt': stats,
'AMT_GOODS_PRICE-d-total_debt': stats,
'AMT_GOODS_PRICE-d-AMT_CREDIT': stats,
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL': stats,
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL': stats,
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL': stats,
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL': stats,
'AMT_ANNUITY-d-app_AMT_CREDIT': stats,
'AMT_APPLICATION-d-app_AMT_CREDIT': stats,
'AMT_CREDIT-d-app_AMT_CREDIT': stats,
'AMT_GOODS_PRICE-d-app_AMT_CREDIT': stats,
'AMT_ANNUITY-d-app_AMT_ANNUITY': stats,
'AMT_APPLICATION-d-app_AMT_ANNUITY': stats,
'AMT_CREDIT-d-app_AMT_ANNUITY': stats,
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY': stats,
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE': stats,
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE': stats,
'AMT_CREDIT-d-app_AMT_GOODS_PRICE': stats,
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE': stats,
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL': stats,
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL': stats,
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL': stats,
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL': stats,
'AMT_ANNUITY-m-app_AMT_CREDIT': stats,
'AMT_APPLICATION-m-app_AMT_CREDIT': stats,
'AMT_CREDIT-m-app_AMT_CREDIT': stats,
'AMT_GOODS_PRICE-m-app_AMT_CREDIT': stats,
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL': stats,
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL': stats,
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL': stats,
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL': stats,
'AMT_ANNUITY-m-app_AMT_ANNUITY': stats,
'AMT_APPLICATION-m-app_AMT_ANNUITY': stats,
'AMT_CREDIT-m-app_AMT_ANNUITY': stats,
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY': stats,
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL': stats,
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL': stats,
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL': stats,
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL': stats,
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE': stats,
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE': stats,
'AMT_CREDIT-m-app_AMT_GOODS_PRICE': stats,
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE': stats,
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL': stats,
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL': stats,
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL': stats,
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL': stats,
'DAYS_FIRST_DRAWING-m-app_DAYS_BIRTH': stats,
'DAYS_FIRST_DRAWING-m-app_DAYS_EMPLOYED': stats,
'DAYS_FIRST_DRAWING-m-app_DAYS_REGISTRATION': stats,
'DAYS_FIRST_DRAWING-m-app_DAYS_ID_PUBLISH': stats,
'DAYS_FIRST_DRAWING-m-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_FIRST_DUE-m-app_DAYS_BIRTH': stats,
'DAYS_FIRST_DUE-m-app_DAYS_EMPLOYED': stats,
'DAYS_FIRST_DUE-m-app_DAYS_REGISTRATION': stats,
'DAYS_FIRST_DUE-m-app_DAYS_ID_PUBLISH': stats,
'DAYS_FIRST_DUE-m-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_LAST_DUE_1ST_VERSION-m-app_DAYS_BIRTH': stats,
'DAYS_LAST_DUE_1ST_VERSION-m-app_DAYS_EMPLOYED': stats,
'DAYS_LAST_DUE_1ST_VERSION-m-app_DAYS_REGISTRATION': stats,
'DAYS_LAST_DUE_1ST_VERSION-m-app_DAYS_ID_PUBLISH': stats,
'DAYS_LAST_DUE_1ST_VERSION-m-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_LAST_DUE-m-app_DAYS_BIRTH': stats,
'DAYS_LAST_DUE-m-app_DAYS_EMPLOYED': stats,
'DAYS_LAST_DUE-m-app_DAYS_REGISTRATION': stats,
'DAYS_LAST_DUE-m-app_DAYS_ID_PUBLISH': stats,
'DAYS_LAST_DUE-m-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_TERMINATION-m-app_DAYS_BIRTH': stats,
'DAYS_TERMINATION-m-app_DAYS_EMPLOYED': stats,
'DAYS_TERMINATION-m-app_DAYS_REGISTRATION': stats,
'DAYS_TERMINATION-m-app_DAYS_ID_PUBLISH': stats,
'DAYS_TERMINATION-m-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_FIRST_DRAWING-d-app_DAYS_BIRTH': stats,
'DAYS_FIRST_DRAWING-d-app_DAYS_EMPLOYED': stats,
'DAYS_FIRST_DRAWING-d-app_DAYS_REGISTRATION': stats,
'DAYS_FIRST_DRAWING-d-app_DAYS_ID_PUBLISH': stats,
'DAYS_FIRST_DRAWING-d-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_FIRST_DUE-d-app_DAYS_BIRTH': stats,
'DAYS_FIRST_DUE-d-app_DAYS_EMPLOYED': stats,
'DAYS_FIRST_DUE-d-app_DAYS_REGISTRATION': stats,
'DAYS_FIRST_DUE-d-app_DAYS_ID_PUBLISH': stats,
'DAYS_FIRST_DUE-d-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_LAST_DUE_1ST_VERSION-d-app_DAYS_BIRTH': stats,
'DAYS_LAST_DUE_1ST_VERSION-d-app_DAYS_EMPLOYED': stats,
'DAYS_LAST_DUE_1ST_VERSION-d-app_DAYS_REGISTRATION': stats,
'DAYS_LAST_DUE_1ST_VERSION-d-app_DAYS_ID_PUBLISH': stats,
'DAYS_LAST_DUE_1ST_VERSION-d-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_LAST_DUE-d-app_DAYS_BIRTH': stats,
'DAYS_LAST_DUE-d-app_DAYS_EMPLOYED': stats,
'DAYS_LAST_DUE-d-app_DAYS_REGISTRATION': stats,
'DAYS_LAST_DUE-d-app_DAYS_ID_PUBLISH': stats,
'DAYS_LAST_DUE-d-app_DAYS_LAST_PHONE_CHANGE': stats,
'DAYS_TERMINATION-d-app_DAYS_BIRTH': stats,
'DAYS_TERMINATION-d-app_DAYS_EMPLOYED': stats,
'DAYS_TERMINATION-d-app_DAYS_REGISTRATION': stats,
'DAYS_TERMINATION-d-app_DAYS_ID_PUBLISH': stats,
'DAYS_TERMINATION-d-app_DAYS_LAST_PHONE_CHANGE': stats,
'cnt_paid': ['min', 'mean', 'max', 'var', 'sum'],
'cnt_paid_ratio': stats,
'cnt_unpaid': ['min', 'mean', 'max', 'var', 'sum'],
'amt_paid': ['min', 'mean', 'max', 'var', 'sum'],
'amt_unpaid': ['min', 'mean', 'max', 'var', 'sum'],
'active': ['min', 'mean', 'max', 'var', 'sum'],
'completed': ['min', 'mean', 'max', 'var', 'sum'],
# diff & chng
'total_debt_diff': stats,
'total_debt_pctchange': stats,
'AMT_CREDIT-d-total_debt_diff': stats,
'AMT_CREDIT-d-total_debt_pctchange': stats,
'AMT_GOODS_PRICE-d-total_debt_diff': stats,
'AMT_GOODS_PRICE-d-total_debt_pctchange': stats,
'AMT_GOODS_PRICE-d-AMT_CREDIT_diff': stats,
'AMT_GOODS_PRICE-d-AMT_CREDIT_pctchange': stats,
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_ANNUITY-d-app_AMT_CREDIT_diff': stats,
'AMT_ANNUITY-d-app_AMT_CREDIT_pctchange': stats,
'AMT_APPLICATION-d-app_AMT_CREDIT_diff': stats,
'AMT_APPLICATION-d-app_AMT_CREDIT_pctchange': stats,
'AMT_CREDIT-d-app_AMT_CREDIT_diff': stats,
'AMT_CREDIT-d-app_AMT_CREDIT_pctchange': stats,
'AMT_GOODS_PRICE-d-app_AMT_CREDIT_diff': stats,
'AMT_GOODS_PRICE-d-app_AMT_CREDIT_pctchange': stats,
'AMT_ANNUITY-d-app_AMT_ANNUITY_diff': stats,
'AMT_ANNUITY-d-app_AMT_ANNUITY_pctchange': stats,
'AMT_APPLICATION-d-app_AMT_ANNUITY_diff': stats,
'AMT_APPLICATION-d-app_AMT_ANNUITY_pctchange': stats,
'AMT_CREDIT-d-app_AMT_ANNUITY_diff': stats,
'AMT_CREDIT-d-app_AMT_ANNUITY_pctchange': stats,
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY_diff': stats,
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY_pctchange': stats,
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE_diff': stats,
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE_diff': stats,
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_CREDIT-d-app_AMT_GOODS_PRICE_diff': stats,
'AMT_CREDIT-d-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE_diff': stats,
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_ANNUITY-m-app_AMT_CREDIT_diff': stats,
'AMT_ANNUITY-m-app_AMT_CREDIT_pctchange': stats,
'AMT_APPLICATION-m-app_AMT_CREDIT_diff': stats,
'AMT_APPLICATION-m-app_AMT_CREDIT_pctchange': stats,
'AMT_CREDIT-m-app_AMT_CREDIT_diff': stats,
'AMT_CREDIT-m-app_AMT_CREDIT_pctchange': stats,
'AMT_GOODS_PRICE-m-app_AMT_CREDIT_diff': stats,
'AMT_GOODS_PRICE-m-app_AMT_CREDIT_pctchange': stats,
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_ANNUITY-m-app_AMT_ANNUITY_diff': stats,
'AMT_ANNUITY-m-app_AMT_ANNUITY_pctchange': stats,
'AMT_APPLICATION-m-app_AMT_ANNUITY_diff': stats,
'AMT_APPLICATION-m-app_AMT_ANNUITY_pctchange': stats,
'AMT_CREDIT-m-app_AMT_ANNUITY_diff': stats,
'AMT_CREDIT-m-app_AMT_ANNUITY_pctchange': stats,
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY_diff': stats,
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY_pctchange': stats,
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE_diff': stats,
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE_diff': stats,
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_CREDIT-m-app_AMT_GOODS_PRICE_diff': stats,
'AMT_CREDIT-m-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE_diff': stats,
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL_pctchange': stats,
}
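# Illustrative sketch (not part of the original module): dictionaries like
# prev_num_aggregations are intended for pandas' groupby(...).agg(). The
# DataFrame, the 'SK_ID_CURR' grouping key and the column filtering below are
# assumptions for demonstration; the real pipeline builds these elsewhere.
def _example_aggregate_prev(prev_df):
    # prev_df is assumed to be a pandas DataFrame of previous applications.
    agg_spec = {col: funcs for col, funcs in prev_num_aggregations.items()
                if col in prev_df.columns}
    agg = prev_df.groupby('SK_ID_CURR').agg(agg_spec)
    # Flatten the resulting MultiIndex columns, e.g. 'AMT_ANNUITY_mean'.
    agg.columns = ['{}_{}'.format(col, func) for col, func in agg.columns]
    return agg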
# =============================================================================
# POS
# =============================================================================
pos_num_aggregations = {
# TODO: optimize stats
'MONTHS_BALANCE': ['min', 'max', 'mean', 'size'],
'SK_DPD': ['max', 'mean', 'var'],
'SK_DPD_DEF': ['max', 'mean', 'var'],
'CNT_INSTALMENT-m-CNT_INSTALMENT_FUTURE': stats,
'CNT_INSTALMENT_FUTURE-d-CNT_INSTALMENT': stats,
# diff
'SK_DPD-m-SK_DPD_DEF': ['max', 'mean', 'var', 'sum'],
'CNT_INSTALMENT_FUTURE_diff': stats,
'CNT_INSTALMENT_FUTURE_pctchange': stats,
'SK_DPD_diff': stats,
'SK_DPD_pctchange': stats,
'SK_DPD_DEF_diff': stats,
'SK_DPD_DEF_pctchange': stats,
# 'SK_DPD_diff_over0': ['max', 'mean', 'var', 'sum'],
# 'SK_DPD_diff_over5': ['max', 'mean', 'var', 'sum'],
# 'SK_DPD_diff_over10': ['max', 'mean', 'var', 'sum'],
# 'SK_DPD_diff_over15': ['max', 'mean', 'var', 'sum'],
# 'SK_DPD_diff_over20': ['max', 'mean', 'var', 'sum'],
# 'SK_DPD_diff_over25': ['max', 'mean', 'var', 'sum'],
}
# =============================================================================
# ins
# =============================================================================
ins_num_aggregations = {
# TODO: optimize stats
'NUM_INSTALMENT_VERSION': ['nunique'],
# app
'DAYS_ENTRY_PAYMENT-m-app_DAYS_BIRTH': stats,
'DAYS_ENTRY_PAYMENT-m-app_DAYS_EMPLOYED': stats,
'DAYS_ENTRY_PAYMENT-m-app_DAYS_REGISTRATION': stats,
'DAYS_ENTRY_PAYMENT-m-app_DAYS_ID_PUBLISH': stats,
'DAYS_ENTRY_PAYMENT-m-app_DAYS_LAST_PHONE_CHANGE': stats,
'AMT_PAYMENT-d-app_AMT_INCOME_TOTAL': stats,
'AMT_PAYMENT-d-app_AMT_CREDIT': stats,
'AMT_PAYMENT-d-app_AMT_ANNUITY': stats,
'AMT_PAYMENT-d-app_AMT_GOODS_PRICE': stats,
# prev
'NUM_INSTALMENT_ratio': stats,
'AMT_PAYMENT-d-AMT_ANNUITY': stats,
'DPD': ['max', 'mean', 'sum', 'nunique'],
'DBD': ['max', 'mean', 'sum', 'nunique'],
'AMT_INSTALMENT': ['min', 'max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum'],
'amt_ratio': stats,
'amt_delta': stats,
'days_weighted_delay': ['min', 'max', 'mean', 'sum'],
# 'delayed_day_over0': stats_sum,
# 'delayed_money_0': stats_sum,
# 'delayed_money_ratio_0': stats_sum,
# 'not-delayed_day_0': stats_sum,
# 'not-delayed_money_0': stats_sum,
# 'not-delayed_money_ratio_0': stats_sum,
# 'delayed_day_over5': stats_sum,
# 'delayed_money_5': stats_sum,
# 'delayed_money_ratio_5': stats_sum,
# 'not-delayed_day_5': stats_sum,
# 'not-delayed_money_5': stats_sum,
# 'not-delayed_money_ratio_5': stats_sum,
# 'delayed_day_over10': stats_sum,
# 'delayed_money_10': stats_sum,
# 'delayed_money_ratio_10': stats_sum,
# 'not-delayed_day_10': stats_sum,
# 'not-delayed_money_10': stats_sum,
# 'not-delayed_money_ratio_10': stats_sum,
# 'delayed_day_over15': stats_sum,
# 'delayed_money_15': stats_sum,
# 'delayed_money_ratio_15': stats_sum,
# 'not-delayed_day_15': stats_sum,
# 'not-delayed_money_15': stats_sum,
# 'not-delayed_money_ratio_15': stats_sum,
# 'delayed_day_over20': stats_sum,
# 'delayed_money_20': stats_sum,
# 'delayed_money_ratio_20': stats_sum,
# 'not-delayed_day_20': stats_sum,
# 'not-delayed_money_20': stats_sum,
# 'not-delayed_money_ratio_20': stats_sum,
# 'delayed_day_over25': stats_sum,
# 'delayed_money_25': stats_sum,
# 'delayed_money_ratio_25': stats_sum,
# 'not-delayed_day_25': stats_sum,
# 'not-delayed_money_25': stats_sum,
# 'not-delayed_money_ratio_25': stats_sum,
# 'delayed_day_over30': stats_sum,
# 'delayed_money_30': stats_sum,
# 'delayed_money_ratio_30': stats_sum,
# 'not-delayed_day_30': stats_sum,
# 'not-delayed_money_30': stats_sum,
# 'not-delayed_money_ratio_30': stats_sum,
# 'delayed_day_over35': stats_sum,
# 'delayed_money_35': stats_sum,
# 'delayed_money_ratio_35': stats_sum,
# 'not-delayed_day_35': stats_sum,
# 'not-delayed_money_35': stats_sum,
# 'not-delayed_money_ratio_35': stats_sum,
# 'delayed_day_over40': stats_sum,
# 'delayed_money_40': stats_sum,
# 'delayed_money_ratio_40': stats_sum,
# 'not-delayed_day_40': stats_sum,
# 'not-delayed_money_40': stats_sum,
# 'not-delayed_money_ratio_40': stats_sum,
# 'delayed_day_over45': stats_sum,
# 'delayed_money_45': stats_sum,
# 'delayed_money_ratio_45': stats_sum,
# 'not-delayed_day_45': stats_sum,
# 'not-delayed_money_45': stats_sum,
# 'not-delayed_money_ratio_45': stats_sum,
}
# =============================================================================
# cre
# =============================================================================
cre_num_aggregations = {
# # TODO: optimize stats
'AMT_BALANCE': stats_sum,
'AMT_CREDIT_LIMIT_ACTUAL': stats_sum,
'AMT_DRAWINGS_ATM_CURRENT': stats_sum,
'AMT_DRAWINGS_CURRENT': stats_sum,
'AMT_DRAWINGS_OTHER_CURRENT': stats_sum,
'AMT_DRAWINGS_POS_CURRENT': stats_sum,
'AMT_INST_MIN_REGULARITY': stats_sum,
'AMT_PAYMENT_CURRENT': stats_sum,
'AMT_PAYMENT_TOTAL_CURRENT': stats_sum,
'AMT_RECEIVABLE_PRINCIPAL': stats_sum,
'AMT_RECIVABLE': stats_sum,
'AMT_TOTAL_RECEIVABLE': stats_sum,
'CNT_DRAWINGS_ATM_CURRENT': stats_sum,
'CNT_DRAWINGS_CURRENT': stats_sum,
'CNT_DRAWINGS_OTHER_CURRENT': stats_sum,
'CNT_DRAWINGS_POS_CURRENT': stats_sum,
'CNT_INSTALMENT_MATURE_CUM': stats_sum,
'SK_DPD': stats_sum,
'SK_DPD_DEF': stats_sum,
'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL': stats_sum,
'AMT_BALANCE-d-app_AMT_INCOME_TOTAL': stats_sum,
'AMT_BALANCE-d-app_AMT_CREDIT': stats_sum,
'AMT_BALANCE-d-app_AMT_ANNUITY': stats_sum,
'AMT_BALANCE-d-app_AMT_GOODS_PRICE': stats_sum,
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT': stats_sum,
'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL': stats_sum,
'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL': stats_sum,
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT': stats_sum,
'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY': stats_sum,
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE': stats_sum,
'SK_DPD-m-SK_DPD_DEF': stats_sum,
'SK_DPD-m-SK_DPD_DEF_over0': stats_sum,
'SK_DPD-m-SK_DPD_DEF_over5': stats_sum,
'SK_DPD-m-SK_DPD_DEF_over10': stats_sum,
'SK_DPD-m-SK_DPD_DEF_over15': stats_sum,
'SK_DPD-m-SK_DPD_DEF_over20': stats_sum,
'SK_DPD-m-SK_DPD_DEF_over25': stats_sum,
# diff
'AMT_BALANCE_diff': stats,
'AMT_BALANCE_pctchange': stats,
'AMT_CREDIT_LIMIT_ACTUAL_diff': stats,
'AMT_CREDIT_LIMIT_ACTUAL_pctchange': stats,
'AMT_DRAWINGS_ATM_CURRENT_diff': stats,
'AMT_DRAWINGS_ATM_CURRENT_pctchange': stats,
'AMT_DRAWINGS_CURRENT_diff': stats,
'AMT_DRAWINGS_CURRENT_pctchange': stats,
'AMT_DRAWINGS_OTHER_CURRENT_diff': stats,
'AMT_DRAWINGS_OTHER_CURRENT_pctchange': stats,
'AMT_DRAWINGS_POS_CURRENT_diff': stats,
'AMT_DRAWINGS_POS_CURRENT_pctchange': stats,
'AMT_INST_MIN_REGULARITY_diff': stats,
'AMT_INST_MIN_REGULARITY_pctchange': stats,
'AMT_PAYMENT_CURRENT_diff': stats,
'AMT_PAYMENT_CURRENT_pctchange': stats,
'AMT_PAYMENT_TOTAL_CURRENT_diff': stats,
'AMT_PAYMENT_TOTAL_CURRENT_pctchange': stats,
'AMT_RECEIVABLE_PRINCIPAL_diff': stats,
'AMT_RECEIVABLE_PRINCIPAL_pctchange': stats,
'AMT_RECIVABLE_diff': stats,
'AMT_RECIVABLE_pctchange': stats,
'AMT_TOTAL_RECEIVABLE_diff': stats,
'AMT_TOTAL_RECEIVABLE_pctchange': stats,
'CNT_DRAWINGS_ATM_CURRENT_diff': stats,
'CNT_DRAWINGS_ATM_CURRENT_pctchange': stats,
'CNT_DRAWINGS_CURRENT_diff': stats,
'CNT_DRAWINGS_CURRENT_pctchange': stats,
'CNT_DRAWINGS_OTHER_CURRENT_diff': stats,
'CNT_DRAWINGS_OTHER_CURRENT_pctchange': stats,
'CNT_DRAWINGS_POS_CURRENT_diff': stats,
'CNT_DRAWINGS_POS_CURRENT_pctchange': stats,
'CNT_INSTALMENT_MATURE_CUM_diff': stats,
'CNT_INSTALMENT_MATURE_CUM_pctchange': stats,
'SK_DPD_diff': stats,
'SK_DPD_pctchange': stats,
'SK_DPD_DEF_diff': stats,
'SK_DPD_DEF_pctchange': stats,
'AMT_BALANCE-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_BALANCE-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_BALANCE-d-app_AMT_CREDIT_diff': stats,
'AMT_BALANCE-d-app_AMT_CREDIT_pctchange': stats,
'AMT_BALANCE-d-app_AMT_ANNUITY_diff': stats,
'AMT_BALANCE-d-app_AMT_ANNUITY_pctchange': stats,
'AMT_BALANCE-d-app_AMT_GOODS_PRICE_diff': stats,
'AMT_BALANCE-d-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE_pctchange': stats,
'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL_diff': stats,
'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL_pctchange': stats,
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT_diff': stats,
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL_diff': stats,
'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL_pctchange': stats,
# diff diff
'AMT_BALANCE_diff_diff': stats,
'AMT_BALANCE_diff_pctchange': stats,
'AMT_BALANCE_pctchange_diff': stats,
'AMT_BALANCE_pctchange_pctchange': stats,
'AMT_CREDIT_LIMIT_ACTUAL_diff_diff': stats,
'AMT_CREDIT_LIMIT_ACTUAL_diff_pctchange': stats,
'AMT_CREDIT_LIMIT_ACTUAL_pctchange_diff': stats,
'AMT_CREDIT_LIMIT_ACTUAL_pctchange_pctchange': stats,
'AMT_DRAWINGS_ATM_CURRENT_diff_diff': stats,
'AMT_DRAWINGS_ATM_CURRENT_diff_pctchange': stats,
'AMT_DRAWINGS_ATM_CURRENT_pctchange_diff': stats,
'AMT_DRAWINGS_ATM_CURRENT_pctchange_pctchange': stats,
'AMT_DRAWINGS_CURRENT_diff_diff': stats,
'AMT_DRAWINGS_CURRENT_diff_pctchange': stats,
'AMT_DRAWINGS_CURRENT_pctchange_diff': stats,
'AMT_DRAWINGS_CURRENT_pctchange_pctchange': stats,
'AMT_DRAWINGS_OTHER_CURRENT_diff_diff': stats,
'AMT_DRAWINGS_OTHER_CURRENT_diff_pctchange': stats,
'AMT_DRAWINGS_OTHER_CURRENT_pctchange_diff': stats,
'AMT_DRAWINGS_OTHER_CURRENT_pctchange_pctchange': stats,
'AMT_DRAWINGS_POS_CURRENT_diff_diff': stats,
'AMT_DRAWINGS_POS_CURRENT_diff_pctchange': stats,
'AMT_DRAWINGS_POS_CURRENT_pctchange_diff': stats,
'AMT_DRAWINGS_POS_CURRENT_pctchange_pctchange': stats,
'AMT_INST_MIN_REGULARITY_diff_diff': stats,
'AMT_INST_MIN_REGULARITY_diff_pctchange': stats,
'AMT_INST_MIN_REGULARITY_pctchange_diff': stats,
'AMT_INST_MIN_REGULARITY_pctchange_pctchange': stats,
'AMT_PAYMENT_CURRENT_diff_diff': stats,
'AMT_PAYMENT_CURRENT_diff_pctchange': stats,
'AMT_PAYMENT_CURRENT_pctchange_diff': stats,
'AMT_PAYMENT_CURRENT_pctchange_pctchange': stats,
'AMT_PAYMENT_TOTAL_CURRENT_diff_diff': stats,
'AMT_PAYMENT_TOTAL_CURRENT_diff_pctchange': stats,
'AMT_PAYMENT_TOTAL_CURRENT_pctchange_diff': stats,
'AMT_PAYMENT_TOTAL_CURRENT_pctchange_pctchange': stats,
'AMT_RECEIVABLE_PRINCIPAL_diff_diff': stats,
'AMT_RECEIVABLE_PRINCIPAL_diff_pctchange': stats,
'AMT_RECEIVABLE_PRINCIPAL_pctchange_diff': stats,
'AMT_RECEIVABLE_PRINCIPAL_pctchange_pctchange': stats,
'AMT_RECIVABLE_diff_diff': stats,
'AMT_RECIVABLE_diff_pctchange': stats,
'AMT_RECIVABLE_pctchange_diff': stats,
'AMT_RECIVABLE_pctchange_pctchange': stats,
'AMT_TOTAL_RECEIVABLE_diff_diff': stats,
'AMT_TOTAL_RECEIVABLE_diff_pctchange': stats,
'AMT_TOTAL_RECEIVABLE_pctchange_diff': stats,
'AMT_TOTAL_RECEIVABLE_pctchange_pctchange': stats,
'CNT_DRAWINGS_ATM_CURRENT_diff_diff': stats,
'CNT_DRAWINGS_ATM_CURRENT_diff_pctchange': stats,
'CNT_DRAWINGS_ATM_CURRENT_pctchange_diff': stats,
'CNT_DRAWINGS_ATM_CURRENT_pctchange_pctchange': stats,
'CNT_DRAWINGS_CURRENT_diff_diff': stats,
'CNT_DRAWINGS_CURRENT_diff_pctchange': stats,
'CNT_DRAWINGS_CURRENT_pctchange_diff': stats,
'CNT_DRAWINGS_CURRENT_pctchange_pctchange': stats,
'CNT_DRAWINGS_OTHER_CURRENT_diff_diff': stats,
'CNT_DRAWINGS_OTHER_CURRENT_diff_pctchange': stats,
'CNT_DRAWINGS_OTHER_CURRENT_pctchange_diff': stats,
'CNT_DRAWINGS_OTHER_CURRENT_pctchange_pctchange': stats,
'CNT_DRAWINGS_POS_CURRENT_diff_diff': stats,
'CNT_DRAWINGS_POS_CURRENT_diff_pctchange': stats,
'CNT_DRAWINGS_POS_CURRENT_pctchange_diff': stats,
'CNT_DRAWINGS_POS_CURRENT_pctchange_pctchange': stats,
'CNT_INSTALMENT_MATURE_CUM_diff_diff': stats,
'CNT_INSTALMENT_MATURE_CUM_diff_pctchange': stats,
'CNT_INSTALMENT_MATURE_CUM_pctchange_diff': stats,
'CNT_INSTALMENT_MATURE_CUM_pctchange_pctchange': stats,
'SK_DPD_diff_diff': stats,
'SK_DPD_diff_pctchange': stats,
'SK_DPD_pctchange_diff': stats,
'SK_DPD_pctchange_pctchange': stats,
'SK_DPD_DEF_diff_diff': stats,
'SK_DPD_DEF_diff_pctchange': stats,
'SK_DPD_DEF_pctchange_diff': stats,
'SK_DPD_DEF_pctchange_pctchange': stats,
'AMT_BALANCE-d-app_AMT_INCOME_TOTAL_diff_diff': stats,
'AMT_BALANCE-d-app_AMT_INCOME_TOTAL_diff_pctchange': stats,
'AMT_BALANCE-d-app_AMT_INCOME_TOTAL_pctchange_diff': stats,
'AMT_BALANCE-d-app_AMT_INCOME_TOTAL_pctchange_pctchange': stats,
'AMT_BALANCE-d-app_AMT_CREDIT_diff_diff': stats,
'AMT_BALANCE-d-app_AMT_CREDIT_diff_pctchange': stats,
'AMT_BALANCE-d-app_AMT_CREDIT_pctchange_diff': stats,
'AMT_BALANCE-d-app_AMT_CREDIT_pctchange_pctchange': stats,
'AMT_BALANCE-d-app_AMT_ANNUITY_diff_diff': stats,
'AMT_BALANCE-d-app_AMT_ANNUITY_diff_pctchange': stats,
'AMT_BALANCE-d-app_AMT_ANNUITY_pctchange_diff': stats,
'AMT_BALANCE-d-app_AMT_ANNUITY_pctchange_pctchange': stats,
'AMT_BALANCE-d-app_AMT_GOODS_PRICE_diff_diff': stats,
'AMT_BALANCE-d-app_AMT_GOODS_PRICE_diff_pctchange': stats,
'AMT_BALANCE-d-app_AMT_GOODS_PRICE_pctchange_diff': stats,
'AMT_BALANCE-d-app_AMT_GOODS_PRICE_pctchange_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL_diff_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL_diff_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL_pctchange_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_INCOME_TOTAL_pctchange_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT_diff_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT_diff_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT_pctchange_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_CREDIT_pctchange_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY_diff_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY_diff_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY_pctchange_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_ANNUITY_pctchange_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE_diff_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE_diff_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE_pctchange_diff': stats,
'AMT_DRAWINGS_CURRENT-d-app_AMT_GOODS_PRICE_pctchange_pctchange': stats,
'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL_diff_diff': stats,
'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL_diff_pctchange': stats,
'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL_pctchange_diff': stats,
'AMT_BALANCE-d-AMT_CREDIT_LIMIT_ACTUAL_pctchange_pctchange': stats,
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT_diff_diff': stats,
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT_diff_pctchange': stats,
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT_pctchange_diff': stats,
'AMT_BALANCE-d-AMT_DRAWINGS_CURRENT_pctchange_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL_diff_diff': stats,
'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL_diff_pctchange': stats,
'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL_pctchange_diff': stats,
'AMT_DRAWINGS_CURRENT-d-AMT_CREDIT_LIMIT_ACTUAL_pctchange_pctchange': stats,
}
# =============================================================================
#
# =============================================================================
bure_num_aggregations = {
# TODO: optimize stats
'DAYS_CREDIT': stats_sum,
'CREDIT_DAY_OVERDUE': stats_sum,
'DAYS_CREDIT_ENDDATE': stats_sum,
'DAYS_ENDDATE_FACT': stats_sum,
'AMT_CREDIT_MAX_OVERDUE': stats_sum,
'CNT_CREDIT_PROLONG': stats_sum,
'AMT_CREDIT_SUM': stats_sum,
'AMT_CREDIT_SUM_DEBT': stats_sum,
'AMT_CREDIT_SUM_LIMIT': stats_sum,
'AMT_CREDIT_SUM_OVERDUE': stats_sum,
'DAYS_CREDIT_UPDATE': stats_sum,
'AMT_ANNUITY': stats_sum,
# app
'AMT_CREDIT_SUM-d-app_AMT_INCOME_TOTAL': stats_sum,
'AMT_CREDIT_SUM-d-app_AMT_CREDIT': stats_sum,
'AMT_CREDIT_SUM-d-app_AMT_ANNUITY': stats_sum,
'AMT_CREDIT_SUM-d-app_AMT_GOODS_PRICE': stats_sum,
'AMT_CREDIT_SUM_DEBT-d-app_AMT_INCOME_TOTAL': stats_sum,
'AMT_CREDIT_SUM_DEBT-d-app_AMT_CREDIT': stats_sum,
'AMT_CREDIT_SUM_DEBT-d-app_AMT_ANNUITY': stats_sum,
'AMT_CREDIT_SUM_DEBT-d-app_AMT_GOODS_PRICE': stats_sum,
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_INCOME_TOTAL': stats_sum,
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_CREDIT': stats_sum,
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_ANNUITY': stats_sum,
'AMT_CREDIT_SUM_LIMIT-d-app_AMT_GOODS_PRICE': stats_sum,
| |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 25 01:24:10 2021
@author: <NAME>
This module contains all of the functions used to model drunks leaving a pub
and moving around a town. It is used by the drunk_model and
measure_drunks_moves modules.
"""
import csv
import matplotlib.animation
import matplotlib.pyplot
import operator
import drunksframework
import random
def catch_input(default_value, desired_type, input_message = "Input: ",
failure_message = "Invalid input.",
default_message = "Default value used.", num_attempts = 3):
"""
Function to better catch type errors in user input. If the input can be
parsed using desired_type, then this value is returned. Otherwise, the
user will be asked again (up to num_attempts attempts in total) and if the
input still cannot be parsed, then the default_value is returned.
Parameters
----------
default_value : str
Value returned if all inputs fail. Must be able to be parsed by
desired_type.
desired_type : type
Desired type of the input (e.g. str, int).
input_message : str, optional
Prompt to user for input. The default is "Input: ".
failure_message : str, optional
Message to print when input fails. The default is "Invalid input.".
default_message : str, optional
Message to print when default_value used. The default is "Default
value used.".
num_attempts : int, optional
Number of times to attempt to prompt for input. The default is 3.
Returns
-------
type as specified by desired_type
Value of input if successful, or default_value otherwise.
"""
attempt = 0
while attempt < num_attempts:
try:
return desired_type(input(input_message))
except Exception:
print(failure_message)
attempt += 1
else:
print(default_message)
return desired_type(default_value)
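# Illustrative sketch (not part of the original module): a typical call that
# asks for a whole number and falls back to a default of 25 on repeated bad
# input. The prompt text and default are made up for demonstration.
def _example_catch_input_usage():
    num_drunks = catch_input(default_value="25", desired_type=int,
                             input_message="Number of drunks: ",
                             failure_message="Please enter a whole number.",
                             default_message="Using the default of 25 drunks.")
    return num_drunks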
def import_town(data_file):
"""
Reads town raster data from a CSV file.
Parameters
----------
data_file : str
Name of CSV raster data file to use for the town.
Returns
-------
town : list
List (cols) of lists (rows) representing raster data of the town.
"""
# Read in town data and format it as a list of lists
with open(data_file, newline = "") as f:
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
town = []
for row in reader:
rowlist = []
for value in row:
rowlist.append(value)
town.append(rowlist)
return town
def get_building_coords(town):
"""
Generates a dictionary of all (x,y) co-ordinates that are within buildings
in the town, where the keys are the buildings' numbers (or "pub" for the
pub) and the values are lists of co-ordinates associated with the building.
Data must have 25 houses (numbered as multiples of 10 from 10 to 250) and
1 pub.
Parameters
----------
town : list
List (cols) of lists (rows) representing raster data of the town.
Returns
-------
building_coords : dict
Keys are the buildings' numbers (or "pub" for the pub) and the values
are lists of all co-ordinates that are within the building.
"""
#Create empty dictionary to collect building co-ordinates
building_coords = {}
# Create list of co-ordinates for each building in the town
# Dictionary key is either "pub" or building number and value is list of
# coords
for n in [1, *range(10, 260, 10)]:
if n == 1:
building_name = "pub"
else:
building_name = n
building_coords[building_name] = []
for y in range(len(town)):
for x in range(len(town[y])):
if town[y][x] == n:
building_coords[building_name].append((x, y))
return building_coords
"""
# Make pub clearer for plotting
for i in range(len(town)):
for j in range(len(town[i])):
if town[i][j] == 1:
town[i][j] = -50
"""
def get_pub_front_door(building_coords):
"""
Gets the co-ordinates just outside the front door (south-west corner) of
the pub.
Parameters
----------
building_coords : dict
Keys are the buildings' numbers (or "pub" for the pub) and the values
are lists of all co-ordinates that are within the building.
Returns
-------
front_door_coords : tuple
A length 2 tuple giving the (x,y) co-ordinates of the pub's front door.
"""
# Set front door coords to be outside bottom left corner of pub
front_door_y = min(building_coords["pub"],
key = operator.itemgetter(1))[1] - 1
front_door_x = min(building_coords["pub"],
key = operator.itemgetter(0))[0] - 1
front_door_coords = (front_door_x, front_door_y)
return front_door_coords
def get_pub_back_door(building_coords):
"""
Gets the co-ordinates just outside the back door (north-east corner) of
the pub.
Parameters
----------
building_coords : dict
Keys are the buildings' numbers (or "pub" for the pub) and the values
are lists of all co-ordinates that are within the building.
Returns
-------
back_door_coords : tuple
A length 2 tuple giving the (x,y) co-ordinates of the pub's back door.
"""
# Set back door coords to be outside top right corner of pub
back_door_y = max(building_coords["pub"],
key = operator.itemgetter(1))[1] + 1
back_door_x = max(building_coords["pub"],
key = operator.itemgetter(0))[0] + 1
back_door_coords = (back_door_x, back_door_y)
return back_door_coords
def create_drunks(town, building_coords, front_door_coords, back_door_coords,
drunk_level_lower, drunk_level_higher):
"""
Creates a list of 25 drunks using the drunk class framework, one for each
house in the town data.
Parameters
----------
town : list
List (cols) of lists (rows) representing raster data of the town.
building_coords : dict
Keys are the buildings' numbers (or "pub" for the pub) and the values
are lists of all co-ordinates that are within the building.
front_door_coords : tuple
A length 2 tuple giving the (x,y) co-ordinates of the pub's front door.
back_door_coords : tuple
A length 2 tuple giving the (x,y) co-ordinates of the pub's back door.
drunk_level_lower : int
Lower limit for drunks' drunk level - will be chosen randomly between
lower and higher level for each drunk.
drunk_level_higher : int
Upper limit for drunks' drunk level - will be chosen randomly between
lower and higher level for each drunk.
Returns
-------
drunks : list
List of 25 drunks with ids that are multiples of 10 from 10 to 250.
"""
drunks = []
# Create drunks - start at front or back door of pub at random
for id in range(10, 260, 10):
pub_door_coords = random.choice([front_door_coords, back_door_coords])
drunks.append(
drunksframework.Drunk(id = id,
#x = building_coords[20][0][0],
#y = building_coords[20][0][1],
x = pub_door_coords[0],
y = pub_door_coords[1],
town = town,
building_coords = building_coords,
drunk_level = random.randint(
drunk_level_lower,drunk_level_higher
)
)
)
return drunks
def update(frame_number, drunks, fig, town,
drunk_level_lower,
drunk_level_higher,
cmap = matplotlib.pyplot.cm.get_cmap('RdYlBu')):
"""
Uses the drunks' move and sober_up methods once per drunk in the drunks
list and then plots the new state of the drunks and town.
This function is used when creating an animation of the drunks' movement
around the town.
Parameters
----------
frame_number : int
Iteration of the model.
drunks : list
List of 25 instances of drunk class, with ids that are multiples of 10
between 10 and 250.
fig : matplotlib.figure.Figure
Figure used for plotting.
town : list
List of lists representing raster data of the town.
drunk_level_lower : int
Lower limit of starting drunk_level for all drunks, used to scale
colors for plotting.
drunk_level_higher : int
Upper limit of starting drunk_level for all drunks, used to scale
colors for plotting.
cmap : matplotlib.colors.LinearSegmentedColormap, optional
Color map for plotting the color of drunks according to their
drunk_level. The default is matplotlib.pyplot.cm.get_cmap('RdYlBu').
Returns
-------
None.
"""
# global carry_on
fig.clear()
# Move drunks
for drunk in drunks:
#print(drunks[j].x)
drunk.move()
drunk.sober_up()
#if (drunk.x, drunk.y) in building_coords["pub"]:
# print(drunk.id)
# break
#print(drunks[5].x)
#print(drunks[5].y)
# Plot town without ticks on axes
matplotlib.pyplot.imshow(town)
matplotlib.pyplot.xlim(0, len(town[0]))
matplotlib.pyplot.ylim(0, len(town))
matplotlib.pyplot.tick_params(left = False, right = False ,
labelleft = False, labelbottom = False,
bottom = False)
# Plot drunks
for drunk in drunks:
matplotlib.pyplot.scatter(drunk.x, drunk.y,
c = float(drunk.drunk_level),
cmap = cmap,
vmin = drunk_level_lower,
vmax = drunk_level_higher)
"""
# Print how long it took to get all drunks home
if all([drunk.is_home for drunk in drunks]):
carry_on = False
print("All drunks home in " + str(frame_number) + " moves.")
"""
# Define gen function that stops if all drunks home or if num_of_moves met
def gen_function(num_of_moves, drunks, town):
"""
Generator function used in creating animation of drunks | |
RR, LL, RL or LR!"
stokes_num = [self.stokes_dict_inv[stokes_] for stokes_ in stokes]
return to_boolean_array(stokes_num, self.nstokes)
def _get_uvdata_slice(self, baselines=None, start_time=None, stop_time=None,
bands=None, stokes=None):
"""
Return tuple of index arrays that represent portion of ``UVData.uvdata``
array with given values of baselines, times, bands, stokes.
"""
if baselines is None:
baselines = self.baselines
indxs = self._get_baselines_indexes(baselines)
if start_time is not None or stop_time is not None:
indxs = np.logical_and(indxs, self._get_times_indexes(start_time,
stop_time))
if bands is None:
bands_indxs = self._conver_bands_to_indexes(xrange(self.nif))
else:
bands_indxs = self._conver_bands_to_indexes(bands)
if stokes is None:
stokes = self.stokes
stokes_indxs = self._convert_stokes_to_indexes(stokes)
return np.ix_(indxs, bands_indxs, stokes_indxs)
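# Note on np.ix_ (generic illustration, not tied to this data set): it builds
# an open mesh so the index arrays select their full cross-product, e.g.
#   a = np.arange(24).reshape(4, 3, 2)
#   a[np.ix_([0, 2], [1], [0, 1])].shape == (2, 1, 2)
# Boolean arrays such as ``indxs`` are converted to integer indices first.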
def _convert_uvdata_slice_to_bool(self, sl):
"""
Convert indexing tuple to boolean array of ``UVData.uvdata`` shape.
:param sl:
Tuple of indexing arrays. Output of ``self._get_uvdata_slice``.
:return:
Boolean numpy array with shape of ``UVData.uvdata``.
"""
boolean = np.zeros(self.uvdata.shape, dtype=bool)
boolean[sl] = True
return boolean
# FIXME: Choose only one stokes parameter
def _choose_uvdata(self, start_time=None, stop_time=None, baselines=None,
bands=None, stokes=None, freq_average=False):
"""
Method that returns chosen data from ``_data`` numpy structured array
based on user specified parameters.
:param start_time: (optional)
Instance of ``astropy.time.Time`` class. (default: ``None``)
:param stop_time: (optional)
Instance of ``astropy.time.Time`` class. (default: ``None``)
:param baselines: (optional)
One or iterable of baselines numbers or ``None``. If ``None`` then
use all baselines. (default: ``None``)
:param bands: (optional)
Iterable of IF numbers (0 to #IF-1) or ``None``. If ``None`` then
use all IFs. (default: ``None``)
:param stokes: (optional)
Any string of: ``I``, ``Q``, ``U``, ``V``, ``RR``, ``LL``, ``RL``,
``LR`` or ``None``. If ``None`` then use all available correlations.
If ``I``, ``Q``, ``U``, ``V`` then must be iterable with only one
item (any single Stokes parameter). (default: ``None``)
:return:
Numpy.ndarray that is part of (copy) ``UVData.uvdata`` array with
shape (#N, #IF, #STOKES).
"""
# Copy with shape (#N, #IF, #STOKES)
uvdata = self.uvdata_weight_masked
if start_time is None:
start_time = self.times[0]
if stop_time is None:
stop_time = self.times[-1]
# FIXME: Choose only one stokes parameter
if stokes is None:
stokes = self.stokes
if stokes and (stokes[0] in self.stokes):
sl = self._get_uvdata_slice(baselines, start_time, stop_time, bands,
stokes)
result = uvdata[sl]
elif check_issubset(stokes, ('I', 'Q', 'U', 'V')):
assert len(stokes) == 1, "Only one Stokes parameter allowed!"
if stokes in ('I', 'V'):
sl_rr = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['RR'])
sl_ll = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['LL'])
if stokes == 'I':
# I = 0.5 * (RR + LL)
result = 0.5 * (uvdata[sl_rr] + uvdata[sl_ll])
else:
# V = 0.5 * (RR - LL)
result = 0.5 * (uvdata[sl_rr] - uvdata[sl_ll])
if stokes in ('Q', 'U'):
sl_rl = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['RL'])
sl_lr = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['LR'])
if stokes == 'Q':
# Q = 0.5 * (LR + RL)
result = 0.5 * (uvdata[sl_lr] + uvdata[sl_rl])
else:
# U = 0.5 * 1j * (LR - RL)
result = 0.5 * 1j * (uvdata[sl_lr] - uvdata[sl_rl])
else:
raise Exception("Stokes must be iterable consisting of following "
"items only: I, Q, U, V, RR, LL, RL, LR!")
if freq_average:
result = np.ma.mean(result, axis=1).squeeze()
return result
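# Summary of the combinations used above, assuming the usual circular-feed
# convention (RR = I + V, LL = I - V, RL = Q + iU, LR = Q - iU):
#   I = 0.5 * (RR + LL)      V = 0.5 * (RR - LL)
#   Q = 0.5 * (RL + LR)      U = 0.5j * (LR - RL)
# which is what the branches compute element-wise on the masked slices before
# the optional frequency averaging.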
def noise_v(self, average_bands=False):
"""
Calculate noise for each baseline using Stokes ``V`` data.
:param average_bands: (optional)
Boolean - average bands after noise calculation?
:return:
Dictionary with keys - baseline numbers & values - numpy arrays with
shape (#bands, #stokes) or (#stokes,) if ``average_bands=True``.
"""
if self._noise_v is None:
baseline_noises = dict()
for baseline in self.baselines:
uvdata = self._choose_uvdata(baselines=[baseline])
v = uvdata[..., 0] - uvdata[..., 1]
mask = np.logical_or(np.isnan(v), v.mask)
# #groups, #bands
data = np.ma.array(v, mask=mask)
mstd = list()
for band_data in data.T:
mstd.append(0.5 * (biweight_midvariance(band_data.real) +
biweight_midvariance(band_data.imag)))
baseline_noises[baseline] =\
np.array(mstd).repeat(self.nstokes).reshape((self.nif,
self.nstokes))
self._noise_v = baseline_noises.copy()
if average_bands:
return {baseline: np.nanmean(mstd, axis=0) for baseline, mstd in
self._noise_v.items()}
return self._noise_v
def noise_diffs(self, average_bands=False):
"""
Calculate noise for each baseline using the successive-differences approach
(Briggs' dissertation).
:param average_bands: (optional)
Boolean - average bands after noise calculation?
:return:
Dictionary with keys - baseline numbers & values - numpy arrays with
shape (#bands, #stokes) or (#stokes,) if ``average_bands=True``.
"""
if self._noise_diffs is None:
baseline_noises = dict()
for baseline in self.baselines:
uvdata = self._choose_uvdata(baselines=[baseline])
diffs = uvdata[:-1, ...] - uvdata[1:, ...]
mask = np.logical_or(np.isnan(diffs), diffs.mask)
# #groups, #bands
data = np.ma.array(diffs, mask=mask)
mstd = np.zeros((self.nif, self.nstokes))
for if_ in xrange(self.nif):
for stoke in xrange(self.nstokes):
data_ = data[:, if_, stoke]
# mstd[if_, stoke] += biweight_midvariance(data_.real)
# mstd[if_, stoke] += biweight_midvariance(data_.imag)
mstd[if_, stoke] += np.std(data_.real)
mstd[if_, stoke] += np.std(data_.imag)
mstd[if_, stoke] *= 0.5
baseline_noises[baseline] = mstd
self._noise_diffs = baseline_noises.copy()
if average_bands:
return {baseline: np.nanmean(mstd, axis=0) for baseline, mstd in
self._noise_diffs.items()}
return self._noise_diffs
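# Background for the successive-differences estimate (standalone sketch with
# hypothetical data): for white noise the difference of neighbouring samples
# has twice the variance of a single sample, so
#   x = np.random.normal(0, 1.0, 10000)
#   np.std(np.diff(x)) / np.sqrt(2)   # ~1.0
# The method above keeps 0.5 * (std(real) + std(imag)) of the raw differences
# per IF/Stokes, without the sqrt(2) correction.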
def noise(self, split_scans=False, use_V=True, average_freq=False):
"""
Calculate noise for each baseline. If ``split_scans`` is True then
calculate noise for each scan too. If ``use_V`` is True then use stokes
V data (``RR`` - ``LL``) for the computation, assuming no signal in V. Else
use the successive-differences approach (Briggs' dissertation).
:param split_scans: (optional)
Should we calculate noise for each scan? (default: ``False``)
:param use_V: (optional)
Use stokes V data (``RR`` - ``LL``) to calculate noise assuming no
signal in stokes V? If ``False`` then use successive differences
approach (see Briggs' dissertation). (default: ``True``)
:param average_freq: (optional)
Use IF-averaged data for calculating noise? (default: ``False``)
:return:
Dictionary with keys - baseline numbers & values - arrays of shape
([#scans], [#IF], [#stokes]). It means (#scans, #IF) if
``split_scans=True`` & ``use_V=True``, (#IF, #stokes) if
``split_scans=False`` & ``use_V=False``, (#scans, #IF, #stokes) if
``split_scans=True``, ``use_V=False`` & ``average_freq=False`` etc.
"""
baselines_noises = dict()
if use_V:
# Calculate dictionary {baseline: noise} (if split_scans is False)
# or {baseline: [noises]} if split_scans is True.
if not split_scans:
for baseline in self.baselines:
baseline_uvdata = self._choose_uvdata(baselines=[baseline])
if average_freq:
baseline_uvdata = np.mean(baseline_uvdata, axis=1)
v = (baseline_uvdata[..., 0] - baseline_uvdata[..., 1]).real
mask = ~np.isnan(v)
baselines_noises[baseline] =\
np.asarray(mad_std(np.ma.array(v, mask=np.invert(mask)).data,
axis=0))
# np.asarray(np.std(np.ma.array(v, mask=np.invert(mask)).data,
# axis=0))
else:
# Use each scan
for baseline in self.baselines:
baseline_noise = list()
try:
for scan_bl_indxs in self.scans_bl[baseline]:
# (#obs in scan, #nif, #nstokes,)
scan_baseline_uvdata = self.uvdata[scan_bl_indxs]
if average_freq:
# (#obs in scan, #nstokes,)
scan_baseline_uvdata = np.mean(scan_baseline_uvdata,
axis=1)
v = (scan_baseline_uvdata[..., 0] -
scan_baseline_uvdata[..., 1]).real
mask = ~np.isnan(v)
scan_noise = np.asarray(np.std(np.ma.array(v,
mask=np.invert(mask)).data,
axis=0))
baseline_noise.append(scan_noise)
baselines_noises[baseline] = np.asarray(baseline_noise)
except TypeError:
baselines_noises[baseline] = None
else:
if not split_scans:
for baseline in self.baselines:
# (#, #IF, #Stokes)
baseline_uvdata = self._choose_uvdata(baselines=[baseline])
if average_freq:
baseline_uvdata = np.mean(baseline_uvdata, axis=1)
# (#, #IF, #Stokes)
differences = (baseline_uvdata[:-1, ...] -
baseline_uvdata[1:, ...])
mask = np.isnan(differences)
# (#IF, #Stokes)
baselines_noises[baseline] = \
np.asarray([mad_std(np.ma.array(differences,
mask=mask).real[..., i], axis=0) for i
in range(self.nstokes)]).T
else:
# Use each scan
for baseline in self.baselines:
baseline_noise = list()
try:
for scan_bl_indxs in self.scans_bl[baseline]:
# (#obs in scan, #nif, #nstokes,)
scan_baseline_uvdata = self.uvdata[scan_bl_indxs]
if average_freq:
# shape = (#obs in scan, #nstokes,)
scan_baseline_uvdata = np.mean(scan_baseline_uvdata,
axis=1)
# (#obs in scan, #nif, #nstokes,)
differences = (scan_baseline_uvdata[:-1, ...] -
scan_baseline_uvdata[1:, ...])
mask = ~np.isnan(differences)
# (nif, nstokes,)
scan_noise = np.asarray([mad_std(np.ma.array(differences,
mask=np.invert(mask)).real[..., i],
axis=0) for i in
range(self.nstokes)]).T
baseline_noise.append(scan_noise)
baselines_noises[baseline] = np.asarray(baseline_noise)
except TypeError:
baselines_noises[baseline] = None
return baselines_noises
def noise_add(self, noise=None, df=None, split_scans=False):
"""
Add noise to visibilities. Here std is the standard deviation of the
real/imaginary components.
:param noise:
Mapping from baseline number to:
1) std of noise. Will use one value of std for all stokes and IFs.
2) iterable of stds. Will use different values of std for different
IFs.
:param df: (optional)
Number of d.o.f. for standard Student t-distribution used as noise
model. If set to ``None`` then use gaussian noise model. (default:
``None``)
:param split_scans: (optional)
Is the ``noise`` parameter a mapping from baseline numbers to
iterables of noise stds for each scan on the baseline? (default:
``False``)
"""
# TODO: if on df before generating noise values
for baseline, baseline_stds in noise.items():
# i - IF number, std (#IF, #Stokes)
for i, std in enumerate(baseline_stds):
# (#, 1, #stokes)
for stokes in self.stokes:
j = self.stokes_dict_inv[stokes]
baseline_uvdata =\
self._choose_uvdata(baselines=[baseline], bands=[i],
stokes=[stokes])
# (#, #IF, #CH, #stokes)
n = len(baseline_uvdata)
sl = self._get_uvdata_slice(baselines=[baseline], bands=[i],
stokes=[stokes])
noise_to_add | |
# general.py
import os
import pandas as pd
from queue import Queue
# Each website is a separate project (folder)
def create_project_dir(project_name):
if not os.path.exists(project_name):
print('Creating directory ' + project_name)
os.makedirs(project_name)
# Create queue and crawled files (if not created)
def create_data_files(project_name, base_url):
queue = os.path.join(project_name , 'queue.txt')
crawled = os.path.join(project_name ,"crawled.txt")
ecommerce = os.path.join(project_name ,"ecommerce.csv")
if not os.path.isfile(queue):
write_file(queue, base_url)
if not os.path.isfile(crawled):
write_file(crawled, '')
if not os.path.isfile(ecommerce):
write_file(ecommerce, '')
# Create a new file
def write_file(path, data):
with open(path, 'w') as f:
f.write(data)
# Add data onto an existing file
def append_to_file(path, data):
with open(path, 'a') as file:
file.write(data + '\n')
# Add data onto an existing file
def append_to_csv_file(path, data):
with open(path, 'a') as file:
file.write(data)
def read_file(path_dir ):
with open(path_dir , 'r') as f:
ff = []
for line in f:
ff.append(line)
return(ff)
# Delete the contents of a file
def delete_file_contents(path):
open(path, 'w').close()
# Read a file and convert each line to set items
def file_to_set(file_name):
results = set()
with open(file_name, 'rt') as f:
for line in f:
results.add(line.replace('\n', ''))
return results
def set_to_file(links, file_name):
with open(file_name,"w") as f:
for l in sorted(links):
f.write(l+"\n")
def shops_name_in_there (category_name):
a = pd.read_csv('EXCEL\{}\shop.csv'.format(category_name))
return(a)
def how_many_shop_is_there (category_name):
a = pd.read_csv('EXCEL\{}\shop.csv'.format(category_name))
return(a.shape[0])
def convert_set_to_array (set_data):
f=[]
for i in set_data:
f.append(i)
return f
def read (ecommerce_file):
try:
return pd.read_csv(ecommerce_file)
except Exception:
f = []
for i in range(30):
f.append(i)
return pd.DataFrame(columns=f)
def get_first():
f = []
for i in range(30):
f.append(i)
return pd.DataFrame(columns=f)
def save_in_file (df , ecommerce_file):
df.to_csv(ecommerce_file)
def add_array_in_csv_file (path ,array , page_url ):
# url = page_url.split('builtwith.com/')[1]
url = page_url
try:
string = url.split('\n')[0] + ',' + ' | {}'.format(array) + '\n'
except Exception:
string = url + ',' + ' | {}'.format(array)+ '\n'
with open(path, 'a') as f:
f.write(string)
urls = set()
for i in read_file('category.txt'):
try:
i = i.split('\n')[0]
except Exception :
pass
for c in shops_name_in_there(i)['Shops']:
try:
c = c.split('\n')[0]
except Exception :
pass
if c.lower().find('www') >-1:
c = c[c.lower().find('www'):]
if c.lower().find('/')>-1:
c = c[c.lower().find('www'):c.lower().find('/')]
else:
c = c[c.lower().find('www'):]
elif c.lower().find('http://')>-1:
c = c[c.lower().find('http://')+7:]
if c.lower().find('/')>-1:
c = c[:c.lower().find('/')]
else:
c = c
elif c.lower().find('https://')>-1:
c = c[c.lower().find('https://')+8:]
if c.lower().find('/')>-1:
c = c[:c.lower().find('/')]
else:
c = c
urls.add(c)
write_file('urls.txt' , '')
set_to_file(urls ,'urls.txt' )
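# Illustrative walk-through of the normalisation above (hypothetical inputs):
#   'https://www.example.com/shop/item' -> 'www.example.com'
#   'http://example.org/about'          -> 'example.org'
# i.e. the scheme and anything after the first '/' are stripped before the
# host is added to ``urls`` and written to urls.txt; strings without 'www',
# 'http://' or 'https://' are kept unchanged.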
# import csv
# reader = csv.DictReader(open("ecommerce.csv"))
# for raw in reader:
# print(raw)
import csv
def read_all_urls():
with open('ecommerce.csv','rt')as f:
data = csv.reader(f)
urls = []
for row in data:
urls.append(row[0])
return urls
def read_all_urls_in_category(file):
try:
with open('{}\ecommerce.csv'.format(file),'rt')as f:
data = csv.reader(f)
urls = []
for row in data:
urls.append(row[0])
return urls
except Exception:
return False
def give_me_ecommerce_of_this_site(url):
with open('ecommerce.csv','rt')as f:
data = csv.reader(f)
urls = []
for row in data:
if url == row[0]:
return row[1][row[1].find("['")+2 : row[1].find("']")]
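# Illustrative note (hypothetical row): with ecommerce.csv lines shaped like
#   www.example.com, | ['Shopify']
# the slice above returns the text between "['" and "']", i.e. 'Shopify'.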
def give_me_ecommerce_of_each_categorys(stringg , category):
try:
with open('{}\ecommerce.csv'.format(category),'rt')as f:
data = csv.reader(f)
for row in data:
if row[0].find(stringg)>-1:
return row[1]
except Exception:
return False
# all_urls = set()
# all_urls = file_to_set('urls.txt')
# all_urls_in_csv = read_all_urls()
# bbbb = set()
# bbbb = file_to_set('urls.txt')
# for url in all_urls:
#     if url in all_urls_in_csv:
#         bbbb.remove(url)
# # set_to_file(bbbb, 'urls.txt')
# set_to_file(bbbb, 'remaind_urls.txt')
# set_to_file(bbbb , 'urls.txt')
# for file in read_file('category.txt'):
# try:
# file = file.split('\n')[0]
# except Exception :
# pass
# urls_in_each_categorys = read_all_urls_in_category(file)
# if urls_in_each_categorys != False :
# urls = read_all_urls()
# for url in urls_in_each_categorys :
# if not url in urls :
# ecommerce = give_me_ecommerce_of_each_categorys(url , file)
# if ecommerce != False :
# add_array_in_csv_file('ecommerce.csv' ,ecommerce , url )
# os.remove('{}\crawled.txt'.format(file))
# os.remove('{}\ecommerce.csv'.format(file))
# os.remove('{}\queue.txt'.format(file))
# os.rmdir(file)
# from bs4 import BeautifulSoup
# import requests
# page_url = 'https://builtwith.com/poshmark.com'
# print('page url = ' + page_url)
# page = requests.get(page_url)
# contents = page.content
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.find_all('a')
# for i in a_tags:
# if i.get('href').find('.com/shop/')>-1 :
# print( 'ecommerce = ' + i.text)
# page_url = 'https://builtwith.com/store.yahoo.com'
# print('page url = ' + page_url)
# page = requests.get(page_url)
# contents = page.content
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.find_all('a')
# for i in a_tags:
# if i.get('href').find('.com/shop/')>-1 :
# print( 'ecommerce = ' + i.text)
# page_url = 'https://builtwith.com/www.Adorama.com'
# print('page url = ' + page_url)
# page = requests.get(page_url )
# contents = page.content
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.find_all('a')
# for i in a_tags:
# if i.get('href').find('.com/shop/')>-1 :
# print( 'ecommerce = ' + i.text)
# page_url = 'https://builtwith.com/www.americanbookwarehouse.com'
# print('page url = ' + page_url)
# page = requests.get(page_url )
# contents = page.content
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.find_all('a')
# for i in a_tags:
# if i.get('href').find('.com/shop/')>-1 :
# print( 'ecommerce = ' + i.text)
# page_url = 'https://builtwith.com/www.basspro.com'
# print('page url = ' + page_url)
# page = requests.get(page_url )
# contents = page.content
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.find_all('a')
# for i in a_tags:
# if i.get('href').find('.com/shop/')>-1 :
# print( 'ecommerce = ' + i.text)
# import urllib
# import json
# print('page url = ' + page_url)
# page = requests.get(page_url )
# contents = page.content
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.find_all('a')
# for i in a_tags:
# if i.get('href').find('.com/shop/')>-1 :
# print( 'ecommerce = ' + i.text)
# from selenium import webdriver
# from bs4 import BeautifulSoup
# import requests
# driver = webdriver.Chrome()
# page_url = 'https://w3techs.com/sites/info/www.zazzle.com'
# contents = driver.page_source
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.findAll('a')
# buttons_tags = soup.findAll('input')
# for j in buttons_tags:
# if j.get('type').find('submit')>-1 :
# if j.get('wtx-context') :
# driver = webdriver.Chrome()
# driver.get(page_url)
# button = driver.find_element_by_name('add_site')
# button.click()
# contents = driver.page_source
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.findAll('a')
# for i in a_tags:
# print(i)
# if i.get('href').find('/details/cm-')>-1 :
# print(i.text)
# from urllib.request import urlopen
# from bs4 import BeautifulSoup
# import requests
# import time
# from selenium import webdriver
# page_url = 'https://w3techs.com/sites/info/brittandjules.com'
# page = requests.get(page_url)
# contents = page.content
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.findAll('a')
# buttons_tags = soup.findAll('input')
# for j in buttons_tags:
# print(j)
# if j.attrs['type'].find('submit')>-1 and j.attrs['value'].find('Crawl ')>-1 :
# driver = webdriver.Chrome()
# driver.get(page_url)
# button = driver.find_element_by_name('add_site')
# button.click()
# contents = driver.page_source
# soup = BeautifulSoup(contents , features="html.parser")
# a_tags = soup.findAll('a')
# driver.close()
# import urllib , json , request
# # for i in a_tags:
# # if i.attrs['href'].find('/details/cm-')>-1 :
# with urllib.request.urlopen("https://whatcms.org/APIEndpoint/Detect?key={}&url={}" .format('4274ab6009a7fffcfc759a2dcdaa9e623b510d975ff0a13e9da7e47e174994f897f2a8' , 'brittandjules.com')) as url:
# # print(url)
# # response = urllib.urlopen(url)
# data = json.loads(url.read().decode())
# print(data)
# if data["result"]["name"] != 'null':
# print (data["result"]["name"])
# elif data["result"]["msg"] == 'Too Many Requests':
# print ('give_me_another_apikey')
# elif data["result"]["msg"] == 'Failed: CMS or Host Not Found':
# print ('Host Not Found')
# from urllib.request import urlopen
# import urllib, json, request
# import requests
# import time
# import sys
# def get_CMS_of_remaind_urls(page_url):
#     try:
do so via calling thisObj._set_interface_ref() directly.
YANG Description: Reference to an interface or subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_interface_ref_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface_interface_ref, is_container='container', yang_name="interface-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_ref must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_interface_ref_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface_interface_ref, is_container='container', yang_name="interface-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__interface_ref = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_ref(self):
self.__interface_ref = YANGDynClass(base=yc_interface_ref_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface_interface_ref, is_container='container', yang_name="interface-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
def _get_agent_information_option(self):
"""
Getter method for agent_information_option, mapped from YANG variable /relay_agent/dhcp/interfaces/interface/agent_information_option (container)
YANG Description: Top-level container for relay agent information option
data
"""
return self.__agent_information_option
def _set_agent_information_option(self, v, load=False):
"""
Setter method for agent_information_option, mapped from YANG variable /relay_agent/dhcp/interfaces/interface/agent_information_option (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_agent_information_option is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_agent_information_option() directly.
YANG Description: Top-level container for relay agent information option
data
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_agent_information_option_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface_agent_information_option, is_container='container', yang_name="agent-information-option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """agent_information_option must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_agent_information_option_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface_agent_information_option, is_container='container', yang_name="agent-information-option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__agent_information_option = t
if hasattr(self, '_set'):
self._set()
def _unset_agent_information_option(self):
self.__agent_information_option = YANGDynClass(base=yc_agent_information_option_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface_agent_information_option, is_container='container', yang_name="agent-information-option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
id = __builtin__.property(_get_id, _set_id)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
interface_ref = __builtin__.property(_get_interface_ref, _set_interface_ref)
agent_information_option = __builtin__.property(_get_agent_information_option, _set_agent_information_option)
_pyangbind_elements = OrderedDict([('id', id), ('config', config), ('state', state), ('interface_ref', interface_ref), ('agent_information_option', agent_information_option), ])
class yc_interfaces_openconfig_relay_agent__relay_agent_dhcp_interfaces(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcp/interfaces. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Enclosing container for the list of interface references.
"""
__slots__ = ('_path_helper', '_extmethods', '__interface',)
_yang_name = 'interfaces'
_yang_namespace = 'http://openconfig.net/yang/relay-agent'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['relay-agent', 'dhcp', 'interfaces']
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /relay_agent/dhcp/interfaces/interface (list)
YANG Description: List of interfaces on which the relay agent is
configured.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /relay_agent/dhcp/interfaces/interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: List of interfaces on which the relay agent is
configured.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)
interface = __builtin__.property(_get_interface, _set_interface)
_pyangbind_elements = OrderedDict([('interface', interface), ])
class yc_dhcp_openconfig_relay_agent__relay_agent_dhcp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Top-level container for global relay agent data
"""
__slots__ = ('_path_helper', '_extmethods', '__config','__state','__agent_information_option','__interfaces',)
_yang_name = 'dhcp'
_yang_namespace = 'http://openconfig.net/yang/relay-agent'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
self.__agent_information_option = YANGDynClass(base=yc_agent_information_option_openconfig_relay_agent__relay_agent_dhcp_agent_information_option, is_container='container', yang_name="agent-information-option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
self.__interfaces = YANGDynClass(base=yc_interfaces_openconfig_relay_agent__relay_agent_dhcp_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['relay-agent', 'dhcp']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /relay_agent/dhcp/config (container)
YANG Description: Configuration data for global DHCPv4
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /relay_agent/dhcp/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration data for global DHCPv4
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_config_openconfig_relay_agent__relay_agent_dhcp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcp_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /relay_agent/dhcp/state (container)
YANG Description: Operational state data global DHCPv4
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /relay_agent/dhcp/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state data global DHCPv4
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_state_openconfig_relay_agent__relay_agent_dhcp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcp_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
def _get_agent_information_option(self):
"""
Getter method for agent_information_option, mapped from YANG variable /relay_agent/dhcp/agent_information_option (container)
YANG Description: Top-level container for relay agent information option
data
"""
return self.__agent_information_option
def _set_agent_information_option(self, v, load=False):
"""
Setter method for agent_information_option, mapped from YANG variable /relay_agent/dhcp/agent_information_option (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_agent_information_option is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_agent_information_option() directly.
YANG Description: Top-level container for relay agent information option
data
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_agent_information_option_openconfig_relay_agent__relay_agent_dhcp_agent_information_option, is_container='container', yang_name="agent-information-option", | |
#!/usr/bin/env python
# Copyright (c) 2021 FrostBit Software Lab
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>
import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import pygame
import math
import carla
import re
# Slider constants
SLIDER_RIGHT_OFFSET = 120
SLIDER_SIZE = 120
SLIDER_GAP = 90
SLIDER_Y = 15
# Color constants
BLACK = (0, 0, 0)
ORANGE = (255, 183, 0)
WHITE = (255, 255, 255)
GREY = (75, 75, 75)
BLUE = (0, 0, 255)
# ==============================================================================
# -- Global functions ----------------------------------------------------------
# ==============================================================================
# RH = Relative Humidity
# T = Temperature
# TD = Dew point
# https://stackoverflow.com/questions/27288021/formula-to-calculate-dew-point-from-temperature-and-humidity
def get_approx_dew_point(T, RH):
td = (T-(14.55 + 0.114 * T)*(1-(0.01*RH))-pow(((2.5+0.007*T)*(1-(0.01*RH))),3)-(15.9+0.117*T)*pow((1-(0.01*RH)), 14))
return td
# https://earthscience.stackexchange.com/questions/20577/relative-humidity-approximation-from-dew-point-and-temperature
def get_approx_relative_humidity(T, TD):
rh = int(100*(math.exp((17.625*TD)/(243.04+TD))/math.exp((17.625*T)/(243.04+T))))
return rh
# https://bmcnoldy.rsmas.miami.edu/Humidity.html
def get_approx_temp(TD, RH):
t = 243.04*(((17.625*TD)/(243.04+TD))-math.log(RH/100))/(17.625+math.log(RH/100)-((17.625*TD)/(243.04+TD)))
return t
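# Rough consistency check of the three approximations above (hedged,
# hand-computed approximate values):
#   get_approx_dew_point(0, 80)            # ~ -3.0 degC
#   get_approx_relative_humidity(0, -3.0)  # ~ 80 % (int() truncates)
#   get_approx_temp(-3.0, 80)              # ~ 0 degC
# so dew point, relative humidity and temperature round-trip sensibly.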
def get_slider_offset(offset=40):
'''Return offset between each slider'''
global SLIDER_GAP
SLIDER_GAP += offset
return SLIDER_GAP
def find_weather_presets():
rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))
presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]
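# Quick sketch of the name splitting above (preset names are examples):
#   'HardRainNoon'    -> 'Hard Rain Noon'
#   'WetCloudySunset' -> 'Wet Cloudy Sunset'
# i.e. the regex breaks the CamelCase attribute names of
# carla.WeatherParameters into readable labels.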
# ==============================================================================
# -- INFO_HUD -------------------------------------------------------------
# ==============================================================================
class InfoHud(object):
def __init__(self, width, height, display):
self.dim = (width, height)
self.screen = display
font = pygame.font.Font(pygame.font.get_default_font(), 20)
font_name = 'courier' if os.name == 'nt' else 'mono'
fonts = [x for x in pygame.font.get_fonts() if font_name in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self.preset_slider = Slider
self.temp_slider = Slider
self.dewpoint_slider = Slider
self.humidity = 0
self.snow_amount_slider = Slider
self.ice_slider = Slider
self.precipitation_slider = Slider
self.fog_slider = Slider
self.fog_falloff = Slider
self.wind_slider = Slider
self.wind_dir_slider = Slider
self.particle_slider = Slider
self.time_slider = Slider
self.month_slider = Slider
self.sliders = []
self._font_mono = pygame.font.Font(mono, 18)
self._notifications = FadingText(font, (width, 40), (0, height - 40))
self.logo = pygame.image.load('images/WinterSim_White_Color.png')
self.logo = pygame.transform.scale(self.logo, (262,61))
self.logo_rect = self.logo.get_rect()
self._info_text = []
self._weather_presets = []
self.preset_names = []
self.muonio = False
self._weather_presets_all = find_weather_presets()
for preset in self._weather_presets_all:
if preset[0].temperature <= 0: # only get winter presets
self._weather_presets.append(preset)
self.preset_names.append(str(preset[1]))
self.preset_names.append("Custom") # add 'Custom' prest for the last list index,
# this is shown if sliders are changed manually
self.preset_count = len(self._weather_presets)
self.months = [
'January','February','March','April','May','June',
'July','August','September','October','November','December']
self.muonio_sun_positions = [
[12.5, 1.36, -43.6], [12.5, 9.25, -35.11],
[12.5, 20.13, -24.24],[12.5, 31.99, -12.37],
[12.5, 41.03, -2.74], [12.5, 45.39, 1.60],
[12.5, 43.51, 0.05], [12.5, 35.97, -8.07],
[12.5, 24.94, -19.04],[12.5, 13.44, -30.56],
[12.5, 3.66, -40.75], [12.5, -0.56, -45.32]]
self.rovaniemi_sun_positions = [
[12.5, 2.37, -44.6], [12.5, 9.38, -37.29],
[12.5, 19.60, -27.48], [12.5, 33.05, -14.16],
[12.5, 41.31, -3.84], [12.5, 46.84, 1.46],
[12.5, 45.01, -1.06], [12.5, 36.14, -9.35],
[12.5, 26.32, -19.63], [12.5, 15.62, -30.60],
[12.5, 4.56, -42.72], [12.5, 0.65, -46.77]]
# create checkbox(es)
self.boxes = []
self.button = Checkbox(self.screen, 20, 650, 0, caption='Static Tiretracks (F5)')
self.boxes.append(self.button)
self.make_sliders()
def setup(self, preset, map_name):
self.update_sliders(preset)
self.filtered_map_name = map_name
self.muonio = self.filtered_map_name == "Muonio"
def make_sliders(self):
'''Make sliders and add them in to list'''
self.preset_slider = Slider(self, "Preset", 0, self.preset_count, 0, SLIDER_GAP)
self.temp_slider = Slider(self, "Temperature", 0, 40, -40, get_slider_offset())
self.dewpoint_slider = Slider(self, "Dewpoint", 0, 40, -40, get_slider_offset())
self.ice_slider = Slider(self, "Friction", 0, 4, 0, get_slider_offset())
self.precipitation_slider = Slider(self, "Precipitation", 0, 100, 0, get_slider_offset())
self.snow_amount_slider = Slider(self, "Snow amount", 0, 100, 0, get_slider_offset())
self.particle_slider = Slider(self, "Snow p. size", 0.5, 7, 0.5, get_slider_offset())
self.fog_slider = Slider(self, "Fog", 0, 100, 0, get_slider_offset())
self.fog_falloff = Slider(self, "Fog falloff", 0.0, 2.0, 0.0, get_slider_offset())
self.wind_slider = Slider(self, "Wind intensity", 0, 70, 0, get_slider_offset())
self.wind_dir_slider = Slider(self, "Wind direction", 0, 179, -179, get_slider_offset())
self.time_slider = Slider(self, "Time", 10, 24, 0, get_slider_offset())
self.month_slider = Slider(self, "Month", 0, 11, 0, get_slider_offset())
def update_sliders(self, preset, month=None, clock=None):
'''Update slider positions if the weather is changed without moving the sliders.
Wrapped in a try-except block in case the preset doesn't have a certain weather parameter.'''
try:
self.snow_amount_slider.val = preset.snow_amount
self.ice_slider.val = preset.ice_amount
self.temp_slider.val = preset.temperature
self.precipitation_slider.val = preset.precipitation
self.fog_slider.val = preset.fog_density
self.fog_falloff.val = preset.fog_falloff
self.wind_slider.val = preset.wind_intensity * 100.0
self.particle_slider.val = preset.particle_size
self.humidity = preset.relative_humidity
self.dewpoint_slider.val = preset.dewpoint
self.wind_dir_slider.val = preset.wind_direction
except AttributeError as e:
print(e, "not implemented")
if month and clock:
self.month_slider.val = month
self.time_slider.val = clock
def get_month(self, val):
if self.muonio:
return self.months[val], self.muonio_sun_positions[val]
else:
return self.months[val], self.rovaniemi_sun_positions[val]
# Update hud text values
def tick(self, world, clock, hud):
self._notifications.tick(world, clock)
month, sundata = self.get_month(int(hud.month_slider.val))
preset = hud.preset_names[int(hud.preset_slider.val)]
self._info_text = [
' Weather Control',
'----------------------------',
'',
'Preset: {}'.format(preset),
'',
'Temperature: {}°C'.format(round(hud.temp_slider.val,1)),
'',
'Humidity: {}%'.format(round((hud.humidity), 1)),
'',
'Dewpoint: {}°'.format(round((hud.dewpoint_slider.val), 1)),
'',
'Friction level: {}'.format(int(hud.ice_slider.val)),
'',
'Precipitation: {}%'.format(round((hud.precipitation_slider.val), 1)),
'',
'Amount of Snow: {} cm'.format(round(hud.snow_amount_slider.val)),
'Snow particle size: {} mm'.format(round((hud.particle_slider.val), 1)),
'',
'Fog: {}%'.format(int(hud.fog_slider.val)),
'Fog Falloff: {}'.format(round((hud.fog_falloff.val), 1)),
'',
'Wind Intensity: {} m/s'.format(round((hud.wind_slider.val/10), 1)),
'Wind Direction: {}°'.format(round((hud.wind_dir_slider.val), 1)),
'',
'Time: {}:00'.format(int(hud.time_slider.val)),
'Month: {}'.format(month),
'',
'----------------------------',
'',
'Press C to change',
'weather preset',
'',
'Press B to get real time',
'weather']
def notification(self, text, seconds=2.0):
self._notifications.set_text(text, seconds=seconds)
def render(self, world, display, weather):
"""Render hud texts into pygame window"""
display_rect = display.get_rect()
self.logo_rect.topright = tuple(map(lambda i, j: i - j, display_rect.topright, (5,-2)))
display.blit(self.logo, self.logo_rect)
info_surface = pygame.Surface((345, self.dim[1]))
info_surface.set_alpha(100)
info_surface.fill(GREY)
display.blit(info_surface, (0, 0))
v_offset = 4
for item in self._info_text:
surface = self._font_mono.render(item, True, WHITE)
display.blit(surface, (18, v_offset + 10))
v_offset += 18
self._notifications.render(display)
# render checkboxes to pygame window
for box in self.boxes:
box.render_checkbox()
# render sliders to pygame window
for slider in self.sliders:
if slider.hit:
slider.move()
weather.tick(self, world, world._weather_presets[0], slider)
world.world.set_weather(weather.weather)
slider.render(display, slider)
# ==============================================================================
# -- Checkbox ----------------------------------------------------------------
# ==============================================================================
class Checkbox:
def __init__(self, surface, x, y, idnum, color=(230, 230, 230),
caption="", outline_color=(255, 255, 255), check_color=(0, 0, 0),
font_size=16, font_color=(255, 255, 255), text_offset=(20, 1), checkbox_size=12):
self.surface = surface
self.x = x
self.y = y
self.color = color
self.caption = caption
self.oc = outline_color
self.cc = check_color
self.fs = font_size
self.fc = font_color
self.to = text_offset
default_font = 'courier'
self.ft = default_font
self.checkbox_size = checkbox_size
self.idnum = idnum
self.checkbox_obj = pygame.Rect(self.x, self.y, checkbox_size, checkbox_size)
self.checkbox_outline = self.checkbox_obj.copy()
self.checked = True
def _draw_button_text(self):
self.font = pygame.font.SysFont(self.ft, self.fs)
self.font_surf = self.font.render(self.caption, True, self.fc)
w, h = self.font.size(self.caption)
self.font_pos = (self.x + self.to[0], self.y + 12 / 2 - h / 2 + self.to[1])
self.surface.blit(self.font_surf, self.font_pos)
def render_checkbox(self):
if self.checked:
pygame.draw.rect(self.surface, self.color, self.checkbox_obj)
pygame.draw.rect(self.surface, self.oc, self.checkbox_outline, 1)
pygame.draw.circle(self.surface, self.cc, (self.x + 6, self.y + 6), 4)
else:
pygame.draw.rect(self.surface, self.color, self.checkbox_obj)
pygame.draw.rect(self.surface, self.oc, self.checkbox_outline, 1)
self._draw_button_text()
def update_checkbox(self, pos):
x, y = pos
px, py, w, h = self.checkbox_obj
if px < x < px + w and py < y < py + w:
self.checked ^= True
return True
else:
return False
# ==============================================================================
# -- SliderObject -------------------------------------------------------------
# ==============================================================================
class Slider():
def __init__(self, InfoHud, name, val, maxi, mini, pos):
self.hud = InfoHud
self.font = pygame.font.SysFont("ubuntumono", 20)
self.name = name
self.val = val # start value
self.maxi = maxi # maximum at slider position right
self.mini = mini # minimum at slider position left
self.xpos = 358 # x-location on screen
self.ypos = pos
self.surf = pygame.surface.Surface((250, 100))
# The hit attribute indicates slider movement due to mouse interaction.
self.hit = False
self.txt_surf = self.font.render(name, 1, WHITE)
self.txt_rect = self.txt_surf.get_rect()
self.txt_rect.left = 6
self.txt_rect.top = 8
# Static graphics - slider background #
pygame.draw.rect(self.surf, ORANGE, [SLIDER_RIGHT_OFFSET, SLIDER_Y, SLIDER_SIZE, 1], 0)
#borders
line_width = 1
width = 250
height = 27
# top line #first = starting point on width, second = starting point on height,
# third = width, fourth = height
pygame.draw.rect(self.surf, WHITE, [0,0, width,line_width]) # top line
pygame.draw.rect(self.surf, WHITE, [0, height-line_width,width,line_width]) # bottom line
pygame.draw.rect(self.surf, WHITE, [0,0, line_width, height]) # left line
pygame.draw.rect(self.surf, WHITE, [width-line_width,0,line_width, height+line_width]) # right line
# this surface never changes
self.surf.blit(self.txt_surf, self.txt_rect)
# dynamic graphics | |
{2}
RETURN NEW;
END;
$$ LANGUAGE plpgsql;""" # 0 - function name, 1 - DECLARE block with local variables, 2 - function body
sql_template = """DROP TRIGGER IF EXISTS {0} ON {1};
CREATE TRIGGER {0}
{2} ON {1} FOR EACH ROW EXECUTE PROCEDURE {3}();""" # 0 - trigger name, 1 - table name, 2 - action type, e.g. (BEFORE INSERT), 3 - function name
for trig in trig_opt:
trig_name = trig.get('name')
func_name = trig_name + '_FUNC'
table_name = trig.get('t_name')
trig_action = trig.get('action')
trig_text = trig.get('text')
trig_text = trig_text.replace('\r', '')
trig_text_list = trig_text.split('\n')
new_trig_text_list = []
declare = []
into_vars = []
for row in trig_text_list:
row = row.strip()
row = row.upper()
if row.strip().startswith('--') or not row:
continue
if 'AS' in row and len(row)==2:
continue
if 'BEGIN' in row and len(row)==5:
continue
if 'END' in row and len(row)==3:
continue
if 'DECLARE' in row:
# variable declarations are present:
# strip the declarations from the trigger text
# and collect them for the DECLARE section of the new function
new_row = row.replace('DECLARE', '')
new_row = new_row.replace('VARIABLE', '')
declare.append(new_row)
continue
if 'INTO' in row and 'INSERT' not in row:
_, i_v = row.strip().split(':')
into_vars.append(i_v.replace(';', ''))
row = row.replace('INTO:', '').replace('INTO', '').replace(':%s'%i_v, ';').replace(i_v, ';')
if 'EXCEPTION' in row:
row = "RAISE EXCEPTION USING MESSAGE = 'insert or update Error';"
row = row.replace('CURRENT_TIMESTAMP', 'now()')
new_trig_text_list.append(row)
trig_text = '\n'.join(new_trig_text_list)
if len(into_vars) > 0:
new_stri = []
trig_text = trig_text.split('SELECT')
for i, a in enumerate(trig_text):
if len(declare) > 0:
a = a.replace(':', '')
if i < len(into_vars):
new_stri.append(a + into_vars[i] + ':=')
else:
new_stri.append(a)
trig_text = 'SELECT'.join(new_stri)
if len(declare) > 0:
declare = 'DECLARE ' + '\n'.join(declare)
else:
declare = ''
if_count = trig_text.count('IF')
if if_count > 0:
ppos = 0
for q in range(if_count):
if_pos = trig_text.find('IF', ppos)
then_pos = trig_text.find('THEN', if_pos)
if 'ELSE' not in trig_text:
semi_pos = trig_text.find(';', then_pos)
else:
else_pos = trig_text.find('ELSE', then_pos)
semi_pos = trig_text.find(';', else_pos)
trig_text = trig_text[:semi_pos+1] + '\nEND IF;\n' + trig_text[semi_pos+1:]
ppos = semi_pos
ddot_inx = trig_text.count(':=')
ddot = 0
for i in range(ddot_inx):
ddot = trig_text.find(':=', ddot)
semi_pos = trig_text.find(';', ddot)
trig_text = trig_text[:ddot+2] + '(' + trig_text[ddot+2:semi_pos] + ')' + trig_text[semi_pos:]
ddot = semi_pos
sql_func = sql_template_func.format(func_name, declare, trig_text)
if 'BEFORE DELETE' in trig_action:
sql_func = sql_func.replace('RETURN NEW', 'RETURN OLD')
sql_tri = sql_template.format(trig_name, table_name, trig_action, func_name)
sql = '\n'.join([sql_func, sql_tri])
print('creating trigger %s' % trig_name)
yield sql
def set_exec(ini, sql, debug=False, fetch=False):
result = None
with PGConn(ini, debug=debug) as db:
db.execute(sql)
if fetch:
result = db.fetch()
if result:
return result
else:
return
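# Usage sketch for set_exec(); the table name below is an illustrative
# assumption, not something defined by this module.
def _example_set_exec(ini, debug=False):
    # Statement without a result set (DDL/DML)
    set_exec(ini, "TRUNCATE TABLE clients;", debug=debug)
    # Statement with a result set: pass fetch=True to get the rows back
    return set_exec(ini, "SELECT count(*) FROM clients;", debug=debug, fetch=True)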
def get_pg_tables(ini, debug=False):
res = None
name = None
fields = None
old_name = None
sql = """select table_name, column_name, ordinal_position from information_schema.columns where table_schema='public' order by table_name, ordinal_position;"""
res = set_exec(ini, sql, debug=debug, fetch=1)
if res:
for row in res:
name = row[0]
if name != name.lower():
name = "\"%s\"" % name
else:
name = name.lower()
            if old_name is None:
                # first row processed
fields = []
fields.append(row[1])
old_name = name
continue
if name == old_name:
fields.append(row[1])
else:
yield old_name, fields
fields = []
fields.append(row[1])
old_name = name
yield old_name, fields
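# Usage sketch for get_pg_tables(): walk every table in the public schema of
# the target PostgreSQL database together with its column list.
def _example_list_pg_tables(ini, debug=False):
    for table_name, columns in get_pg_tables(ini, debug=debug):
        print(table_name, '->', ', '.join(columns))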
def _gen_get_data(kwargs):
ini = kwargs.get('ini')
debug = kwargs.get('debug')
fields = kwargs.get('fields')
name = kwargs.get('name')
c1 = kwargs.get('c1')
c2 = kwargs.get('c2')
sql_fields = kwargs.get('sql_fields')
cpu = kwargs.get('cpu')
pk = kwargs.get('pk')
print('start pump: thread-> %s ; from position-> %s' %(cpu, c1), flush=True)
sql_template = """select {0} from {1} r order by r.{2} asc rows {3} to {4}"""
sql_t = """insert into {0} ({1}) values ({2}) ON CONFLICT DO NOTHING;"""
sql = sql_template.format(sql_fields, name, pk, c1, c2)
#print(sql)
params = []
rows = None
ccc = 0
#print("sql\t", sql)
res = get_fdb(ini, sql, debug=debug)
if res:
for row in res:
row_ins = []
for col in row:
row_ins.append(col)
params.append(row_ins)
ccc += 1
    # check whether any of the fields is of type bytea
byteas = []
sql_check = """select table_name, column_name, data_type
from information_schema.columns
where table_name = '%s' and data_type = 'bytea';""" % name
res = None
#print(sql_check)
res = set_exec(ini, sql_check, debug=debug, fetch=True)
#print("_"*20)
#print(res)
if res:
for row in res:
byteas.append(row[1].lower())
qqs = []
iis = []
for i, f in enumerate(fields):
if f in byteas:
iis.append(i)
q = '%s'
qqs.append(q)
values = ','.join(qqs)
for i in iis:
for para in params:
if para[i]:
# print(para[i])
try:
data = para[i].encode()
except:
# print('data:', para[i], sep="\t")
data = para[i]
para[i] = psycopg2.Binary(data)
#print(para[i])
sql_fields = ','.join([_check_names(ini, fi) for fi in fields])
with PGConn(ini, debug=debug) as db:
sql = sql_t.format(name, sql_fields, values)
#print(sql)
#print(params)
db.executemany(sql, params)
#print('ccc->', ccc)
return ccc
def _check_names(ini, in_field):
if in_field in ini.params.excludes:
in_field = '"%s"' % in_field
return in_field
def get_fb_data(ini, name, fields, only=None, exclude=None, debug=False):
print('pumping %s' % name, flush=True)
#print(only)
if only and name.lower() != only.lower():
print('skipping', flush=True)
return
if exclude and name.lower() in exclude:
print('skipping', flush=True)
return
res = None
pg_name = name
#print("*"*20)
#print(name)
#print(name.upper())
if name != name.lower():
fb_name = "\"%s\"" % name
else:
fb_name = name.upper()
sql = """select count(*) from %s""" % fb_name
#print(sql)
res = get_fdb(ini, sql, debug=debug)
if res:
total_count = int(res[0][0])
else:
return
if total_count == 0:
return
    # triggers (if any) have to be disabled before pumping data
sql_trig = f"""select c.relname, t.tgrelid, t.tgname, t.tgfoid
from pg_trigger t
join pg_class c on c.oid = t.tgrelid
where c.relname = '{name}';"""
triggers = set_exec(ini, sql_trig, debug=debug, fetch=1)
tt = []
if triggers:
for row in triggers:
tt.append(row[2])
triggers = tt
triggers = ','.join(triggers)
c1 = 1
cnt = ini.params.potion
c2 = c1 + cnt - 1
sql = """select R.RDB$RELATION_NAME, R.RDB$FIELD_NAME, tt PK
from RDB$FIELDS F, RDB$RELATION_FIELDS R
left JOIN (
SELECT c.RDB$RELATION_NAME rn, seg.RDB$FIELD_NAME jn, c.RDB$CONSTRAINT_TYPE tt
FROM RDB$RELATION_CONSTRAINTS c
JOIN RDB$INDEX_SEGMENTS seg on seg.RDB$INDEX_NAME = c.RDB$INDEX_NAME
WHERE c.RDB$CONSTRAINT_TYPE = 'PRIMARY KEY' AND c.RDB$RELATION_NAME NOT CONTAINING '$'
) on r.RDB$FIELD_NAME = jn AND r.RDB$RELATION_NAME = rn
JOIN RDB$RELATIONS rel on rel.RDB$RELATION_NAME = r.RDB$RELATION_NAME
where F.RDB$FIELD_NAME = R.RDB$FIELD_SOURCE and R.RDB$SYSTEM_FLAG = 0 and R.RDB$RELATION_NAME NOT CONTAINING '$' and rel.RDB$VIEW_SOURCE IS NULL
AND r.RDB$RELATION_NAME = '%s'
AND tt is NOT NULL
order by R.RDB$RELATION_NAME, R.RDB$FIELD_POSITION""" % fb_name
sql_ind = """SELECT ic.oid,pg_get_indexdef(ic.oid),ic.relname AS name, am.amname, i.indisprimary AS pri,
i.indisunique AS uni, i.indkey AS fields, i.indclass AS fopclass,
i.indisclustered, ic.oid AS indid, c.oid AS relid, ds.description,
u.usename, pg_get_expr(i.indexprs, i.indrelid) AS expr,
ts.spcname, pg_get_expr(i.indpred, i.indrelid) AS wh,
cn.oid IS NOT NULL AS iscn, cn.oid as constroid
FROM pg_index i INNER JOIN pg_class c ON i.indrelid = c.oid
INNER JOIN pg_class ic ON i.indexrelid = ic.oid
INNER JOIN pg_am am ON ic.relam = am.oid
LEFT OUTER JOIN pg_description ds ON ds.objoid = ic.oid
LEFT OUTER JOIN pg_user u ON u.usesysid = ic.relowner
LEFT OUTER JOIN pg_constraint cn ON i.indrelid = cn.conrelid AND ic.relname = cn.conname
LEFT OUTER JOIN pg_tablespace ts ON ts.oid = ic.reltablespace
WHERE
c.oid = ('%s')::regclass::oid and not i.indisprimary
ORDER BY ic.relname;"""% fb_name
res = None
res = get_fdb(ini, sql, debug=debug)
print(res)
if res:
pk = res[0][1].strip()
else:
pk = 'RDB$DB_KEY'
f_ins = []
for field in fields:
field = _check_names(ini, field)
f_ins.append('r.' + field)
sql_fields = ','.join(f_ins)
cc = 0
indices_drop = None
indeces_sqls = None
    # disable indexes (drop them for the duration of the load)
res = set_exec(ini, sql_ind, debug=debug, fetch=1)
if res:
indices_drop, indeces_sqls = _get_pg_indeces(ini, res)
if indices_drop:
set_exec(ini, indices_drop, debug=debug)
#db.execute(indices_drop)
cpu_number = ini.params.cpu
sql_triggers = f"""alter table {name} %s trigger %s;"""
    # disable triggers
if triggers:
set_exec(ini, sql_triggers % ('disable', triggers), debug=debug)
while total_count > 0:
        # create a thread pool sized to the number of CPU cores
pool = ThreadPool(cpu_number)
p_list = []
for cpu in range(cpu_number):
if total_count < 0:
pump_params = {'ini': ini, 'sql_fields': sql_fields, 'name': name, 'c1': 1, 'c2': 0, 'pk': pk, 'fields': fields, 'debug':debug, 'cpu': cpu}
else:
pump_params = {'ini': ini, 'sql_fields': sql_fields, 'name': name, 'c1': c1, 'c2': c2, 'pk': pk, 'fields': fields, 'debug':debug, 'cpu': cpu}
p_list.append(pump_params)
c2 = c2 + cnt
c1 = c1 + cnt
total_count -= cnt
results = pool.map(_gen_get_data, p_list)
pool.close()
pool.join()
q = 0
for r in results:
q += int(r)
cc += q
    # re-enable triggers
if triggers:
set_exec(ini, sql_triggers % ('enable', triggers), debug=debug)
    # re-enable indexes (recreate them)
if indeces_sqls:
set_exec(ini, indeces_sqls, debug=debug)
print('total->', cc, flush=True)
def _get_pg_indeces(ini, res):
indices = []
drops = []
sql_drop = "DROP INDEX IF EXISTS %s ;"
for row in res:
"
"to report this bug.")
def _apply(self, fn):
for module in self.children():
module._apply(fn)
def compute_should_use_set_data(tensor, tensor_applied):
if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
# If the new tensor has compatible tensor type as the existing tensor,
# the current behavior is to change the tensor in-place using `.data =`,
# and the future behavior is to overwrite the existing tensor. However,
# changing the current behavior is a BC-breaking change, and we want it
# to happen in future releases. So for now we introduce the
# `torch.__future__.get_overwrite_module_params_on_conversion()`
# global flag to let the user control whether they want the future
# behavior of overwriting the existing tensor or not.
return not torch.__future__.get_overwrite_module_params_on_conversion()
else:
return False
for key, param in self._parameters.items():
if param is None:
continue
# Tensors stored in modules are graph leaves, and we don't want to
# track autograd history of `param_applied`, so we have to use
# `with torch.no_grad():`
with torch.no_grad():
param_applied = fn(param)
should_use_set_data = compute_should_use_set_data(param, param_applied)
if should_use_set_data:
param.data = param_applied
out_param = param
else:
assert isinstance(param, Parameter)
assert param.is_leaf
out_param = Parameter(param_applied, param.requires_grad)
self._parameters[key] = out_param
if param.grad is not None:
with torch.no_grad():
grad_applied = fn(param.grad)
should_use_set_data = compute_should_use_set_data(param.grad, grad_applied)
if should_use_set_data:
out_param.grad.data = grad_applied
else:
assert param.grad.is_leaf
out_param.grad = grad_applied.requires_grad_(param.grad.requires_grad)
for key, buf in self._buffers.items():
if buf is not None:
self._buffers[key] = fn(buf)
return self
def apply(self: T, fn: Callable[['Module'], None]) -> T:
r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)
as well as self. Typical use includes initializing the parameters of a model
(see also :ref:`nn-init-doc`).
Args:
fn (:class:`Module` -> None): function to be applied to each submodule
Returns:
Module: self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>> print(m)
>>> if type(m) == nn.Linear:
>>> m.weight.fill_(1.0)
>>> print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1., 1.],
[ 1., 1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1., 1.],
[ 1., 1.]])
Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
)
"""
for module in self.children():
module.apply(fn)
fn(self)
return self
def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:
r"""Moves all model parameters and buffers to the GPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on GPU while being optimized.
.. note::
This method modifies the module in-place.
Args:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
return self._apply(lambda t: t.cuda(device))
def xpu(self: T, device: Optional[Union[int, device]] = None) -> T:
r"""Moves all model parameters and buffers to the XPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on XPU while being optimized.
.. note::
This method modifies the module in-place.
Arguments:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
return self._apply(lambda t: t.xpu(device))
def cpu(self: T) -> T:
r"""Moves all model parameters and buffers to the CPU.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.cpu())
def type(self: T, dst_type: Union[dtype, str]) -> T:
r"""Casts all parameters and buffers to :attr:`dst_type`.
.. note::
This method modifies the module in-place.
Args:
dst_type (type or string): the desired type
Returns:
Module: self
"""
return self._apply(lambda t: t.type(dst_type))
def float(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``float`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.float() if t.is_floating_point() else t)
def double(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``double`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.double() if t.is_floating_point() else t)
def half(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``half`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.half() if t.is_floating_point() else t)
def bfloat16(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)
def to_empty(self: T, *, device: Union[str, device]) -> T:
r"""Moves the parameters and buffers to the specified device without copying storage.
Args:
device (:class:`torch.device`): The desired device of the parameters
and buffers in this module.
Returns:
Module: self
"""
return self._apply(lambda t: torch.empty_like(t, device=device))
@overload
def to(self: T, device: Optional[Union[int, device]] = ..., dtype: Optional[Union[dtype, str]] = ...,
non_blocking: bool = ...) -> T:
...
@overload
def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
...
@overload
def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
...
def to(self, *args, **kwargs):
r"""Moves and/or casts the parameters and buffers.
This can be called as
.. function:: to(device=None, dtype=None, non_blocking=False)
:noindex:
.. function:: to(dtype, non_blocking=False)
:noindex:
.. function:: to(tensor, non_blocking=False)
:noindex:
.. function:: to(memory_format=torch.channels_last)
:noindex:
Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
floating point or complex :attr:`dtype`\ s. In addition, this method will
only cast the floating point or complex parameters and buffers to :attr:`dtype`
(if given). The integral parameters and buffers will be moved
:attr:`device`, if that is given, but with dtypes unchanged. When
:attr:`non_blocking` is set, it tries to convert/move asynchronously
with respect to the host if possible, e.g., moving CPU Tensors with
pinned memory to CUDA devices.
See below for examples.
.. note::
This method modifies the module in-place.
Args:
device (:class:`torch.device`): the desired device of the parameters
and buffers in this module
dtype (:class:`torch.dtype`): the desired floating point or complex dtype of
the parameters and buffers in this module
tensor (torch.Tensor): Tensor whose dtype and device are the desired
dtype and device for all parameters and buffers in this module
memory_format (:class:`torch.memory_format`): the desired memory
format for 4D parameters and buffers in this module (keyword
only argument)
Returns:
Module: self
Examples::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
[-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
[-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
[-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
[-0.5112, -0.2324]], dtype=torch.float16)
>>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
>>> linear.weight
Parameter containing:
tensor([[ 0.3741+0.j, 0.2382+0.j],
[ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
>>> linear(torch.ones(3, 2, dtype=torch.cdouble))
tensor([[0.6122+0.j, 0.1150+0.j],
[0.6122+0.j, 0.1150+0.j],
[0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
"""
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
if dtype is not None:
if not (dtype.is_floating_point or dtype.is_complex):
raise TypeError('nn.Module.to only accepts floating point or complex '
'dtypes, but got desired dtype={}'.format(dtype))
if dtype.is_complex:
warnings.warn(
"Complex modules are a new feature under active development whose design may change, "
"and some modules might not work as expected when using complex tensors as parameters or buffers. "
"Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.md "
"if a complex module does not work as expected.")
def convert(t):
if convert_to_format is not None and t.dim() in (4, 5):
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
non_blocking, memory_format=convert_to_format)
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
return self._apply(convert)
def register_backward_hook(
self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]
) -> RemovableHandle:
r"""Registers a backward hook on the module.
This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and
the behavior of this function will change in future versions.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
if self._is_full_backward_hook is True:
raise RuntimeError("Cannot use both regular backward hooks and full backward | |
not None:
rule['vertexColorStreamName'] = vertex_color_stream
mesh_group['rules']['rules'].append(rule)
def mesh_group_add_skin_rule(self, mesh_group: dict, max_weights_per_vertex: int = 4, weight_threshold: float = 0.001) -> None:
"""Adds a Skin rule.
Parameters
----------
mesh_group :
Mesh Group to add the rule to.
max_weights_per_vertex :
Max number of joints that can influence a vertex.
weight_threshold :
Weight values below this value will be treated as 0.
"""
rule = {
'$type': 'SkinRule',
'maxWeightsPerVertex': max_weights_per_vertex,
'weightThreshold': weight_threshold
}
mesh_group['rules']['rules'].append(rule)
def mesh_group_add_tangent_rule(self, mesh_group: dict,
tangent_space: TangentSpaceSource = TangentSpaceSource.SCENE,
tspace_method: TangentSpaceMethod = TangentSpaceMethod.TSPACE) -> None:
"""Adds a Tangent rule to control tangent space generation.
Parameters
----------
mesh_group :
Mesh Group to add the rule to.
tangent_space :
Tangent space source. 0 = Scene, 1 = MikkT Tangent Generation.
tspace_method :
MikkT Generation method. 0 = TSpace, 1 = TSpaceBasic.
"""
rule = {
'$type': 'TangentsRule',
'tangentSpace': int(tangent_space),
'tSpaceMethod': int(tspace_method)
}
mesh_group['rules']['rules'].append(rule)
def __add_physx_base_mesh_group(self, name: str, physics_material: typing.Optional[str] = None) -> dict:
import azlmbr.math
group = {
'$type': '{5B03C8E6-8CEE-4DA0-A7FA-CD88689DD45B} MeshGroup',
'id': azlmbr.math.Uuid_CreateRandom().ToString(),
'name': name,
'NodeSelectionList': {
'selectedNodes': [],
'unselectedNodes': []
},
"MaterialSlots": [
"Material"
],
"PhysicsMaterials": [
self.__default_or_value(physics_material, "<Default Physics Material>")
],
"rules": {
"rules": []
}
}
self.manifest['values'].append(group)
return group
def add_physx_triangle_mesh_group(self, name: str,
merge_meshes: bool = True,
weld_vertices: bool = False,
disable_clean_mesh: bool = False,
force_32bit_indices: bool = False,
suppress_triangle_mesh_remap_table: bool = False,
build_triangle_adjacencies: bool = False,
mesh_weld_tolerance: float = 0.0,
num_tris_per_leaf: int = 4,
physics_material: typing.Optional[str] = None) -> dict:
"""Adds a Triangle type PhysX Mesh Group to the scene.
Parameters
----------
name :
Name of the mesh group.
merge_meshes :
When true, all selected nodes will be merged into a single collision mesh.
weld_vertices :
When true, mesh welding is performed. Clean mesh must be enabled.
disable_clean_mesh :
When true, mesh cleaning is disabled. This makes cooking faster.
force_32bit_indices :
When true, 32-bit indices will always be created regardless of triangle count.
suppress_triangle_mesh_remap_table :
When true, the face remap table is not created.
This saves a significant amount of memory, but the SDK will not be able to provide the remap
information for internal mesh triangles returned by collisions, sweeps or raycasts hits.
build_triangle_adjacencies :
When true, the triangle adjacency information is created.
mesh_weld_tolerance :
If mesh welding is enabled, this controls the distance at
which vertices are welded. If mesh welding is not enabled, this value defines the
acceptance distance for mesh validation. Provided no two vertices are within this
distance, the mesh is considered to be clean. If not, a warning will be emitted.
num_tris_per_leaf :
Mesh cooking hint for max triangles per leaf limit. Fewer triangles per leaf
produces larger meshes with better runtime performance and worse cooking performance.
physics_material :
Configure which physics material to use.
Returns
-------
dict
The newly created mesh group.
"""
group = self.__add_physx_base_mesh_group(name, physics_material)
group["export method"] = 0
group["TriangleMeshAssetParams"] = {
"MergeMeshes": merge_meshes,
"WeldVertices": weld_vertices,
"DisableCleanMesh": disable_clean_mesh,
"Force32BitIndices": force_32bit_indices,
"SuppressTriangleMeshRemapTable": suppress_triangle_mesh_remap_table,
"BuildTriangleAdjacencies": build_triangle_adjacencies,
"MeshWeldTolerance": mesh_weld_tolerance,
"NumTrisPerLeaf": num_tris_per_leaf
}
return group
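    # Usage sketch (comments only, since the manifest class that owns these
    # methods is not shown in this excerpt; 'manifest' and the node path are
    # illustrative assumptions):
    #
    #   group = manifest.add_physx_triangle_mesh_group(
    #       'level_collision', weld_vertices=True, mesh_weld_tolerance=0.001)
    #   manifest.physx_mesh_group_add_selected_node(group, 'RootNode.collision_mesh')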
def add_physx_convex_mesh_group(self, name: str, area_test_epsilon: float = 0.059, plane_tolerance: float = 0.0006,
use_16bit_indices: bool = False,
check_zero_area_triangles: bool = False,
quantize_input: bool = False,
use_plane_shifting: bool = False,
shift_vertices: bool = False,
gauss_map_limit: int = 32,
build_gpu_data: bool = False,
physics_material: typing.Optional[str] = None) -> dict:
"""Adds a Convex type PhysX Mesh Group to the scene.
Parameters
----------
name :
Name of the mesh group.
area_test_epsilon :
If the area of a triangle of the hull is below this value, the triangle will be
rejected. This test is done only if Check Zero Area Triangles is used.
plane_tolerance :
The value is used during hull construction. When a new point is about to be added
to the hull it gets dropped when the point is closer to the hull than the planeTolerance.
use_16bit_indices :
Denotes the use of 16-bit vertex indices in Convex triangles or polygons.
check_zero_area_triangles :
Checks and removes almost zero-area triangles during convex hull computation.
The rejected area size is specified in Area Test Epsilon.
quantize_input :
Quantizes the input vertices using the k-means clustering.
use_plane_shifting :
Enables plane shifting vertex limit algorithm. Plane shifting is an alternative
algorithm for the case when the computed hull has more vertices than the specified vertex
limit.
shift_vertices :
Convex hull input vertices are shifted to be around origin to provide better
computation stability
gauss_map_limit :
Vertex limit beyond which additional acceleration structures are computed for each
convex mesh. Increase that limit to reduce memory usage. Computing the extra structures
all the time does not guarantee optimal performance.
build_gpu_data :
When true, additional information required for GPU-accelerated rigid body
simulation is created. This can increase memory usage and cooking times for convex meshes
and triangle meshes. Convex hulls are created with respect to GPU simulation limitations.
Vertex limit is set to 64 and vertex limit per face is internally set to 32.
physics_material :
Configure which physics material to use.
Returns
-------
dict
The newly created mesh group.
"""
group = self.__add_physx_base_mesh_group(name, physics_material)
group["export method"] = 1
group["ConvexAssetParams"] = {
"AreaTestEpsilon": area_test_epsilon,
"PlaneTolerance": plane_tolerance,
"Use16bitIndices": use_16bit_indices,
"CheckZeroAreaTriangles": check_zero_area_triangles,
"QuantizeInput": quantize_input,
"UsePlaneShifting": use_plane_shifting,
"ShiftVertices": shift_vertices,
"GaussMapLimit": gauss_map_limit,
"BuildGpuData": build_gpu_data
}
return group
def add_physx_primitive_mesh_group(self, name: str,
primitive_shape_target: PrimitiveShape = PrimitiveShape.BEST_FIT,
volume_term_coefficient: float = 0.0,
physics_material: typing.Optional[str] = None) -> dict:
"""Adds a Primitive Shape type PhysX Mesh Group to the scene
Parameters
----------
name :
Name of the mesh group.
primitive_shape_target :
The shape that should be fitted to this mesh. If BEST_FIT is selected, the
algorithm will determine which of the shapes fits best.
volume_term_coefficient :
This parameter controls how aggressively the primitive fitting algorithm will try
to minimize the volume of the fitted primitive. A value of 0 (no volume minimization) is
recommended for most meshes, especially those with moderate to high vertex counts.
physics_material :
Configure which physics material to use.
Returns
-------
dict
The newly created mesh group.
"""
group = self.__add_physx_base_mesh_group(name, physics_material)
group["export method"] = 2
group["PrimitiveAssetParams"] = {
"PrimitiveShapeTarget": int(primitive_shape_target),
"VolumeTermCoefficient": volume_term_coefficient
}
return group
def physx_mesh_group_decompose_meshes(self, mesh_group: dict, max_convex_hulls: int = 1024,
max_num_vertices_per_convex_hull: int = 64,
concavity: float = .001,
resolution: float = 100000,
mode: DecompositionMode = DecompositionMode.VOXEL,
alpha: float = .05,
beta: float = .05,
min_volume_per_convex_hull: float = 0.0001,
plane_downsampling: int = 4,
convex_hull_downsampling: int = 4,
pca: bool = False,
project_hull_vertices: bool = True) -> None:
"""Enables and configures mesh decomposition for a PhysX Mesh Group.
Only valid for convex or primitive mesh types.
Parameters
----------
mesh_group :
Mesh group to configure decomposition for.
max_convex_hulls :
Controls the maximum number of hulls to generate.
max_num_vertices_per_convex_hull :
Controls the maximum number of triangles per convex hull.
concavity :
Maximum concavity of each approximate convex hull.
resolution :
Maximum number of voxels generated during the voxelization stage.
mode :
Select voxel-based approximate convex decomposition or tetrahedron-based
approximate convex decomposition.
alpha :
Controls the bias toward clipping along symmetry planes.
beta :
Controls the bias toward clipping along revolution axes.
min_volume_per_convex_hull :
Controls the adaptive sampling of the generated convex hulls.
plane_downsampling :
Controls the granularity of the search for the best clipping plane.
convex_hull_downsampling :
Controls the precision of the convex hull generation process
during the clipping plane selection stage.
pca :
Enable or disable normalizing the mesh before applying the convex decomposition.
project_hull_vertices :
Project the output convex hull vertices onto the original source mesh to increase
the floating point accuracy of the results.
"""
mesh_group['DecomposeMeshes'] = True
mesh_group['ConvexDecompositionParams'] = {
"MaxConvexHulls": max_convex_hulls,
"MaxNumVerticesPerConvexHull": max_num_vertices_per_convex_hull,
"Concavity": concavity,
"Resolution": resolution,
"Mode": int(mode),
"Alpha": alpha,
"Beta": beta,
"MinVolumePerConvexHull": min_volume_per_convex_hull,
"PlaneDownsampling": plane_downsampling,
"ConvexHullDownsampling": convex_hull_downsampling,
"PCA": pca,
"ProjectHullVertices": project_hull_vertices
}
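    # Usage sketch (comments only; 'manifest' and the node path are
    # illustrative assumptions). Decomposition is only meaningful for the
    # convex and primitive export methods, as noted above:
    #
    #   group = manifest.add_physx_convex_mesh_group('prop_collision')
    #   manifest.physx_mesh_group_decompose_meshes(group, max_convex_hulls=32,
    #                                              concavity=0.002)
    #   manifest.physx_mesh_group_add_selected_node(group, 'RootNode.prop_mesh')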
def physx_mesh_group_add_selected_node(self, mesh_group: dict, node: str) -> None:
"""Adds a node to the selected nodes list
Parameters
----------
mesh_group :
Mesh group to add to.
node :
Node path to add.
"""
mesh_group['NodeSelectionList']['selectedNodes'].append(node)
    def physx_mesh_group_add_unselected_node(self, mesh_group: dict, node: str) -> None:
else:
err_msg = ("Input argument %r is not of type 'complex'!"
% (name))
e13.raise_error(err_msg, TypeError, logger)
# Check for float
elif 'float' in args:
# Check if float is provided and continue if so
if issubclass(values.dtype.type, (np.integer, np.floating)):
# Remove 'float' from args and check it again
args.remove('float')
values = np.asanyarray(values, dtype=float)
continue
else:
err_msg = "Input argument %r is not of type 'float'!" % (name)
e13.raise_error(err_msg, TypeError, logger)
# Check for integer
elif 'int' in args:
# Check if int is provided and continue if so
if issubclass(values.dtype.type, np.integer):
# Remove 'int' from args and check it again
args.remove('int')
continue
else:
err_msg = "Input argument %r is not of type 'int'!" % (name)
e13.raise_error(err_msg, TypeError, logger)
# Check for negative value
elif 'neg' in args:
# Check if value is negative and continue if so
try:
index = list(np.argwhere(values >= 0)[0])
except IndexError:
args.remove('neg')
continue
else:
err_msg = ("Input argument '%s%s' is not negative!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, ValueError, logger)
# Check for non-negative value
elif 'nneg' in args:
# Check if value is non-negative and continue if so
try:
index = list(np.argwhere(values < 0)[0])
except IndexError:
args.remove('nneg')
continue
else:
err_msg = ("Input argument '%s%s' is not non-negative!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, ValueError, logger)
# Check for normalized value [-1, 1]
elif 'normal' in args:
# Check if value is normal and continue if so
try:
index = list(np.argwhere(abs(values) > 1)[0])
except IndexError:
args.remove('normal')
continue
else:
err_msg = ("Input argument '%s%s' is not normalized!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, ValueError, logger)
# Check for non-positive value
elif 'npos' in args:
# Check if value is non-positive and continue if so
try:
index = list(np.argwhere(values > 0)[0])
except IndexError:
args.remove('npos')
continue
else:
err_msg = ("Input argument '%s%s' is not non-positive!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, ValueError, logger)
# Check for non-zero value
elif 'nzero' in args:
# Check if value is non-zero and continue if so
try:
index = list(np.argwhere(values == 0)[0])
except IndexError:
args.remove('nzero')
continue
else:
err_msg = ("Input argument '%s%s' is not non-zero!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, ValueError, logger)
# Check for positive value
elif 'pos' in args:
# Check if value is positive and continue if so
try:
index = list(np.argwhere(values <= 0)[0])
except IndexError:
args.remove('pos')
continue
else:
err_msg = ("Input argument '%s%s' is not positive!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, ValueError, logger)
# If none of the criteria is found, the criteria are invalid
else:
err_msg = ("Input argument 'args' contains invalid elements (%s)!"
% (args))
e13.raise_error(err_msg, ValueError, logger)
# If no criteria are left, it must be a finite value
else:
# Check if value is finite and continue if so
try:
index = list(np.argwhere(~np.isfinite(values))[0])
except IndexError:
pass
except TypeError:
err_msg = ("Input argument '%s%s' is not of type 'int' or 'float'!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, TypeError, logger)
else:
err_msg = ("Input argument '%s%s' is not finite!"
% (name, index if values.ndim != 0 else ''))
e13.raise_error(err_msg, ValueError, logger)
# Convert values back to its original type
if(arr_type == 'tuple'):
values = tuple(values.tolist())
elif(arr_type == 'list'):
values = values.tolist()
elif(arr_type == 'scalar'):
values = values.item()
# Return values
return(values)
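# Illustrative calls (the def line of the checker above lies outside this
# excerpt; the name 'check_vals' and the call signature are assumptions):
#
#   n_sam = check_vals(n_sam, 'n_sam', 'int', 'pos')          # positive integer
#   weights = check_vals(weights, 'weights', 'float', 'nneg')  # non-negative floats
#
# On success the (possibly converted) values are returned in their original
# container type; on failure a TypeError/ValueError is raised through e13.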
# Define class factory that returns a specialized h5py.File class
def get_PRISM_File(prism_hdf5_file):
"""
Returns a class definition ``PRISM_File(mode, emul_s=None, **kwargs)``.
This class definition is a specialized version of the :class:`~h5py.File`
class with the filename automatically set to `prism_hdf5_file` and added
logging to the constructor and destructor methods.
Parameters
----------
prism_hdf5_file : str
Absolute path to the master HDF5-file that is used in a
:obj:`~prism.Pipeline` instance.
Returns
-------
PRISM_File : class
Definition of the class ``PRISM_File(mode, emul_s=None, **kwargs)``.
"""
# Split provided prism_hdf5_file up into parts
parts = path.splitext(prism_hdf5_file)
# Override h5py's File.__init__() and __exit__() methods
class PRISM_File(h5py.File):
"""
Custom :class:`~h5py.File` class that automatically knows where all
*PRISM* HDF5-files are located when created by the
:func:`~get_PRISM_File` class factory. Additionally, certain keyword
arguments have default values and the opening/closing of an HDF5-file
is logged.
"""
# Override __init__() to include default settings and logging
def __init__(self, mode, emul_s=None, **kwargs):
"""
Opens the master HDF5-file `prism_hdf5_file` in `mode` according to
some set of default parameters.
Parameters
----------
mode : {'r'; 'r+'; 'w'; 'w-'/'x'; 'a'}
String indicating how the HDF5-file needs to be opened.
Optional
--------
emul_s : int or None. Default: None
If int, number indicating the requested emulator system file to
open.
If *None*, the master HDF5-file itself is opened.
kwargs : dict. Default: ``{'driver': None, 'libver': 'earliest'}``
Other keyword arguments that need to be given to the
:func:`~h5py.File` function.
"""
# Save emul_s as a property
self.emul_s = emul_s
# Set default settings
hdf5_kwargs = {'driver': None,
'libver': 'earliest'}
# Check emul_s and obtain proper logger
if self.emul_s is None:
# Only controller opens master file for writing, so use CLogger
sub_str = ''
logger = getCLogger('M-HDF5')
else:
sub_str = '_%i' % (self.emul_s)
logger = getRLogger('S-HDF5')
# Add sub_str to filename
filename = ''.join([parts[0], sub_str, parts[1]])
# Update hdf5_kwargs with provided ones
hdf5_kwargs.update(kwargs)
# Log that an HDF5-file is being opened
if self.emul_s is None:
logger.info("Opening master HDF5-file (mode: %r)." % (mode))
else:
logger.info("Opening system HDF5-file %i (mode: %r)."
% (self.emul_s, mode))
# Inheriting File __init__()
super().__init__(filename, mode, **hdf5_kwargs)
# Override __exit__() to include logging
def __exit__(self, *args):
# Log that an HDF5-file will be closed
if self.emul_s is None:
logger = getCLogger('M-HDF5')
logger.info("Closing master HDF5-file.")
else:
logger = getRLogger('S-HDF5')
logger.info("Closing system HDF5-file %i." % (self.emul_s))
# Inheriting File __exit__()
super().__exit__(*args)
# Return PRISM_File class definition
return(PRISM_File)
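# Usage sketch for the class factory above; the file path argument and the
# emulator system number 0 are illustrative assumptions.
def _example_open_prism_files(prism_hdf5_file):
    # Obtain the specialized File class bound to this master HDF5-file
    PRISM_File = get_PRISM_File(prism_hdf5_file)
    # Open the master HDF5-file read-only and list its top-level groups
    with PRISM_File('r') as master_file:
        groups = list(master_file.keys())
    # Open emulator system file 0 read-only as well
    with PRISM_File('r', emul_s=0) as system_file:
        n_items = len(system_file)
    # Return gathered information
    return(groups, n_items)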
# Define function that prints a string with the BibTeX entry to PRISM's paper
def get_bibtex():
"""
Prints a string that gives the BibTeX entry for citing the *PRISM* paper
(Van der Velden et al. 2019, ApJS, 242, 22).
"""
# Create string with BibTeX entry
bibtex = dedent(
r"""
@ARTICLE{2019ApJS..242...22V,
author = {{<NAME>}, E. and {<NAME>. and {<NAME>.
and {<NAME>. and {<NAME>.},
title = "{Model dispersion with PRISM; an alternative to MCMC for
rapid analysis of models}",
journal = {\apjs},
keywords = {Astrophysics - Instrumentation and Methods for
Astrophysics, Physics - Computational Physics},
year = "2019",
month = "Jun",
volume = {242},
number = {2},
eid = {22},
pages = {22},
doi = {10.3847/1538-4365/ab1f7d},
archivePrefix = {arXiv},
eprint = {1901.08725},
primaryClass = {astro-ph.IM},
adsurl = {https://ui.adsabs.harvard.edu/abs/2019ApJS..242...22V},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
""")
# Print the string
print(bibtex.strip())
# This function returns a logging.Formatter used for PRISM logging
def get_formatter():
"""
Returns a :obj:`~logging.Formatter` object containing the default logging
formatting.
"""
# Set formatting strings
fmt = "[%(asctime)s][%(levelname)-4s] %(name)-10s \t%(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
# Initialize Formatter class and return it
return(logging.Formatter(fmt, datefmt))
# This function returns a logging.Handler used for PRISM logging
def get_handler(filename):
"""
Returns a :obj:`~logging.Handler` object containing the default logging
handling settings.
"""
# Initialize Handler class
handler = logging.FileHandler(filename, mode='a', encoding='utf-8')
# Add name to handler
handler.set_name('prism_base')
# Set logLevel to DEBUG
handler.setLevel('DEBUG')
# Add formatter to handler
handler.setFormatter(get_formatter())
# Return handler
return(handler)
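# Usage sketch: attach the default handler (and thus formatter) to a logger.
# The logger name 'prism' and the log filename are illustrative assumptions.
def _example_init_logging(filename='prism_log.log'):
    # Obtain a logger and wire in the default PRISM file handler
    logger = logging.getLogger('prism')
    logger.setLevel('DEBUG')
    logger.addHandler(get_handler(filename))
    # Return initialized logger
    return(logger)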
# Define function that prints a string with all PRISM package information
def get_info():
"""
Prints a string that gives an overview of all information relevant to the
*PRISM* package distribution.
"""
# Create info list
info_list = []
# Add header to info_list
info_list.append(dedent("""
Configuration
-------------"""))
# Add platform to info_list
info_list.append("Platform: %s %i-bit"
% (platform.system(), calcsize('P')*8))
# Add python version to info_list
info_list.append("Python: %s" % (platform.python_version()))
# Add PRISM version to info_list
info_list.append("Version: %s" % (__version__))
# Access PRISM metadata
prism_dist = get_distribution('prism')
# Get list of all PRISM requirements
req_list = [req.name for req in prism_dist.requires()]
# Sort the requirements list
req_list.sort()
    # Make requirements
#!/usr/bin/env python3
# Repository: sandyfloren/sequenceAlignment
# Name: <NAME> (afloren)
# Group Members: None
"""This module provides tools for personalizing Multiple Sequence Alignments.
Currently, only genomic 23andMe tab-delimited files are suitable input data.
23andMe is still using human genome build 37, which makes it necessary to
convert NCBI genomic SNP location data from hg38 to hg37. This is done with
the MyVariant tool: https://pypi.org/project/myvariant/
Other important information is retrieved from NCBI databases using
Biopython's Entrez, and sequence manipulation and alignment uses other Bio
modules, including SeqIO, AlignIO, etc: https://biopython.org,
Copyright 1999-2003 by <NAME>.
This program requires an email address and
NCBI API key. Please use your own. To obtain a key and make an account,
visit: https://www.ncbi.nlm.nih.gov/account/
All the SNPs in a given mRNA sequence can be found on dbSNP's GeneView page,
which provides a batch query service that seems to be broken. Because of
this, it was necessary to scrape the data directly using Selenium. Thus in
order to obtain the data it is important to install the latest version of
the Google Chrome Webdriver and browser:
https://chromedriver.chromium.org/getting-started
https://www.google.com/chrome/
Classes:
GenomeReader: Defines objects to read 23andMe files.
-Adapted from FastAreader by <NAME> for BME160, UCSC, Feb 2020
    SequenceAlignment: Define objects to create personalized alignments.
CommandLine: Handle the command line, usage and help requests.
-Adapted from CommandLine by <NAME> for BME160, UCSC, Mar 2020
Methods:
getChromosome: Find the chromosome number for a given gene ID.
getAccessionsFromGene: Find all accessions in GeneView for a gene.
    getGeneFromAccession: Find the gene ID from an accession number.
    rsidsToHG37Positions: Find the positions on hg37 from a list of rsids.
    geneIDtoHG37Positions: Find the positions on hg37 of all SNPs in a gene.
Example command line usage:
    python3 sequenceAlignment.py -gI 79068 -acc NM_001080432.2 -f phylip
exampleGenomeFile.txt outputFile.txt
python3 sequenceAlignment.py
"""
import sys
import pandas as pd
from Bio import Entrez, SeqIO, AlignIO, Align
from Bio.Alphabet import IUPAC
from Bio.Align import MultipleSeqAlignment
from Bio.SeqRecord import SeqRecord
from urllib.error import HTTPError
# this try/except block was adapted from Bio.NaiveBayes, Copyright 2000 by
# <NAME>
try:
from selenium import webdriver
except ImportError:
raise ImportError("sequenceAlignment.py requires installation of the "
"Selenium module.")
try:
import myvariant
except ImportError:
raise ImportError("sequenceAlignment.py requires installation of the "
"myvariant module.")
# change these for personal use
Entrez.api_key = 'YOUR KEY HERE'
Entrez.email = 'YOUR EMAIL HERE'
Entrez.tool = "sequenceAlignment.py"
def getChromosome(geneID):
"""Find what chromosome a gene is on.
args:
geneID (str or int): the gene ID number
returns:
chrom (int): the chromosome number
"""
handle = Entrez.efetch(db='gene', id=geneID, rettype='xml')
result = Entrez.read(handle)
chrom = int(
result[0]['Entrezgene_locus'][0]['Gene-commentary_accession'][-2:])
return chrom
def getAccessionsFromGene(geneID):
"""Return a list of valid mRNA accession numbers from GeneView.
args:
geneID (str or int): the gene ID number
returns:
accessions (list): the valid accession numbers
Does the same thing as SequenceAlignment.validAccessions(), but
without creating a SequenceAlignment object.
"""
url = f'https://www.ncbi.nlm.nih.gov/SNP/snp_ref.cgi?locusId={geneID}'
driver = webdriver.Chrome("/usr/local/bin/chromedriver")
driver.get(url)
mrnaElements = driver.find_elements_by_class_name('gm_mrna')
accessions = list(
elem.text for elem in mrnaElements)
driver.close()
return accessions
def getGeneFromAccession(accession):
"""Find the gene ID for a given nucleotide accession number.
args:
accession (str): the accession number
returns:
geneID (str): the gene ID
"""
try:
handle = Entrez.efetch(db='nuccore', id=accession, rettype='gb',
retmode='xml')
result = Entrez.read(handle)
except HTTPError:
raise ValueError("Invalid accession number.")
# find the geneID from the EFetch XML result
for dictElement in result[0]['GBSeq_feature-table']:
if dictElement['GBFeature_key'] == 'CDS':
for elem in dictElement['GBFeature_quals']:
if elem['GBQualifier_name'] == 'db_xref':
if 'GeneID' in elem['GBQualifier_value']:
stringElement = elem['GBQualifier_value']
geneID = stringElement[stringElement.find(':') + 1:]
break
return geneID
def geneLocationHG37(geneID):
"""Find the genomic position of a gene on hg37."""
url = f'https://www.ncbi.nlm.nih.gov/gene/?term={geneID}'
driver = webdriver.Chrome("/usr/local/bin/chromedriver")
driver.get(url)
locXpath = '//*[@id="ui-ncbigrid-11"]/tbody/tr[2]/td[5]'
chromXpath = '//*[@id="ui-ncbigrid-11"]/tbody/tr[2]/td[4]'
text = driver.find_element_by_xpath(locXpath).text
text = text[text.find('(') + 1: text.find(')') + 1]
text = text[0:text.find(',')]
location = text.split('..')
chrom = driver.find_element_by_xpath(chromXpath).text
driver.close()
startPos = location[0]
endPos = location[1]
return chrom, startPos, endPos
def getHG37PositionsInRange(chromosome, startPos, endPos):
"""Return a DataFrame containing hg37 positions for all rsids in a range.
args:
chromosome (int or str): the chromosome number
startPos (int or str): the start position on the chromosome
endPos (int or str): the end position on the chromosome
returns:
df (DataFrame): all the rsids found in the genomic range
between startPos and endPos, indexed by rsid
chromosome (int or str): the chromosome number
"""
queryString = f'chr{chromosome}:{startPos}-{endPos}'
mv = myvariant.MyVariantInfo()
gen = mv.query(queryString, scopes='dbsnp.rsid',
fields='dbsnp.rsid, dbsnp.hg19.start', fetch_all=True,
assembly='hg37')
rsids = {}
for row in gen:
try:
rsid = (row['dbsnp']['rsid'])
start = (row['dbsnp']['hg19']['start'])
rsids[rsid] = start
except KeyError:
continue
df = pd.DataFrame.from_dict(rsids, orient='index')
return df, chromosome
def rsidsToHG37Positions(rsidList):
"""Return a DataFrame containing hg37 positions for a list of rsids.
args:
rsidList (list of str): the rsids
returns:
df (DataFrame): all the rsids found in the genomic range
between startPos and endPos, indexed by rsid
"""
mv = myvariant.MyVariantInfo()
gen = mv.querymany(rsidList, scopes='dbsnp.rsid',
fields='dbsnp.rsid, dbsnp.hg19.start', fetch_all=True,
assembly='hg37')
rsids = {}
for row in gen:
try:
rsid = (row['dbsnp']['rsid'])
start = (row['dbsnp']['hg19']['start'])
rsids[rsid] = start
except KeyError:
continue
df = pd.DataFrame.from_dict(rsids, orient='index')
return df
def geneIDtoHG37Positions(geneID):
"""Return a DataFrame containing hg37 positions for all rsids in a gene.
args:
geneID (int or str): the geneID
returns:
df (DataFrame): all the rsids found in the genomic range
between startPos and endPos, indexed by rsid
chromosome (int or str): the chromosome number
"""
    return getHG37PositionsInRange(*geneLocationHG37(geneID))
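def _example_lookup_positions():
    """Usage sketch for the lookup helpers above.
    The two rsids (APOE variants) and the gene ID are illustrative values.
    """
    rsid_df = rsidsToHG37Positions(['rs429358', 'rs7412'])
    gene_df, chromosome = geneIDtoHG37Positions(348)
    return rsid_df, gene_df, chromosome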
class GenomeReader:
"""Define objects to read genome files.
instantiation:
thisReader = GenomeReader ('genome_John_Cleese_v5_full.txt')
usage:
for rsid, genotype, chrom, pos in thisReader.readGenome():
print (rsid, genotype, chrom, pos)
"""
def __init__(self, filename=''):
"""Contructor: save attribute filename."""
self.filename = filename
def openFile(self):
"""Handle file opens, allowing stdin."""
        if self.filename == '':
return sys.stdin
else:
return open(self.filename)
def readGenome(self):
"""Parse the data from a 23andMe genome file."""
count = 0
with self.openFile() as handle:
line = handle.readline()
while line:
# get rid of comments at top of file and internal 23andMe
# markers.
if not line.startswith('#') and not line.startswith('i'):
entry = line.split()
rsid = entry[0]
location = entry[2]
chromosome = entry[1]
genotype = entry[3] # save genotype
count += 1
yield rsid, genotype, chromosome, location
line = handle.readline()
class SequenceAligment:
"""Define objects to create a personal alignment to the reference genome.
instantiation:
        sa = SequenceAlignment('79068', 'NM_001080432.2',
            genome_file='genome_John_Cleese_v5_full.txt', indels=True)
    usage:
        from Bio import AlignIO
        AlignIO.write([sa.get_alignment()], handle='outfile.aln',
            format='clustal')
Given a gene ID, an accession number, and a file of SNP data,
a SequenceAlignment object will scrape the dbSNP GeneView webpage using a
Chrome webdriver from the selenium module. In the future, I would like to
find a faster way to collect SNP data for a specific transcript sequence,
but as of yet I have not found one.
The SNP data scraped from GeneView are represented as a pandas DataFrame
object, which is subsequently joined with another DataFrame containing
all the corresponding rsids (and their genotypes) from the input genome
file. After dropping NaN values, what is left is a much smaller DataFrame
containing only SNPs that are present in both the sequence of interest
and the genome file.
This small subset of SNPs is then reduced to an even smaller DataFrame
containing only non-wildtype (mutant) alleles. A Seq object is then
created for the reference sequence, from which a MutableSeq object is
then created and modified according to the mutations found in the genome
file.
Finally, the two Seqs are used to create a MultipleSeqAlignment object,
comparing the personalized sequence to the reference sequence.
"""
# allowable accession number prefixes
NUCLEOTIDE_PREFIXES = {'NM', 'NR', 'XM', 'XR'}
PROTEIN_PREFIXES = {'AP', 'NP', 'YP', 'XP', 'WP'}
# ambiguous bases. Used to convert a heterozygous genotype to a Seq object,
# e.g. an 'AG' genotype would be represented as 'R'
AMBIGUOUS_DNA_LETTERS = {
'AA': 'A',
'CC': 'C',
'TT': 'T',
'GG': 'G',
'AC': 'M',
'CA': 'M',
'AG': 'R',
'GA': 'R',
'AT': 'W',
'TA': 'W',
'CG': 'S',
'GC': 'S',
'CT': 'Y',
'TC': 'Y',
'GT': 'K',
'TG': 'K'
}
def __init__(self, geneID, accession, genomeFile='', indels=True):
"""Initialize a new SequenceAnalysis object.
Args:
geneID (str or int): the gene ID number
accession (str): the accession number
            genomeFile (str): path to the 23andMe genome file. Defaults to ""
indels (bool): whether or not to include insertions/deletions
in the alignment. Defaults to True
"""
self.geneID = geneID
self.accessionNumber = accession
self.genomeFile = genomeFile
        # if indels =
if country == 'uk':
if card[0] == 240 and active_country == 'uk':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 245 and active_country == 'uk':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 302 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_victory', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 368 and active_country == 'us':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 369 and active_country == 'us':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_victory', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - Beginning of " + function.countryid2name[active_country] + " Victory step"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
def status_extra_victory_point(country, db):
print('in status_extra_victory_point - ' + country)
s = {'ge': [40, 49], 'jp':[91, 92, 93, 94, 95, 96], 'it':[159, 160, 162, 163, 164], 'uk':[], 'su':[], 'us':[], 'fr':[], 'ch':[]}
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
text = ""
extra_point = 0
for card in avaliable_card:
if country == 'ge':
if card[0] == 40 and 24 in function.control_space_list('ge', db):
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 49 and 15 in function.control_space_list('ge', db):
if 11 in function.control_space_list('ge', db):
text += function.countryid2name[country] + " gain 2 point from <b>" + card[1] + "</b>\n"
extra_point += 2
else:
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if country == 'jp':
if card[0] == 91:
navy_count = db.execute("select count(*) from piece where control = 'jp' and type = 'navy' and location != 'none';").fetchall()
if navy_count[0][0] >= 3:
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 92:
c92_point = 0
if 48 in function.control_space_list('jp', db):
c92_point += 1
if 49 in function.control_space_list('jp', db):
c92_point += 1
if 51 in function.control_space_list('jp', db):
c92_point += 1
if c92_point > 0:
text += function.countryid2name[country] + " gain " + str(c92_point) + " point from <b>" + card[1] + "</b>\n"
extra_point += c92_point
if card[0] == 93:
c93_point = 0
if 33 in function.control_space_list('jp', db):
c93_point += 1
if 36 in function.control_space_list('jp', db):
c93_point += 1
if 45 in function.control_space_list('jp', db):
c93_point += 1
if c93_point > 0:
text += function.countryid2name[country] + " gain " + str(c93_point) + " point from <b>" + card[1] + "</b>\n"
extra_point += c93_point
if card[0] == 94:
c94_point = 0
if 30 in function.control_space_list('jp', db):
c94_point += 1
if 42 in function.control_space_list('jp', db):
c94_point += 1
if c94_point > 0:
text += function.countryid2name[country] + " gain " + str(c94_point) + " point from <b>" + card[1] + "</b>\n"
extra_point += c94_point
if card[0] == 95 and not {39, 46}.isdisjoint(set(function.control_space_list('jp', db))):
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 96 and 44 in function.control_space_list('jp', db):
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if country == 'it':
if card[0] == 159 and not {20, 24}.isdisjoint(set(function.control_space_list('it', db))):
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 160 and 22 in function.control_space_list('it', db):
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 162:
c162_point = 0
if 13 in function.control_space_list('it', db) or 13 in function.control_space_list('ge', db):
c162_point += 1
if 19 in function.control_space_list('it', db) or 19 in function.control_space_list('ge', db):
c162_point += 1
if 25 in function.control_space_list('it', db) or 25 in function.control_space_list('ge', db):
c162_point += 1
if c162_point > 0:
text += function.countryid2name[country] + " gain " + str(c162_point) + " point from <b>" + card[1] + "</b>\n"
extra_point += c162_point
if card[0] == 163 and 12 in function.control_space_list('it', db):
text += function.countryid2name[country] + " gain 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 164:
navy_count = db.execute("select count(*) from piece where control = 'it' and type = 'navy' and location != 'none';").fetchall()
text += function.countryid2name[country] + " gain " + str(navy_count[0][0]) + " point from <b>" + card[1] + "</b>\n"
extra_point += navy_count[0][0]
if extra_point > 0:
return extra_point, text
else:
return None
#----------------------Draw Step-----------------------
def status_draw_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_draw_handler_info - ' + country)
s = {'ge': [48], 'jp':[], 'it':[], 'uk':[], 'su':[295, 298], 'us':[366], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 48 and active_country == 'ge':
card_count = db.execute("select count(*) from card where location = 'deck' and control = 'ge';").fetchall()
if card_count[0][0] != 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 295 and active_country == 'su':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 298 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_draw', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 366 and active_country == 'us':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_draw', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - " + "Beginning of " + function.countryid2name[active_country] + " Draw step:"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Discard Step-----------------------
def status_discard_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_discard_handler_info - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[], 'su':[301], 'us':[351], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'su':
if card[0] == 301 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_discard', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_discard', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 351 and active_country == 'us':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_discard', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_discard', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - " + "Beginning of " + function.countryid2name[active_country] + " Discard step:"
else:
reply_markup = None
            text = None
    else:
        reply_markup = None
        text = None
    # NOTE: the original is truncated here; the outer else branch and the return
    # mirror status_draw_handler_info above (assumption).
    return chat_id[0][0], text, reply_markup
:param delete_strokes: Delete the strokes that are currently stored.
:type delete_strokes: bool
'''
pass
@staticmethod
def select(pred: 'UnaryPredicate1D'):
''' Selects the ViewEdges of the ViewMap verifying a specified condition.
:param pred: The predicate expressing this condition.
:type pred: 'UnaryPredicate1D'
'''
pass
@staticmethod
def sequential_split(starting_pred: 'UnaryPredicate0D',
stopping_pred: 'UnaryPredicate0D',
sampling: float = 0.0):
''' sequential_split(pred, sampling=0.0) Splits each chain of the current set of chains in a sequential way. The points of each chain are processed (with a specified sampling) sequentially. The first point of the initial chain is the first point of one of the resulting chains. The splitting ends when no more chain can start.
:param starting_pred: The predicate on a point that expresses the starting condition. Each time this condition is verified, a new chain begins
:type starting_pred: 'UnaryPredicate0D'
:param stopping_pred: The predicate on a point that expresses the stopping condition. The chain ends as soon as this predicate is verified.
:type stopping_pred: 'UnaryPredicate0D'
:param pred: The predicate on a point that expresses the splitting condition. Each time the condition is verified, the chain is split into two chains. The resulting set of chains is a partition of the initial chain
:type pred: 'UnaryPredicate0D'
:param sampling: The resolution used to sample the chain for the predicates evaluation. (The chain is not actually resampled; a virtual point only progresses along the curve using this resolution.)
:type sampling: float
'''
pass
@staticmethod
def sort(pred: 'BinaryPredicate1D'):
''' Sorts the current set of chains (or viewedges) according to the comparison predicate given as argument.
:param pred: The binary predicate used for the comparison.
:type pred: 'BinaryPredicate1D'
'''
pass
class SShape:
''' Class to define a feature shape. It is the gathering of feature elements from an identified input shape.
'''
bbox: 'BBox' = None
''' The bounding box of the SShape.
:type: 'BBox'
'''
edges: typing.List['FEdge'] = None
''' The list of edges constituting this SShape.
:type: typing.List['FEdge']
'''
id: 'Id' = None
''' The Id of this SShape.
:type: 'Id'
'''
name: str = None
''' The name of the SShape.
:type: str
'''
vertices: typing.List['SVertex'] = None
''' The list of vertices constituting this SShape.
:type: typing.List['SVertex']
'''
def __init__(self):
''' __init__(brother) Creates a SShape class using either a default constructor or copy constructor.
:param brother: An SShape object.
:type brother: 'SShape'
'''
pass
def add_edge(self, edge: 'FEdge'):
''' Adds an FEdge to the list of FEdges.
:param edge: An FEdge object.
:type edge: 'FEdge'
'''
pass
def add_vertex(self, vertex: 'SVertex'):
''' Adds an SVertex to the list of SVertex of this Shape. The SShape attribute of the SVertex is also set to this SShape.
:param vertex: An SVertex object.
:type vertex: 'SVertex'
'''
pass
def compute_bbox(self):
''' Compute the bbox of the SShape.
'''
pass
class SVertex:
''' Class hierarchy: Interface0D > SVertex Class to define a vertex of the embedding.
'''
curvatures: tuple = None
''' Curvature information expressed in the form of a seven-element tuple (K1, e1, K2, e2, Kr, er, dKr), where K1 and K2 are scalar values representing the first (maximum) and second (minimum) principal curvatures at this SVertex, respectively; e1 and e2 are three-dimensional vectors representing the first and second principal directions, i.e. the directions of the normal plane where the curvature takes its maximum and minimum values, respectively; and Kr, er and dKr are the radial curvature, radial direction, and the derivative of the radial curvature at this SVertex, respectively.
:type: tuple
'''
id: 'Id' = None
''' The Id of this SVertex.
:type: 'Id'
'''
normals: typing.List['mathutils.Vector'] = None
''' The normals for this Vertex as a list. In a sharp surface, an SVertex has exactly one normal. In a smooth surface, an SVertex can have any number of normals.
:type: typing.List['mathutils.Vector']
'''
normals_size: int = None
''' The number of different normals for this SVertex.
:type: int
'''
point_2d: 'mathutils.Vector' = None
''' The projected 3D coordinates of the SVertex.
:type: 'mathutils.Vector'
'''
point_3d: 'mathutils.Vector' = None
''' The 3D coordinates of the SVertex.
:type: 'mathutils.Vector'
'''
viewvertex: 'ViewVertex' = None
''' If this SVertex is also a ViewVertex, this property refers to the ViewVertex, and None otherwise.
:type: 'ViewVertex'
'''
def __init__(self):
''' __init__(brother) __init__(point_3d, id) Builds a SVertex using the default constructor, copy constructor or the overloaded constructor which builds a SVertex from 3D coordinates and an Id.
:param brother: A SVertex object.
:type brother: 'SVertex'
:param point_3d: A three-dimensional vector.
:type point_3d: 'mathutils.Vector'
:param id: An Id object.
:type id: 'Id'
'''
pass
def add_fedge(self, fedge: 'FEdge'):
''' Add an FEdge to the list of edges emanating from this SVertex.
:param fedge: An FEdge.
:type fedge: 'FEdge'
'''
pass
def add_normal(self, normal: typing.List['mathutils.Vector']):
''' Adds a normal to the SVertex's set of normals. If the same normal is already in the set, nothing changes.
:param normal: A three-dimensional vector.
:type normal: typing.List['mathutils.Vector']
'''
pass
class SVertexIterator:
''' Class hierarchy: Iterator > SVertexIterator Class representing an iterator over SVertex of a ViewEdge . An instance of an SVertexIterator can be obtained from a ViewEdge by calling verticesBegin() or verticesEnd().
'''
object: 'SVertex' = None
''' The SVertex object currently pointed by this iterator.
:type: 'SVertex'
'''
t: float = None
''' The curvilinear abscissa of the current point.
:type: float
'''
u: float = None
''' The point parameter at the current point in the 1D element (0 <= u <= 1).
:type: float
'''
def __init__(self):
''' __init__(brother) __init__(vertex, begin, previous_edge, next_edge, t) Build an SVertexIterator using either the default constructor, copy constructor, or the overloaded constructor that starts iteration from an SVertex object vertex.
:param brother: An SVertexIterator object.
:type brother: 'SVertexIterator'
:param vertex: The SVertex from which the iterator starts iteration.
:type vertex: 'SVertex'
:param begin: The first SVertex of a ViewEdge.
:type begin: 'SVertex'
:param previous_edge: The previous FEdge coming to vertex.
:type previous_edge: 'FEdge'
:param next_edge: The next FEdge going out from vertex.
:type next_edge: 'FEdge'
:param t: The curvilinear abscissa at vertex.
:type t: float
'''
pass
class Stroke:
''' Class hierarchy: Interface1D > Stroke Class to define a stroke. A stroke is made of a set of 2D vertices ( StrokeVertex ), regularly spaced out. This set of vertices defines the stroke's backbone geometry. Each of these stroke vertices defines the stroke's shape and appearance at this vertex position.
'''
id: 'Id' = None
''' The Id of this Stroke.
:type: 'Id'
'''
length_2d: float = None
''' The 2D length of the Stroke.
:type: float
'''
medium_type: 'MediumType' = None
''' The MediumType used for this Stroke.
:type: 'MediumType'
'''
texture_id: int = None
    ''' The ID of the texture used to simulate the marks system for this Stroke.
:type: int
'''
tips: bool = None
''' True if this Stroke uses a texture with tips, and false otherwise.
:type: bool
'''
def Stroke(self):
''' Stroke(brother) Creates a Stroke using the default constructor or copy constructor
'''
pass
def compute_sampling(self, n: int) -> float:
''' Compute the sampling needed to get N vertices. If the specified number of vertices is less than the actual number of vertices, the actual sampling value is returned. (To remove Vertices, use the RemoveVertex() method of this class.)
:param n: The number of stroke vertices we eventually want in our Stroke.
:type n: int
:rtype: float
:return: The sampling that must be used in the Resample(float) method.
'''
pass
def insert_vertex(self, vertex: 'StrokeVertex',
next: 'StrokeVertexIterator'):
''' Inserts the StrokeVertex given as argument into the Stroke before the point specified by next. The length and curvilinear abscissa are updated consequently.
:param vertex: The StrokeVertex to insert in the Stroke.
:type vertex: 'StrokeVertex'
:param next: A StrokeVertexIterator pointing to the StrokeVertex before which vertex must be inserted.
:type next: 'StrokeVertexIterator'
'''
pass
# Repository: IDEHCO3/kanban-backend
import requests, os, sys
#servidor = ''
#servidor = 'http://LUC00557347.ibge.gov.br/'
SERVER = 'http://LUC00557196:8000/'
#SERVER = "http://172.30.11.72:8000/"
class RequestTest():
def __init__(self, uri, expec_status_code, method='GET', default_server=SERVER):
self.method = method
self.uri = default_server + uri
self.expec_status_code = expec_status_code
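# Each RequestTest pairs a URI (relative to SERVER unless default_server is overridden)
# with the HTTP status code expected for it and the method to use; test_requests()
# further below iterates such a list and flags any response whose status differs.
# Minimal sketch of how one entry is exercised (names as defined in this file):
#   rt = RequestTest('controle-list/usuario-list/1/', 200)
#   assert requests.get(rt.uri).status_code == rt.expec_status_code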
arr_get_for_non_spatial_resource = [
RequestTest("controle-list/usuario-list/1/", 200),
RequestTest("controle-list/usuario-list/1/nome,email", 200),
RequestTest("controle-list/usuario-list/1/projection/nome,email", 200),
]
arr_get_for_collection = [
RequestTest('controle-list/gasto-list/count-resource', 200),
RequestTest('controle-list/gasto-list/offset-limit/1&10', 200),
RequestTest('controle-list/gasto-list/offset-limit/1&10/data,valor', 400),
RequestTest('controle-list/gasto-list/group-by-count/tipo_gasto', 200),
RequestTest('controle-list/gasto-list/filter/tipo_gasto/eq/3', 200),
RequestTest('api/bcim/unidades-federativas/filter/geom/within/' + SERVER + 'api/bcim/municipios/3159407/geom/*', 200),
RequestTest('api/bcim/unidades-federativas/?*contains=POINT(-42 -21)', 200),
RequestTest('api/bcim/unidades-federativas/?*contains=POINT(-42 -21)&sigla=RJ', 200),
RequestTest('api/bcim/unidades-federativas/?*contains=URL&sigla=RJ', 200),
RequestTest('api/bcim/unidades-federativas/contains/POINT(-42 -21)', 200),
RequestTest('api/bcim/aldeias-indigenas/within/POLYGON((-41.8 -21.2,-41.8 -17.8,-28.8 -17.8,-28.8 -21.,-41.8 -21.2))/', 200),
RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*', 200),
RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/PA/*', 200),
RequestTest('api/bcim/unidades-federativas/filter/sigla/in/ES&PA/', 200),
RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*or/within/' + SERVER + 'api/bcim/unidades-federativas/PA/*', 200),
RequestTest('api/bcim/aldeias-indigenas/filter/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/PA/*', 200),
RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*and/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*and/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*and/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*', 200),
RequestTest('api/bcim/aldeias-indigenas/filter/id_objeto/eq/841/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/geom/*or/' + SERVER + 'api/bcim/unidades-federativas/PR/*', 200),
RequestTest('api/bcim/municipios/within/{"type":"Polygon","coordinates":[[[-48.759514611370854,-28.3426735036349],[-48.631647133384185,-28.3426735036349],[-48.631647133384185,-28.082673631081306],[-48.759514611370854,-28.082673631081306],[-48.759514611370854,-28.3426735036349]]]}', 200),
RequestTest('api/bcim/municipios/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*', 200),
RequestTest('api/bcim/municipios/filter/geom/overlaps/' + SERVER + 'api/bcim/unidades-federativas/ES/*or/geom/within/' + SERVER + 'api/bcim/unidades-federativas/ES/*and/geocodigo/startswith/32/', 200),
RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/PA/', 200),
RequestTest('api/bcim/aldeias-indigenas/within/' + SERVER + 'api/bcim/unidades-federativas/PA', 200),
RequestTest('api/bcim/aldeias-indigenas/collect/nome&geom/buffer/0.5', 200),
RequestTest('api/bcim/unidades-federativas/filter/sigla/in/RJ&ES/*collect/nome&geom/buffer/0.2', 200),
    RequestTest('api/bcim/aldeias-indigenas/offset-limit/0&2/nome,geom,nomeabrev/*collect/nome&geom/buffer/0.5', 400), # WRONG SYNTAX (SERVER EXECUTES ONLY api/bcim/aldeias-indigenas/offset-limit/0/2/ and ignores the rest - acts as offset-limit operation)
    RequestTest('api/bcim/aldeias-indigenas/offset-limit/0&2/nome,geom/*collect/geom/buffer/0.5', 400), # WRONG SYNTAX (SERVER EXECUTES ONLY api/bcim/aldeias-indigenas/offset-limit/0/2/ and ignores the rest - acts as offset-limit operation)
]
arr_get_for_spatial_operations = [
RequestTest("api/bcim/unidades-federativas/ES/area", 200),
RequestTest("api/bcim/unidades-federativas/ES/boundary", 200),
RequestTest("api/bcim/unidades-federativas/ES/buffer/0.2", 200),
RequestTest("api/bcim/unidades-federativas/ES/centroid", 200),
RequestTest("api/bcim/unidades-federativas/ES/contains/" + SERVER + "api/bcim/aldeias-indigenas/587/", 200),
RequestTest("api/bcim/unidades-federativas/ES/convex_hull", 200),
RequestTest("api/bcim/aldeias-indigenas/587/coords", 200),
RequestTest("api/bcim/trechos-hidroviarios/59121/crosses/" + SERVER + "api/bcim/municipios/3126406", 200),
RequestTest("api/bcim/unidades-federativas/RJ/difference/" + SERVER + "api/bcim/municipios/3304300/", 200),
RequestTest("api/bcim/unidades-federativas/ES/dims", 200),
RequestTest("api/bcim/aldeias-indigenas/589/disjoint/" + SERVER + "api/bcim/unidades-federativas/RJ/", 200),
RequestTest("api/bcim/unidades-federativas/ES/distance/" + SERVER + "api/bcim/unidades-federativas/AM/", 200),
RequestTest("api/bcim/unidades-federativas/ES/empty", 200),
RequestTest("api/bcim/unidades-federativas/ES/envelope", 200),
RequestTest("api/bcim/unidades-federativas/ES/equals/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/unidades-federativas/ES/equals_exact/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/unidades-federativas/ES/ewkb", 200),
RequestTest("api/bcim/unidades-federativas/ES/ewkt", 200),
RequestTest("api/bcim/unidades-federativas/ES/extent", 200),
RequestTest("api/bcim/unidades-federativas/ES/geom_type", 200),
RequestTest("api/bcim/unidades-federativas/ES/geom_typeid", 200),
RequestTest("api/bcim/unidades-federativas/ES/hasz", 200),
RequestTest("api/bcim/unidades-federativas/ES/hex", 200),
RequestTest("api/bcim/unidades-federativas/ES/hexewkb", 200),
RequestTest("api/bcim/unidades-federativas/ES/intersection/" + SERVER + "api/bcim/unidades-federativas/RJ/envelope/", 200),
RequestTest("api/bcim/unidades-federativas/ES/intersects/" + SERVER + "api/bcim/unidades-federativas/RJ/", 200),
RequestTest("api/bcim/aldeias-indigenas/587/json", 200),
RequestTest("api/bcim/aldeias-indigenas/587/kml", 200),
RequestTest("api/bcim/trechos-hidroviarios/59121/length", 200),
RequestTest("api/bcim/unidades-federativas/ES/num_geom", 200),
RequestTest("api/bcim/municipios/3301009/overlaps/" + SERVER + "api/bcim/unidades-federativas/ES", 200),
RequestTest("api/bcim/unidades-federativas/ES/point_on_surface", 200),
RequestTest("api/bcim/unidades-federativas/ES/relate/" + SERVER + "api/bcim/unidades-federativas/GO/", 200),
RequestTest("api/bcim/unidades-federativas/ES/relate_pattern/" + SERVER + "api/bcim/unidades-federativas/GO/&FF*FF****", 200),
RequestTest("api/bcim/trechos-hidroviarios/59121/ring", 200),
RequestTest("api/bcim/unidades-federativas/ES/simple", 200),
RequestTest("api/bcim/unidades-federativas/ES/simplify/0.0&False", 200),
RequestTest("api/bcim/unidades-federativas/ES/srid", 200),
RequestTest("api/bcim/unidades-federativas/ES/srs", 200),
RequestTest("api/bcim/vegetacoes-de-restinga/2947/sym_difference/" + SERVER + "api/bcim/unidades-federativas/ES", 200),
RequestTest("api/bcim/unidades-federativas/AM/touches/" + SERVER + "api/bcim/unidades-federativas/RJ/", 200),
RequestTest("api/bcim/unidades-federativas/ES/transform/4326&false", 200),
RequestTest("api/bcim/unidades-federativas/ES/union/" + SERVER + "api/bcim/unidades-federativas/RJ", 200),
RequestTest("api/bcim/unidades-federativas/ES/valid", 200),
RequestTest("api/bcim/unidades-federativas/ES/valid_reason", 200),
RequestTest("api/bcim/aldeias-indigenas/587/within/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/unidades-federativas/ES/wkb", 200),
RequestTest("api/bcim/unidades-federativas/ES/wkt", 200),
RequestTest("api/bcim/aldeias-indigenas/589/x", 200),
RequestTest("api/bcim/aldeias-indigenas/589/y", 200),
RequestTest("api/bcim/aldeias-indigenas/589/z", 200),
RequestTest("api/bcim/trechos-hidroviarios/59121/x", 200),
RequestTest("api/bcim/trechos-hidroviarios/59121/y", 200),
RequestTest("api/bcim/trechos-hidroviarios/59121/z", 200),
]
arr_get_for_projection = [
# only attributes
RequestTest("api/bcim/unidades-federativas/nome", 200),
RequestTest("api/bcim/unidades-federativas/nome/", 200),
RequestTest("api/bcim/unidades-federativas/nome,geom", 200),
RequestTest("api/bcim/unidades-federativas/nome,geom/", 200),
RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo", 200), # attributes and projection
RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/", 200),
# filter
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES", 200),
RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/filter/sigla/in/RJ&ES", 200),
# collect
RequestTest("api/bcim/unidades-federativas/collect/geom&nome/upper", 200),
RequestTest("api/bcim/unidades-federativas/projection/geom,nome/collect/geom&nome/upper", 200),
RequestTest("api/bcim/unidades-federativas/projection/sigla,geocodigo/collect/geom&nome/upper", 400), # collected attributes not in projection (must fail)
RequestTest("api/bcim/unidades-federativas/projection/sigla,geocodigo/collect/geom&sigla/lower", 400), # operated attribute in projection but lists differs (priorize projection in this case)
# count_resource
RequestTest("api/bcim/unidades-federativas/count-resource", 200),
RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/count-resource", 200),
# filter_and_collect
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES/*collect/geocodigo&sigla/lower", 200),
RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/filter/sigla/in/RJ&ES/*collect/geocodigo&sigla/lower", 200),
RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/filter/sigla/in/RJ&ES/*collect/sigla&geom/buffer/0.2", 400), # (must return status code 400)
# filter_and_count_resource
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES/*count-resource", 200),
RequestTest("api/bcim/unidades-federativas/projection/nome,geocodigo/filter/sigla/in/RJ&ES/*count-resource", 200),
# offset_limit
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/", 200),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,geocodigo/", 400),
RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0&2/", 200),
RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0&2/sigla,geocodigo/", 400),
RequestTest("api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0&2/nome,geocodigo,sigla/", 400), # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/geocodigo,sigla/offset-limit/0/2/ and ignore the rest - act as offset-limit operation)
# distinct
RequestTest("controle-list/usuario-list/distinct/email", 200),
RequestTest("controle-list/usuario-list/distinct/id&nome&email", 200),
RequestTest("controle-list/usuario-list/projection/nome,email,data_nascimento/distinct/nome&email", 200),
# offset_limit_and_collect
RequestTest("api/bcim/unidades-federativas/offset-limit/5&2/collect/sigla&geom/buffer/0.8", 200),
RequestTest("api/bcim/unidades-federativas/offset-limit/5&2/geom,sigla/*collect/sigla&geom/buffer/0.8", 400),
RequestTest("api/bcim/unidades-federativas/offset-limit/5&2/sigla,geom,nome/*collect/sigla&geom/buffer/0.8", 400), # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/collect/sigla&geom/buffer/0.8", 200),
RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/sigla,geocodigo/*collect/sigla&geom/buffer/0.8", 400), # projection list == collect list != offset_limit list # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/sigla,geom/*collect/nome&sigla&geom/buffer/0.8", 400), # projection list == offset_limit list != collect list # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5&2/sigla,geom/collect/sigla&geom/buffer/0.8", 400), # projection list == offset_limit list == collect list # WRONG SINTAX (SERVER EXECUTE ONLY api/bcim/unidades-federativas/projection/sigla,geom/offset-limit/5/2/ and ignore the rest - act as offset-limit operation)
#FeatureCollection operations
RequestTest("api/bcim/aldeias-indigenas/within/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/nome,nomeabrev/within/" + SERVER + "api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/unidades-federativas/contains/" + SERVER + "api/bcim/aldeias-indigenas/623", 200),
RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/contains/" + SERVER + "api/bcim/aldeias-indigenas/623", 200),
]
arr_get_for_complex_requests = [
#("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/ES/*collect/geom/buffer/0.2/!union/(" + SERVER + "api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/geom/buffer/0.2), 200),"
RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/ES/*collect/geom/buffer/0.2/!union!/" + SERVER + "api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/geom/buffer/0.2", 200),
RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/ES/*collect/nome&geom/buffer/0.2/!union!/" + SERVER + "api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/nome&geom/buffer/0.2", 200),
RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/nome&geom/buffer/0.2/!union!/" + SERVER + "api/bcim/unidades-federativas/MG/envelope/", 200),
RequestTest("api/bcim/aldeias-indigenas/filter/geom/within/" + SERVER + "api/bcim/unidades-federativas/AM/*collect/nome&geom/buffer/0.2/!union!/Polygon((-51.04196101779323 -22.915330279829785, -39.86109832699603 -22.915330279829785, -39.86109832699603 -14.227537498798952, -51.04196101779323 -14.227537498798952, -51.04196101779323 -22.915330279829785))", 200),
]
arr_get_for_geometry_collection_operation = [
RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/geom,nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/", 200),
RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*count-resource", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*count-resource", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/nome,geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*count-resource", 200),
RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome/upper", 200),
RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom&nome/upper", 200),
RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom/buffer/1.2", 200),
RequestTest("api/bcim/aldeias-indigenas/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome&geom/buffer/1.2", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/nome/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome/upper", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom/buffer/1.2", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome&geom/buffer/1.2", 400),
RequestTest("api/bcim/aldeias-indigenas/projection/nome,geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom&nome/upper", 200),
RequestTest("api/bcim/aldeias-indigenas/projection/nome,geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/nome/upper", 400),
RequestTest("api/bcim/aldeias-indigenas/projection/geom/within/"+ SERVER +"api/bcim/unidades-federativas/ES/*collect/geom&nome/upper", 400),
]
arr_get_for_join_operation = [
# NonSpatialResource (1 resource) join FeatureResource (1 resource) (Not joinable)
#RequestTest("controle-list/usuario-list/1/join/data_nascimento&geocodigo/" + SERVER + "api/bcim/unidades-federativas/ES", 400),
# NonSpatialResource (1 resource) join FeatureResource (n resources) (Not joinable)
#RequestTest("controle-list/usuario-list/1/join/data_nascimento&geocodigo/" + SERVER + "api/bcim/unidades-federativas/", 400),
# FeatureResource (1 resource) join NonSpatialResource (1 resource)
RequestTest("api/bcim/municipios/3304557/join/geocodigo&geocodigo/http://172.30.10.86/api/munic-2015/planejamento-urbano-list/3243/", 200),
RequestTest('api/bcim/unidades-federativas/ES/join/geocodigo&uf_geocodigo/{"uf_geocodigo":"32","pib_estimado":1000000000}', 200),
#("api/bcim/unidades-federativas/ES/join/geocodigo&geocodigo/http://gabriel:8880/estados-list/unidade-federativa-list/2/", 200),
# FeatureResource (1 resource) join CollectionResource (n resources)
RequestTest("api/bcim/municipios/3304557/join/geocodigo&cod_municipio/http://172.30.10.86/api/pib-municipio/faturamento-list/filter/cod_municipio/eq/3304557", 200),
#("api/bcim/unidades-federativas/ES/join/geocodigo&geocodigo/http://gabriel:8880/estados-list/unidade-federativa-list/", 200),
# FeatureResource join NonSpatialResource (Not joinable)
RequestTest("api/bcim/municipios/3304557/join/geocodigo&nome/http://172.30.10.86/api/munic-2015/planejamento-urbano-list/3243/", 400),
#("api/bcim/unidades-federativas/ES/join/geocodigo&nome/http://gabriel:8880/estados-list/unidade-federativa-list/2/", 400),
# FeatureCollection (n resources) join CollectionResource (n resources)
RequestTest("api/bcim/unidades-federativas/join/geocodigo&cod_estado/http://172.30.10.86/esporte-list/cond-funcionamento-list/", 200),
#("api/bcim/unidades-federativas/join/geocodigo&geocodigo/http://gabriel:8880/estados-list/unidade-federativa-list/", 200),
# CollectionResource (n resources) join FeatureCollection (n resources)
#("esporte-list/cond-funcionamento-list/join/cod_estado&geocodigo/http://172.30.10.86/api/bcim/unidades-federativas/offset_limit/0&2/geocodigo,nome,geom", 200),
# FeatureCollection (n resources) join CollectionResource (n resources)
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG/join/geocodigo&cod_estado/http://172.30.10.86/esporte-list/cond-funcionamento-list/filter/cod_estado/in/31&32&33&35/", 200),
]
arr_options_for_collection_operation = [
RequestTest("controle-list/usuario-list/filter/id/gt/5/", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/collect/nome/upper", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/collect/id&email/upper", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/count-resource", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2/nome", 400, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2/nome,email", 400, method="OPTIONS"),
RequestTest("controle-list/usuario-list/distinct/nome", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/group-by/nome", 400, method="OPTIONS"), # the operation 'group_by' doesn't exists anymore
RequestTest("controle-list/usuario-list/group-by-count/nome", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/filter/id/gt/5/*collect/nome/upper", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/filter/id/gt/5/*collect/id&email/upper", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/filter/id/gt/5/*count-resource", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2/collect/nome/upper", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2/collect/id&nome/upper", 200, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2/nome/collect/nome/upper", 400, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2/nome,id/collect/id&nome/upper", 400, method="OPTIONS"),
RequestTest("controle-list/usuario-list/offset-limit/0&2/nome/collect/id&nome/upper", 400, method="OPTIONS"),
RequestTest("controle-list/usuario-list/filter/id/gt/5/*count-resource", 200, method="OPTIONS"),
# Collection operation used by FeatureCollection
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/nome/upper", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/nome&sigla/lower", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/geom&sigla/lower", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/buffer/0.2", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/geom/buffer/0.2", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/geom/area", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/area", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/point_on_surface", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/collect/geom/point_on_surface", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/count-resource", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,sigla", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,geom", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/distinct/nome", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/group-by/nome", 400, method="OPTIONS"), # the operation 'group_by' doesn't exists anymore
RequestTest("api/bcim/unidades-federativas/group-by-count/nome", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/nome/upper", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/nome&sigla/lower", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom&sigla/lower", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/sigla&geom/buffer/0.2", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom/buffer/0.2", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom/area", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/sigla&geom/area", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/sigla&geom/point_on_surface", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*collect/geom/point_on_surface", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/nome/upper", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/nome&sigla/lower", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom&sigla/lower", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/sigla&geom/buffer/0.2", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom/buffer/0.2", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom/area", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/sigla&geom/area", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/sigla&geom/point_on_surface", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/collect/geom/point_on_surface", 200, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome/collect/nome/upper", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/nome,sigla/collect/nome&sigla/lower", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/geom&sigla/lower", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/sigla&geom/buffer/0.2", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom/collect/geom/buffer/0.2", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom/collect/geom/area", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/sigla&geom/area", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/sigla,geom/collect/sigla&geom/point_on_surface", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/offset-limit/0&2/geom/collect/geom/point_on_surface", 400, method="OPTIONS"),
RequestTest("api/bcim/unidades-federativas/filter/sigla/in/RJ&ES&MG&SP/*count-resource", 200, method="OPTIONS"),
]
# The suffixed requests only need simple tests (since requests suffixed with '.jsonld' are just passed on to the options() method)
# More complex tests must be applied to OPTIONS requests (without the suffix)
arr_get_for_collect_operation_context = [
RequestTest("controle-list/usuario-list/filter/id/gt/5.jsonld", 200),
RequestTest("controle-list/usuario-list/collect/nome/upper.jsonld", 200),
RequestTest("controle-list/usuario-list/collect/id&email/upper.jsonld", 200),
RequestTest("controle-list/usuario-list/projection/id,email/collect/id&email/upper.jsonld", 200),
RequestTest("controle-list/usuario-list/projection/email/collect/id&email/upper.jsonld", 400),
RequestTest("api/bcim/unidades-federativas/collect/nome/upper.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/nome&sigla/lower.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/geom&sigla/lower.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/buffer/0.2.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/geom/buffer/0.2.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/geom/area.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/area.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/sigla&geom/point_on_surface.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/collect/geom/point_on_surface.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/projection/sigla,geom/collect/sigla&geom/area.jsonld", 200),
RequestTest("api/bcim/unidades-federativas/projection/sigla/collect/sigla&geom/area.jsonld", 400),
]
arr_get_for_tiff_resource = [
RequestTest('raster/imagem-exemplo-tile1-list/61/', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/bands', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/destructor', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/driver', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/extent', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/geotransform', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/height', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/info', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/metadata', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/name', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/origin', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/ptr', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/ptr_type', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/scale', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/skew', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/srid', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/srs', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/transform/3086', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/vsi_buffer', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/warp', 200),
RequestTest('raster/imagem-exemplo-tile1-list/61/width', 200),
]
arr_options_for_tiff_resource = [
RequestTest('raster/imagem-exemplo-tile1-list/61/', 200, method='OPTIONS')
]
def test_requests(request_test_list, test_label=''):
default_init_test_label = "Initializing test set:"
init_label_len = len(test_label) + len(default_init_test_label) + 5
print("\n\n" + init_label_len * "*" + "\n* " + default_init_test_label + " " + test_label + " *\n" + init_label_len * "*" + "\n\n")
requests_with_error = []
for request_test in request_test_list:
print('Executing: ' + request_test.uri)
if request_test.method == 'OPTIONS':
res = requests.options(request_test.uri)
else:
res = requests.get(request_test.uri)
        if res.status_code != request_test.expec_status_code:
            # NOTE: the original source is truncated at this point; collecting the failing
            # request in requests_with_error (initialised above) is the apparent intent.
            requests_with_error.append(request_test)
Jd[:,i] = np.ravel(Jtemp)
beta = beta + np.dot(Jvi[:,i-1], qd[i-1]).reshape((3,1))
# cross z0 x beta
Jvd = np.cross(np.array([0, 0, 1]).reshape((3,1)), beta, axis=0)
Jwd = np.array([0, 0, 0]).reshape((3,1))
Jtemp = np.concatenate((Jvd, Jwd))
Jd[:,0] = np.ravel(Jtemp)
return Jd
def eul2Ja(self, phi,theta,psi):
Ja = np.array([[ 0, -np.sin(phi), np.cos(phi) * np.sin(theta)],
[0, np.cos(phi), np.sin(phi) * np.sin(theta)],
[1, 0, np.cos(theta) ]])
return Ja
def rpy2Ja(self, r,p,y):
Ja = np.array([[ 1, 0, np.sin(p)],
[0, np.cos(r), -np.cos(p) * np.sin(r)],
[0, np.sin(r), np.cos(p) * np.cos(r)]])
return Ja
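    # eul2Ja/rpy2Ja above return the 3x3 map T(angles) from Euler/RPY angle rates to the
    # angular velocity: w = T(angles) @ [phi_d, theta_d, psi_d]^T (respectively
    # [r_d, p_d, y_d]^T). analyticJacobian() further below inverts this map and applies
    # blockdiag(I, inv(T)) to turn the geometric Jacobian into an analytical one.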
def quat2Ja_temp(self, q):
# Method from Robotics Handbook.
e0 = q.w
e1 = q.x
e2 = q.y
e3 = q.z
Es = np.array([[-e1, -e2, -e3],[e0, e3, -e2],[-e3, e0, e1], [e2, -e1, e0]])
# Es = np.array([[e0, e3, -e2],[-e3, e0, e1], [e2, -e1, e0]])
Eds = (1.0/2.0) * Es
return Eds
def quat2Ja(self, q):
# Method from
# Modelling and Control of Robot Manipulators
# Authors: Sciavicco, Lorenzo, Siciliano, Bruno
eta = q.w
e = np.array([q.x, q.y, q.z])
e_skew = np.array([[0, -e[2], e[1]],
[e[2], 0, -e[0]],
[-e[1], e[0], 0]])
eta_d = -(1.0/2.0) * e
e_d = (1.0/2.0)*(eta * np.eye(3) - e_skew)
Eds = np.vstack((eta_d, e_d))
return Eds
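    # Worked form of the matrix returned above (quaternion propagation, Sciavicco & Siciliano):
    # with q = (eta, e) and angular velocity w,
    #   eta_dot = -(1/2) * e^T * w
    #   e_dot   =  (1/2) * (eta*I - S(e)) * w
    # so stacking the two rows gives quat_dot = Eds @ w, where S(e) is the skew-symmetric
    # matrix of e (e_skew above).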
def quatprop_E(self, q):
eta = q.w
e = np.array([q.x, q.y, q.z])
e_skew = np.array([[0, -e[2], e[1]],
[e[2], 0, -e[0]],
[-e[1], e[0], 0]])
E = (eta * np.eye(3) - e_skew)
return E
def analyticJacobian(rb, J, x_orient,*args):
if args:
if args[0].lower() in ['rpy']:
A = rb.rpy2Ja(x_orient[0],x_orient[1],x_orient[2])
B = block_diag(np.eye(3),np.linalg.inv(A))
Ja = np.dot(B,J)
if args[0].lower() in ['quaternion']:
A = rb.quat2Ja(x_orient)
B = block_diag(np.eye(3), A)
Ja = np.dot(B,J)
if args[0].lower() in ['quaternion6']:
A = rb.quat2Ja(x_orient)
B = block_diag(np.eye(3), A[1:,:])
Ja = np.dot(B,J)
return Ja
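# Minimal usage sketch (illustrative, assumes a 6xN geometric Jacobian J and the current
# orientation are already available; `rb` is the robot object exposing the methods above):
#   Ja_rpy  = rb.analyticJacobian(J, np.array([r, p, y]), 'rpy')                      # 6xN, maps qd to RPY rates
#   Ja_quat = rb.analyticJacobian(J, quaternion.from_float_array(q_float), 'quaternion')  # 7xN
#   xd = np.dot(Ja_quat, qd)   # [linear velocity; quaternion derivative]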
def analyticJacobianDot(rb, J, Jd, quat, quat_d):
# What we need:
# qdd = (1/2)*wd*quat + (1/2)*w*quat_d
# Optional (fix later for online computation)
# Compute quaternion derivative
# A0 = rb.quat2Ja(quat)
# B0 = block_diag(np.eye(3), A0)
# Ja = np.dot(B,J)
# xd = np.dot(Ja, qd)
# quat_d = xd[:3]
# np.set_printoptions(suppress=True)
# Compute (1/2)*w*quat_d
A_qd = rb.quat2Ja(quat_d)
B1 = block_diag(np.zeros((3,3)), A_qd[1:,:])
B_qd = np.dot(B1, J)
# Compute (1/2)*wd*quat
A_q = rb.quat2Ja(quat)
B_q = block_diag(np.eye(3), A_q[1:,:])
# Computation
Jad = np.dot(B_q, Jd) + B_qd
return Jad
def analyticJacobianDot2(rb, J, Jd, qd, quat):
# What we need:
# qdd = (1/2)*wd*quat + (1/2)*w*quat_d
# Optional (fix later for online computation)
# Compute quaternion derivative
A0 = rb.quat2Ja(quat)
B0 = block_diag(np.eye(3), A0)
Ja = np.dot(B0,J)
xd = np.dot(Ja, qd)
quat_d_float = xd[3:]
quat_d = quaternion.from_float_array(quat_d_float)
# np.set_printoptions(suppress=True)
# Compute (1/2)*w*quat_d
A_qd = rb.quat2Ja(quat_d)
B1 = block_diag(np.zeros((3,3)), A_qd[:,:])
B_qd = np.dot(B1, J)
# Compute (1/2)*wd*quat
A_q = rb.quat2Ja(quat)
B_q = block_diag(np.eye(3), A_q[:,:])
# Computation
Jad = np.dot(B_q, Jd) + B_qd
return Jad
def pinv(self, J):
u, s, vh = np.linalg.svd(J.T, full_matrices=True)
u.shape, s.shape, vh.shape
rho = 0.2
S2 = np.dot(J.T,0)
for i in range(len(s)):
S2[i,i] = s[i] / (s[i]**2 + rho**2)
JpinvT = np.dot(np.dot(vh.T,S2.T),u.T)
Jpinv = JpinvT.T
return Jpinv
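    # The pinv() above is a damped least-squares (singularity-robust) pseudo-inverse:
    # each singular value sigma of J is inverted as sigma/(sigma^2 + rho^2) instead of
    # 1/sigma, so the gain smoothly goes to zero near singular configurations
    # (damping rho = 0.2 here). Illustrative use: qd = np.dot(rb.pinv(J), xd_desired)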
def rounding_quaternion(self, q0):
        tol = np.finfo(float).eps  # np.float was removed in NumPy >= 1.24; use the builtin float
q = np.array([q0.w,q0.x,q0.y,q0.z])
for i in range(4):
if (q[i] < tol) and (q[i] > -tol):
q[i] = 0
return quaternion.from_float_array(q)
def mat2quat(self, M):
''' Calculate quaternion corresponding to given rotation matrix
Parameters
----------
M : array-like
3x3 rotation matrix
Returns
-------
q : (4,) array
closest quaternion to input matrix, having positive q[0]
Notes
-----
Method claimed to be robust to numerical errors in M
Constructs quaternion by calculating maximum eigenvector for matrix
K (constructed from input `M`). Although this is not tested, a
maximum eigenvalue of 1 corresponds to a valid rotation.
A quaternion q*-1 corresponds to the same rotation as q; thus the
sign of the reconstructed quaternion is arbitrary, and we return
quaternions with positive w (q[0]).
References
----------
* http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
    * Bar-Itzhack, Itzhack Y. (2000), "New method for extracting the
quaternion from a rotation matrix", AIAA Journal of Guidance,
Control and Dynamics 23(6):1085-1087 (Engineering Note), ISSN
0731-5090
Examples
--------
>>> import numpy as np
>>> q = mat2quat(np.eye(3)) # Identity rotation
>>> np.allclose(q, [1, 0, 0, 0])
True
>>> q = mat2quat(np.diag([1, -1, -1]))
>>> np.allclose(q, [0, 1, 0, 0]) # 180 degree rotn around axis 0
True
'''
# Qyx refers to the contribution of the y input vector component to
# the x output vector component. Qyx is therefore the same as
# M[0,1]. The notation is from the Wikipedia article.
Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat
# Fill only lower half of symmetric matrix
K = np.array([
[Qxx - Qyy - Qzz, 0, 0, 0 ],
[Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0 ],
[Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0 ],
[Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]]
) / 3.0
# Use Hermitian eigenvectors, values for speed
vals, vecs = np.linalg.eigh(K)
# Select largest eigenvector, reorder to w,x,y,z quaternion
q = vecs[[3, 0, 1, 2], np.argmax(vals)]
# Prefer quaternion with positive w
# (q * -1 corresponds to same rotation as q)
if q[0] < 0:
q *= -1
return q
# Dynamics
def mdh_calc_transformation(rb, From, to, qc):
T = np.identity(4)
From = From
to = to
alp = np.zeros(rb.ndof)
a = np.zeros(rb.ndof)
th = np.zeros(rb.ndof)
d = np.zeros(rb.ndof)
for i in range(rb.ndof):
alp[i] = rb.joints[i].alpha
a[i] = rb.joints[i].a
th[i] = qc[i] + rb.joints[i].offset
d[i] = rb.joints[i].d
for i in range(From, to):
ct = np.cos(th[i] + 0)
st = np.sin(th[i] + 0)
ca = np.cos(alp[i])
sa = np.sin(alp[i])
A = np.array([[ct, -st, 0, a[i]],
[(st * ca), (ct * ca), -sa, (-d[i] * sa)],
[(st * sa), (ct * sa), ca, (d[i] * ca)],
[0, 0, 0, 1]])
T = np.dot(T, A)
# print(A)
return T
def mdh_invdyn(rb, qc, qcdot, qcddot, grav):
z0 = np.array([0, 0, 1])
R = np.identity(3)
Q = np.zeros((rb.ndof, 1))
grav = grav.reshape(3)
w = np.zeros((3))
wdot = np.zeros((3))
vdot = grav
Fm = np.empty((3,0))
Nm = np.empty((3,0))
for k in range(1):
q = qc[k, :].reshape((rb.ndof,1))
qdot = qcdot[k, :].reshape((rb.ndof,1))
qddot = qcddot[k, :].reshape((rb.ndof,1))
N_DOFS = rb.ndof
# Forward recursion
for i in range(N_DOFS):
T = rb.mdh_calc_transformation(i, i+1, q)
R = T[:3,:3]
p = np.array([rb.joints[i].a,
-rb.joints[i].d * np.sin(rb.joints[i].alpha),
rb.joints[i].d * np.cos(rb.joints[i].alpha)])
wdot_ = (np.dot(R.T, wdot) +
np.dot(z0,qddot[i,k]) +
np.cross(np.dot(R.T,w), np.dot(z0, qdot[i,k])))
w_ = (np.dot(R.T,w) +
np.dot(z0, qdot[i,k]))
vdot_ = np.dot(R.T, (vdot +
np.cross(wdot, p) +
np.cross(w, np.cross(w, p))))
wdot = wdot_
w = w_
vdot = vdot_
vcdot = (vdot + np.cross(wdot, rb.joints[i].r.reshape(3)) +
(np.cross(w, np.cross(w, rb.joints[i].r.reshape(3)))) )
F = np.dot(rb.joints[i].m, vcdot)
N = np.dot(rb.joints[i].inertia, wdot) + np.cross(w, np.dot(rb.joints[i].inertia, w))
Fm = np.append(Fm, F.reshape((3,1)), axis=1)
Nm = np.append(Nm, N.reshape((3,1)), axis=1)
n = np.zeros(3)
f = np.zeros(3)
# Backward recursion
for i in reversed(range(N_DOFS)):
if i+1 < N_DOFS:
p = np.array([[rb.joints[i+1].a], [-rb.joints[i+1].d * np.sin(rb.joints[i+1].alpha)],[rb.joints[i+1].d * np.cos(rb.joints[i+1].alpha)]])
T = rb.mdh_calc_transformation(i+1, i+2, q)
R = T[:3, :3]
else:
R = np.eye(3)
p = np.zeros(3).reshape(3,1)
n_ =(np.dot(R, n) +
np.cross(rb.joints[i].r.reshape(3), Fm[:,i]) +
np.cross(p.reshape(3), np.dot(R,f)) +
Nm[:,i] )
f_ = np.dot(R, f) + Fm[:,i]
n = n_
f = f_
Q[i,k] = np.dot(n.T, z0)
return Q
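    # mdh_invdyn() above implements the recursive Newton-Euler algorithm for the
    # modified-DH parameterisation: the forward pass propagates angular velocity,
    # angular acceleration and linear acceleration from the base outwards (gravity is
    # injected by initialising vdot = grav), and the backward pass accumulates link
    # forces/moments to obtain the joint torques Q.
    # Illustrative call (shapes as used above, names hypothetical):
    #   Q = rb.mdh_invdyn(q.reshape(1, -1), qd.reshape(1, -1), qdd.reshape(1, -1), gravity_vec)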
def calc_transformation(rb, From, to, qc):
T = np.identity(4)
From = From +1
to = to +1
alp = np.zeros(rb.ndof)
a = np.zeros(rb.ndof)
th = np.zeros(rb.ndof)
d = np.zeros(rb.ndof)
for i in range(rb.ndof):
alp[i] = rb.joints[i].alpha
a[i] = rb.joints[i].a
# th[i] = rb.joints[i].theta
# Since it is revolute:
th[i] = qc[i] + rb.joints[i].offset
d[i] = rb.joints[i].d
for i in range(From, to):
ct = np.cos(th[i] + 0)
st = np.sin(th[i] + 0)
ca = np.cos(alp[i])
sa = np.sin(alp[i])
A = np.array([[ct, -st * ca, st*sa, a[i]*ct],
[st, ct * ca, -ct * sa, a[i] * st],
                          [0, sa, ca, d[i]],
                          [0, 0, 0, 1]])
            # NOTE: the original file is truncated here; the remaining lines follow the
            # standard DH convention used in mdh_calc_transformation above (assumption).
            T = np.dot(T, A)
        return T
# File: src/sgraph/selement.py
import sys
from typing import Optional, Dict
from sgraph.selementmergedexception import SElementMergedException
from sgraph.selementassociation import SElementAssociation
DEBUG = False
class SElement:
__slots__ = 'name', 'parent', 'children', 'childrenDict', 'outgoing', 'incoming', 'attrs', \
'human_readable_name'
def __init__(self, parent: Optional['SElement'], name: str):
if name == '':
# sys.stderr.write('Creating with empty name\n')
pass
if '/' in name:
name = name.replace('/', '__slash__')
if self == parent:
raise Exception('Self loop in model\n')
self.name = name
self.human_readable_name = ''
self.parent = None # type: Optional[SElement]
if parent is not None:
self.parent = parent
if self.name not in self.parent.childrenDict:
self.parent.children.append(self)
self.parent.childrenDict[self.name] = self
else:
if DEBUG:
raise Exception(
'Error: overlapping elements related to {} under {}, types: '
'{} and {}'.format(
self.name, self.parent.getPath(), '<not known>',
self.parent.childrenDict[self.name].getType()))
else:
raise SElementMergedException(
'Element {} tried to be merged with an existing element '
'under same parent={}'.format(name, parent.getPath()))
else:
self.parent = None
self.children = []
self.childrenDict = {}
self.outgoing = []
self.incoming = []
self.attrs = {}
# self.num = '0'
def addChild(self, child: 'SElement'):
"""
Add child, but if there is an overlapping element, merge instead and return merged element.
:param child: the child to be added.
:return: None or the element where the child has been merged with (differs from child)
"""
if child == self:
sys.stderr.write('Error with data model loop\n')
raise Exception('Aborting due to addChild self != child violation')
if child.name not in self.childrenDict:
self.children.append(child)
self.childrenDict[child.name] = child
else:
if DEBUG:
raise Exception(
'Error: overlapping elements related to {} under {}, types: {} '
'and {}'.format(child.name, self.getPath(),
child.getType(),
self.childrenDict[child.name].getType()))
else:
self.childrenDict[child.name].merge(child)
return self.childrenDict[child.name]
child.parent = self
def addChildIgnoreWithSameName(self, child, elemWithSameName):
"""
Add child, but if there is an overlapping element, merge instead and return merged element.
Overwrite is allowed for the elemWithSameName
:param child: the child to be added
:param elemWithSameName: Overwrite allowed for this element.
:return:
"""
if child == self:
sys.stderr.write('Error with data model loop\n')
raise Exception('Aborting due to addChild self != child violation')
if child.name not in self.childrenDict:
self.children.append(child)
self.childrenDict[child.name] = child
else:
if self.childrenDict[child.name] == elemWithSameName:
self.children.append(child)
self.childrenDict[child.name] = child
child.parent = self
return child
else:
if DEBUG:
raise Exception(
'Error: overlapping elements related to {} under {}, types: {} and {}'
.format(child.name, self.getPath(), child.getType(),
self.childrenDict[child.name].getType()))
else:
self.childrenDict[child.name].merge(child)
return self.childrenDict[child.name]
child.parent = self
def addAttribute(self, a, v):
self.attrs[a] = v
def enclosingFilenameEndswith(self, postfix):
e = self
while e.parent is not None:
if e.name.endswith(postfix):
return True
else:
e = e.parent
return False
def cumulateAttribute(self, a, v):
if a not in self.attrs:
self.attrs[a] = str(v)
else:
x = float(self.attrs[a]) + (v * 1.0)
self.attrs[a] = str(x)
def cumulateListAttribute(self, a, v, avoid_duplicates):
if a not in self.attrs:
self.attrs[a] = str(v)
else:
if not avoid_duplicates or v not in self.attrs[a]:
self.attrs[a] += ';' + v
def cumulateIntAttribute(self, a, v):
if a not in self.attrs:
self.attrs[a] = v
else:
self.attrs[a] += v
def traverseElements(self, visit):
visit(self)
for c in self.children:
c.traverseElements(visit)
def traverseIncoming(self, visited):
for incoming_element in set(self.incoming) - visited:
visited.add(incoming_element.fromElement)
incoming_element.fromElement.traverseIncoming(visited)
return visited
def removeElements(self, path):
splitted = path.split('/')
self.removeElementsWithList(splitted)
def removeElementsWithList(self, splitted):
if self.name == splitted[0]:
if len(splitted) == 1:
self.remove()
else:
for x in self.children:
x.removeElementsWithList(splitted[1:])
def detachChild(self, elem):
"""Always do this first before addChild"""
elem.parent = None
self.children.remove(elem)
if elem.name in self.childrenDict:
self.childrenDict.pop(elem.name)
else:
sys.stderr.write(
'Error: Probably duplicated element {} under {}'.format(
elem.name, self.getPath()))
def remove(self, leaveParentUntouched=False):
if not leaveParentUntouched:
if self.parent is not None:
self.parent.detachChild(self)
for ea in self.outgoing:
ea.toElement.incoming.remove(ea)
self.outgoing = []
for ea in self.incoming:
ea.fromElement.outgoing.remove(ea)
self.incoming = []
for c in self.children:
c.remove(True)
self.children = []
self.childrenDict = {}
def update_children_dict(self):
self.childrenDict.clear()
for c in self.children:
self.childrenDict[c.name] = c
def getNodeCount(self):
i = 1
for x in self.children:
i += x.getNodeCount()
return i
def getEACount(self):
i = len(self.outgoing)
for x in self.children:
i += x.getEACount()
return i
def getEATypes(self, theSet):
for ea in self.outgoing:
theSet.add(ea.deptype)
for x in self.children:
x.getEATypes(theSet)
def getEATypeCounts(self, d: Dict[str, int]):
for ea in self.outgoing:
if ea.deptype not in d:
d[ea.deptype] = 1
else:
d[ea.deptype] += 1
for x in self.children:
x.getEATypeCounts(d)
def getPath(self) -> str:
p = self.parent
pathparts = [self.name]
while p is not None and p.parent != p:
pathparts.append(p.name)
p = p.parent
pathparts.reverse()
return '/'.join(pathparts)
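    # Example of the path convention used above (illustrative): for a hierarchy built as
    #   root = SElement(None, 'root'); a = SElement(root, 'a'); b = SElement(a, 'b')
    # b.getPath() returns 'root/a/b', i.e. ancestor names joined with '/' from the root down.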
def getElementsByNameOnLevel(self, name, level, current_level=0):
out = []
if current_level == level - 1:
if name in self.childrenDict:
out.append(self.childrenDict[name])
# for c in self.children:
# if c.name == name:
# out.append(c)
elif current_level < level:
for c in self.children:
out += c.getElementsByNameOnLevel(name, level,
current_level + 1)
return out
def recurseIncomingDependencies(self,
elemlist,
assoclist,
outside_level=0):
for c in self.incoming:
if outside_level == 0 or c.fromElement.getAncestorOfLevel(
outside_level) != c.toElement.getAncestorOfLevel(
outside_level):
elemlist.append(c.fromElement)
if assoclist is not None:
assoclist.append(c)
for c in self.children:
c.recurseIncomingDependencies(elemlist, assoclist)
def getAllUsers(self, outside_level=0):
elems = []
self.recurseIncomingDependencies(elems, None, outside_level)
return set(elems)
def getAncestorOfLevel(self, level):
x = self.getLevel()
delta = x - level
ancestor = self
while delta > 0:
ancestor = ancestor.parent
delta -= 1
return ancestor
def getAncestorOfType(self, t):
"""
Return ancestor that has matching type.
:param t: type (str)
:return: SElement or None
"""
if self.typeEquals(t):
return self
ancestor = self
while ancestor is not None and ancestor.parent is not None:
ancestor = ancestor.parent
if ancestor.typeEquals(t):
return ancestor
return None
def getAncestorOfTypes(self, types):
"""
Return ancestor that has matching type.
:param types: type list or set
:return: SElement or None
"""
for t in types:
if self.typeEquals(t):
return self
ancestor = self
while ancestor is not None and ancestor.parent is not None:
ancestor = ancestor.parent
for t in types:
if ancestor.typeEquals(t):
return ancestor
return None
def getAncestors(self):
ancestor = self
ancestors = []
while ancestor is not None and ancestor.parent is not None:
ancestor = ancestor.parent
ancestors.append(ancestor)
return ancestors
def isDescendantOf(self, anc):
if self == anc:
return False
p = self.parent
while p is not None:
if p == anc:
return True
else:
p = p.parent
return False
def getRoot(self):
p = self
while p.parent is not None:
p = p.parent
return p
def getLevel(self) -> int:
e = self.parent
level = 0
while e is not None:
e = e.parent
level += 1
return level
def getChildByName(self, n):
if n in self.childrenDict:
return self.childrenDict[n]
# for c in self.children:
# if c.name == n:
# return c
return None
def findElement(self, n):
if n.startswith('/'):
# sys.stderr.write('invalid id (2): '+n+'\n')
n = n[1:]
if '/' not in n:
return self.getChildByName(n)
else:
pos = n.find('/')
root = n[0:pos]
if len(self.children) == 0:
return None
else:
child = self.getChildByName(root)
if child is not None:
return child.findElement(n[pos + 1:])
else:
return None
def createOrGetElement(self, n: str):
if n.startswith('/'):
# sys.stderr.write('invalid id (1): '+n+'\n')
n = n[1:]
if '/' not in n:
child = self.getChildByName(n)
if child is not None:
return child
# print 'FOO',n
return SElement(self, n)
else:
pos = n.find('/')
root = n[0:pos]
if len(self.children) == 0:
return self.createElementChain(n)
else:
child = self.getChildByName(root)
if child is not None:
return child.createOrGetElement(n[pos + 1:])
else:
return self.createElementChain(n)
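# Illustrative: calling root.createOrGetElement('a/b/c') reuses any existing
# 'a' and 'b' children and only creates the missing tail of the chain, so
# repeated calls with the same id return the same leaf element instead of
# creating duplicates.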
def createElementChain(self, elemid):
# print 'FOO2',elemid
current = self
for n in elemid.split('/'):
current = SElement(current, n)
return current
def hasSiblingsRecursive(self):
if self.parent is None:
return False
found = -1
for i in range(len(self.parent.children)):
if self == self.parent.children[i]:
found = i
break
i = found + 1
if i < len(self.parent.children):
return True
else:
return self.parent.hasSiblingsRecursive()
def setType(self, t):
if 'type' in self.attrs and self.attrs['type'] == 'repository':
if t == 'dir':
# Do not overwrite existing type=repository, e.g. with less meaningful "dir" etc.
# This happens e.g. when some analyzer collects dir elements to the model and it
# already has the "repository" marked for the same directory (done in the
# beginning of the analysis).
pass
else:
# Unpredicted case, so let it go..
self.attrs['type'] = t
else:
self.attrs['type'] = t
def getType(self) -> str:
if 'type' in self.attrs:
return self.attrs['type']
return ''
def getNextSiblingRecursive(self):
if self.parent is None:
return None
c = self.parent.children
i = c.index(self) + 1
if i < len(c):
return c[i]
return self.parent.getNextSiblingRecursive()
cluster labels, classification threshold)
Returns the original object, not a copy. Changes to the returned object are persisted to DSS by calling
:meth:`save_user_meta`
"""
return self.details["userMeta"]
def save_user_meta(self):
um = self.details["userMeta"]
if self.mltask is not None:
self.mltask.client._perform_empty(
"PUT", "/projects/%s/models/lab/%s/%s/models/%s/user-meta" % (self.mltask.project_key,
self.mltask.analysis_id, self.mltask.mltask_id, self.mltask_model_id), body = um)
else:
self.saved_model.client._perform_empty(
"PUT", "/projects/%s/savedmodels/%s/versions/%s/user-meta" % (self.saved_model.project_key,
self.saved_model.sm_id, self.saved_model_version), body = um)
def get_origin_analysis_trained_model(self):
"""
Fetch details about the analysis model from which this model was exported. Returns None if the
deployed trained model does not have an origin analysis trained model.
:rtype: DSSTrainedModelDetails | None
"""
if self.saved_model is None:
return self
else:
fmi = self.get_raw().get("smOrigin", {}).get("fullModelId")
if fmi is not None:
origin_ml_task = DSSMLTask.from_full_model_id(self.saved_model.client, fmi,
project_key=self.saved_model.project_key)
return origin_ml_task.get_trained_model_details(fmi)
def get_diagnostics(self):
"""
Retrieves diagnostics computed for this trained model
:returns: list of diagnostics
:rtype: list of type `dataikuapi.dss.ml.DSSMLDiagnostic`
"""
diagnostics = self.details.get("mlDiagnostics", {})
return [DSSMLDiagnostic(d) for d in diagnostics.get("diagnostics", [])]
def generate_documentation(self, folder_id=None, path=None):
"""
Start the model document generation from a template docx file in a managed folder,
or from the default template if no folder id and path are specified.
:param folder_id: (optional) the id of the managed folder
:param path: (optional) the path to the file from the root of the folder
:return: A :class:`~dataikuapi.dss.future.DSSFuture` representing the model document generation process
"""
if bool(folder_id) != bool(path):
raise ValueError("Both folder id and path arguments are required to use a template from a folder. Call without arguments to generate the model documentation using the default template")
template_mode_url = "default-template" if folder_id is None and path is None else "template-in-folder"
if self.mltask is not None:
f = self.mltask.client._perform_json(
"POST", "/projects/%s/models/lab/%s/%s/models/%s/generate-documentation-from-%s" %
(self.mltask.project_key, self.mltask.analysis_id, self.mltask.mltask_id, self.mltask_model_id, template_mode_url),
params={"folderId": folder_id, "path": path})
return DSSFuture(self.mltask.client, f["jobId"])
else:
f = self.saved_model.client._perform_json(
"POST", "/projects/%s/savedmodels/%s/versions/%s/generate-documentation-from-%s" %
(self.saved_model.project_key, self.saved_model.sm_id, self.saved_model_version, template_mode_url),
params={"folderId": folder_id, "path": path})
return DSSFuture(self.saved_model.client, job_id=f["jobId"])
def generate_documentation_from_custom_template(self, fp):
"""
Start the model document generation from a docx template (as a file object).
:param object fp: A file-like object pointing to a template docx file
:return: A :class:`~dataikuapi.dss.future.DSSFuture` representing the model document generation process
"""
files = {'file': fp}
if self.mltask is not None:
f = self.mltask.client._perform_json(
"POST", "/projects/%s/models/lab/%s/%s/models/%s/generate-documentation-from-custom-template" %
(self.mltask.project_key, self.mltask.analysis_id, self.mltask.mltask_id, self.mltask_model_id),
files=files)
return DSSFuture(self.mltask.client, f["jobId"])
else:
f = self.saved_model.client._perform_json(
"POST", "/projects/%s/savedmodels/%s/versions/%s/generate-documentation-from-custom-template" %
(self.saved_model.project_key, self.saved_model.sm_id, self.saved_model_version),
files=files)
return DSSFuture(self.saved_model.client, job_id=f["jobId"])
def download_documentation_stream(self, export_id):
"""
Download a model documentation, as a binary stream.
Warning: this stream will monopolize the DSSClient until closed.
:param export_id: the id of the generated model documentation returned as the result of the future
:return: the model documentation file contents, as a binary stream
"""
if self.mltask is not None:
return self.mltask.client._perform_raw(
"GET", "/projects/%s/models/lab/documentations/%s" % (self.mltask.project_key, export_id))
else:
return self.saved_model.client._perform_raw(
"GET", "/projects/%s/savedmodels/documentations/%s" % (self.saved_model.project_key, export_id))
def download_documentation_to_file(self, export_id, path):
"""
Download a model documentation into the given output file.
:param export_id: the id of the generated model documentation returned as the result of the future
:param path: the path where to download the model documentation
:return: None
"""
stream = self.download_documentation_stream(export_id)
with open(path, 'wb') as f:
for chunk in stream.iter_content(chunk_size=10000):
if chunk:
f.write(chunk)
f.flush()
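# Illustrative usage sketch (hedged; the exact shape of the generation
# future's result is an assumption, not documented here): generating from the
# default template, then downloading the produced document.
#
#     future = details.generate_documentation()
#     export_id = future.wait_for_result()  # assumed to yield the export id
#     details.download_documentation_to_file(export_id, "model_doc.docx")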
class DSSMLDiagnostic(object):
"""
Object that represents a computed Diagnostic on a trained model
Do not create this object directly, use :meth:`DSSTrainedModelDetails.get_diagnostics()` instead
"""
def __init__(self, data):
self._internal_dict = data
def get_raw(self):
"""
Gets the raw dictionary of the diagnostic
:rtype: dict
"""
return self._internal_dict
def get_type(self):
"""
Returns the base Diagnostic type
:rtype: str
"""
return self._internal_dict["type"]
def get_type_pretty(self):
"""
Returns the Diagnostic type as displayed in the UI
:rtype: str
"""
return self._internal_dict["displayableType"]
def get_message(self):
"""
Returns the message as displayed in the UI
:rtype: str
"""
return self._internal_dict["message"]
def __repr__(self):
return "{cls}(type={type}, message={msg})".format(cls=self.__class__.__name__,
type=self._internal_dict["type"],
msg=self._internal_dict["message"])
class DSSMLAssertionsParams(object):
"""
Object that represents parameters for all assertions of a ml task
Do not create this object directly, use :meth:`DSSPredictionMLTaskSettings.get_assertions_params` instead
"""
def __init__(self, data):
self._internal_dict = data
def __repr__(self):
return u"{}({})".format(self.__class__.__name__, self.get_raw())
def get_raw(self):
"""
Gets the raw dictionary of the assertions parameters
:rtype: dict
"""
return self._internal_dict
def get_assertion(self, assertion_name):
"""
Gets a :class:`DSSMLAssertionParams` representing the parameters of the assertion with the
provided name (or None if no assertion has that name)
:param str assertion_name: Name of the assertion
:rtype: :class:`DSSMLAssertionParams` or None if no assertion has that name
"""
for assertion_dict in self._internal_dict["assertions"]:
if assertion_dict["name"] == assertion_name:
return DSSMLAssertionParams(assertion_dict)
return None
def get_assertions_names(self):
"""
Gets the list of all assertions' names
:return: A list of all assertions' names
:rtype: list
"""
return [assertion_dict["name"] for assertion_dict in self._internal_dict["assertions"]]
def add_assertion(self, assertion_params):
"""
Adds parameters of an assertion to the assertions parameters of the ml task.
:param object assertion_params: A :class:`DSSMLAssertionParams` representing parameters of the assertion
"""
if not isinstance(assertion_params, DSSMLAssertionParams):
raise ValueError('Wrong type for assertion parameters: {}'.format(type(assertion_params)))
self._internal_dict["assertions"].append(assertion_params.get_raw())
def delete_assertion(self, assertion_name):
"""
Deletes the assertion parameters of the assertion with the provided name from the assertions parameters of the ml task.
Raises a ValueError if no assertion with the provided name was found
:param str assertion_name: Name of the assertion
"""
for idx, assertion_dict in enumerate(self._internal_dict["assertions"]):
if assertion_dict["name"] == assertion_name:
del self._internal_dict["assertions"][idx]
return
raise ValueError('No assertion found with name: {}'.format(assertion_name))
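# Illustrative sketch (placeholder names, empty filter): combining the classes
# below to register a class-expectation assertion on an existing
# DSSMLAssertionsParams (e.g. obtained via get_assertions_params()).
#
#     condition = DSSMLAssertionCondition.from_expected_class(0.9, "positive")
#     assertion = DSSMLAssertionParams.from_params("my_assertion", {}, condition)
#     assertions_params.add_assertion(assertion)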
class DSSMLAssertionParams(object):
"""
Object that represents parameters for one assertion
Do not create this object directly, use :meth:`DSSMLAssertionsParams.get_assertion` or
:meth:`from_params` instead
"""
def __init__(self, data):
self._internal_dict = data
def __repr__(self):
return u"{}({})".format(self.__class__.__name__, self.get_raw())
@staticmethod
def from_params(name, a_filter, condition):
"""
Creates assertion parameters from name, filter and condition
:param str name: Name of the assertion
:param dict a_filter: A dict representing the filter to select assertion population. You can use
a :class:`~dataikuapi.dss.utils.DSSFilterBuilder` to build the settings of the filter
:param object condition: A :class:`DSSMLAssertionCondition` for the assertion to be successful
:rtype: :class:`DSSMLAssertionParams`
"""
assertion_params = DSSMLAssertionParams({})
assertion_params.name = name
assertion_params.filter = a_filter
assertion_params.condition = condition
return assertion_params
def get_raw(self):
"""
Gets the raw dictionary of the assertion parameters
:rtype: dict
"""
return self._internal_dict
@property
def name(self):
"""
Returns the assertion name
:rtype: str
"""
return self._internal_dict["name"]
@name.setter
def name(self, name):
self._internal_dict["name"] = name
@property
def filter(self):
"""
Returns the assertion filter
:rtype: dict
"""
return self._internal_dict["filter"]
@filter.setter
def filter(self, a_filter):
self._internal_dict["filter"] = a_filter
@property
def condition(self):
"""
Returns the assertion condition
:rtype: :class:`DSSMLAssertionCondition`
"""
return DSSMLAssertionCondition(self._internal_dict["assertionCondition"])
@condition.setter
def condition(self, condition):
if not isinstance(condition, DSSMLAssertionCondition):
raise ValueError('Wrong type for assertion condition: {}'.format(type(condition)))
self._internal_dict["assertionCondition"] = condition.get_raw()
class DSSMLAssertionCondition(object):
"""
Object that represents an assertion condition
Do not create this object directly, use :meth:`DSSMLAssertionParams.condition`,
:meth:`DSSMLAssertionCondition.from_expected_class` or :meth:`DSSMLAssertionCondition.from_expected_range` instead
"""
def __init__(self, data):
self._internal_dict = data
def __repr__(self):
return u"{}({})".format(self.__class__.__name__, self.get_raw())
@staticmethod
def from_expected_class(expected_valid_ratio, expected_class):
"""
Creates an assertion condition from the expected valid ratio and class
:param float expected_valid_ratio: Assertion passes if this ratio of rows predicted as expected_class is attained
:param str expected_class: Class that a row must be predicted as for it to count towards the valid ratio
:rtype: :class:`DSSMLAssertionCondition`
"""
assertion_condition = DSSMLAssertionCondition({})
assertion_condition.expected_valid_ratio = expected_valid_ratio
assertion_condition.expected_class = expected_class
return assertion_condition
@staticmethod
def from_expected_range(expected_valid_ratio, expected_min, expected_max):
"""
Creates an assertion condition from expected valid ratio and range.
The expected range is the interval between expected_min and expected_max (included)
for the predictions in which the rows will be considered valid.
:param float expected_valid_ratio: Assertion passes if this ratio of rows predicted between expected_min and expected_max (included) is attained
:param float expected_min: Min value of the expected range
:param float expected_max: Max value of the expected range
:rtype: :class:`DSSMLAssertionCondition`
"""
assertion_condition = DSSMLAssertionCondition({})
assertion_condition.expected_valid_ratio = expected_valid_ratio
assertion_condition.expected_min = expected_min
assertion_condition.expected_max = expected_max
return assertion_condition
def get_raw(self):
"""
Gets the raw dictionary of the condition
:rtype: dict
"""
return self._internal_dict
@property
def expected_class(self):
"""
Returns the expected class or None if it is not defined. Assertion passes if the expected_valid_ratio
of rows predicted as expected_class is attained.
:rtype: str
"""
if "expectedClass" in self._internal_dict:
return self._internal_dict["expectedClass"]
else:
return None
@expected_class.setter
def expected_class(self, expected_class):
self._internal_dict["expectedClass"] = expected_class
@property
def expected_valid_ratio(self):
"""
Returns the ratio of valid rows that must be reached for the assertion to pass.
# File: applications/zcomx/modules/books.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Book classes and functions.
"""
import datetime
import functools
import json
import os
import re
import shutil
import urllib.parse
from gluon import *
from pydal.helpers.regex import REGEX_UPLOAD_EXTENSION
from applications.zcomx.modules.book_pages import (
BookPage,
BookPageTmp,
)
from applications.zcomx.modules.book_types import BookType
from applications.zcomx.modules.cc_licences import CCLicence
from applications.zcomx.modules.creators import \
Creator, \
creator_name, \
short_url as creator_short_url
from applications.zcomx.modules.images import (
CachedImgTag,
ImageDescriptor,
SIZES,
)
from applications.zcomx.modules.names import \
BookName, \
BookNumber, \
BookTitle, \
names as name_values
from applications.zcomx.modules.records import \
Record, \
Records
from applications.zcomx.modules.shell_utils import tthsum
from applications.zcomx.modules.zco import \
BOOK_STATUSES, \
BOOK_STATUS_ACTIVE, \
BOOK_STATUS_DISABLED, \
BOOK_STATUS_DRAFT, \
SITE_NAME
DEFAULT_BOOK_TYPE = 'one-shot'
LOG = current.app.logger
class Book(Record):
"""Class representing a book record."""
db_table = 'book'
def page_count(self):
"""return the number of pages in the book.
returns:
integer, the number of pages
"""
return len(self.pages())
def pages(self, orderby=None, limitby=None):
"""Return a list of pages of the book.
Args:
orderby: orderby expression, see select()
Default, [page_no, id]
limitby: limitby expression, see select()
Returns:
list of BookPage instances
"""
if orderby is None:
db = current.app.db
orderby = [db.book_page.page_no, db.book_page.id]
return Records.from_key(
BookPage,
dict(book_id=self.id),
orderby=orderby,
limitby=limitby
)
def tmp_pages(self, orderby=None, limitby=None):
"""Return a list of book_page_tmp records associated with the book.
Args:
orderby: orderby expression, see select()
Default, [page_no, id]
limitby: limitby expression, see select()
Returns:
list of BookPageTmp instances
"""
if orderby is None:
db = current.app.db
orderby = [db.book_page_tmp.page_no, db.book_page_tmp.id]
return Records.from_key(
BookPageTmp,
dict(book_id=self.id),
orderby=orderby,
limitby=limitby
)
def book_name(book, use='file'):
"""Return the name of the book suitable for specified use.
Args:
book: Book instance
use: one of 'file', 'search', 'url'
Returns:
string, name of file
"""
if use == 'file':
return names(book.as_dict())['name_for_file']
elif use == 'search':
return book.name_for_search
elif use == 'url':
return book.name_for_url
return
def book_page_for_json(book_page):
"""Return the book_page formatted as JSON suitable for jquery-file-upload.
Args:
book_page: BookPage instance
Returns:
dict, containing book_page data suitable for jquery-file-upload
{
"name": "picture1.jpg",
"size": 902604,
"url": "http:\/\/dom.org\/files\/picture1.jpg",
"thumbnailUrl": "http:\/\/dom.org\/files\/thumbnail\/pic1.jpg",
"deleteUrl": "http:\/\/dom.org\/files\/picture1.jpg",
"deleteType": "DELETE"
},
"""
filename = book_page.upload_image().original_name()
size = ImageDescriptor(book_page.upload_image().fullname()).size_bytes()
down_url = URL(
c='images',
f='download',
args=book_page.image,
)
thumb = URL(
c='images',
f='download',
args=book_page.image,
vars={'size': 'web'},
)
delete_url = URL(
c='login',
f='book_pages_handler',
args=book_page.book_id,
vars={'book_page_id': book_page.id},
)
return dict(
book_id=book_page.book_id,
book_page_id=book_page.id,
name=filename,
size=size,
url=down_url,
thumbnailUrl=thumb,
deleteUrl=delete_url,
deleteType='DELETE',
)
def book_pages_as_json(book, book_page_ids=None):
"""Return the book pages formatted as JSON suitable for jquery-file-upload.
Args:
book: Book instance
book_page_ids: list of ids, integers of book_page records. By default
all pages of book are returned. With this option only pages with
ids in this list are returned.
Returns:
string, json formatted book_page data
{'files': [
{
... see book_page_for_json ...
},
]
}
"""
pages = []
for page in book.pages():
if not book_page_ids or page.id in book_page_ids:
pages.append(page)
json_pages = [book_page_for_json(p) for p in pages]
return json.dumps(dict(files=json_pages))
def book_pages_from_tmp(book):
"""Copy book_page_tmp records associated with book to book_page records.
Args:
book: Book instance
"""
db = current.app.db
for page in book.pages():
page.delete()
for page in book.tmp_pages():
data = page.as_dict()
data['image'] = page.image.replace(
'book_page_tmp.image',
'book_page.image'
)
book_page = BookPage.from_add(
data,
validate=False,
)
page.copy_images(db.book_page)
def book_pages_to_tmp(book):
"""Copy book_page records associated with book to book_page_tmp records.
Args:
book: Book instance
"""
db = current.app.db
for page in book.tmp_pages():
page.delete()
for page in book.pages():
data = page.as_dict()
data['image'] = page.image.replace(
'book_page.image',
'book_page_tmp.image'
)
book_page_tmp = BookPageTmp.from_add(
data,
validate=False,
)
page.copy_images(db.book_page_tmp)
def book_pages_years(book):
"""Return a list of years for the pages of a book.
The years can be used for copyright.
Args:
book: Book instance
Returns:
list of integers
"""
return sorted(set([x.created_on.year for x in book.pages()]))
def book_tables():
"""Return a list of tables referencing books.
Returns:
list of strings, list of table names.
"""
return [
'activity_log',
'tentative_activity_log',
'book_page',
'book_page_tmp',
'book_view',
'contribution',
'derivative',
'download',
'publication_metadata',
'publication_serial',
'rating',
]
def book_types(db):
"""Return an XML instance representing book types suitable for
an HTML radio button input.
Args:
db: gluon.dal.DAL instance
"""
# {'value': record_id, 'text': description}, ...
rows = db(db.book_type).select(
db.book_type.ALL,
orderby=db.book_type.sequence
)
return XML(
','.join(
[
'{{"value":"{x.id}", "text":"{x.description}"}}'.format(x=x)
for x in rows
]
)
)
def calc_contributions_remaining(book):
"""Return the calculated contributions remaining for the book to reach
its contributions target.
Args:
book: Book instance
Returns:
float, dollar amount of contributions remaining.
"""
if not book:
return 0.00
db = current.app.db
target = contributions_target(book)
query = (db.contribution.book_id == book.id)
total = db.contribution.amount.sum()
rows = db(query).select(total)
contributed_total = rows[0][total] if rows and rows[0][total] else 0.00
remaining = target - contributed_total
if remaining < 0:
remaining = 0.00
return remaining
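# Illustrative: with a target of 240.00 and 50.00 contributed so far, 190.00
# remains; once contributions meet or exceed the target the remaining amount
# is clamped to 0.00.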
def calc_status(book):
"""Determine the calculated status of the book.
Args:
book: Book instance
Returns:
string, the status of a book, eg BOOK_STATUS_ACTIVE
"""
if book.status == BOOK_STATUS_DISABLED:
return BOOK_STATUS_DISABLED
return BOOK_STATUS_ACTIVE if book.page_count() > 0 else BOOK_STATUS_DRAFT
def cbz_comment(book):
""" Return a comment suitable for the cbz file.
Args:
book: Book instance
Returns:
string of '|'-joined fields (publication year, creator name, book name, formatted number, cc licence code, creator short url), eg '2014|Cartoonist Name|Title of Book|...'
"""
creator = Creator.from_id(book.creator_id)
cc_licence = CCLicence.from_id(book.cc_licence_id)
fields = []
fields.append(str(book.publication_year))
fields.append(creator.name)
fields.append(book.name)
fields.append(formatted_number(book))
fields.append(cc_licence.code)
fields.append(creator_short_url(creator))
return '|'.join(fields)
def cbz_link(book, components=None, **attributes):
"""Return a link suitable for the cbz file of a book.
Args:
book: Book instance
components: list, passed to A(*components), default [book.name_for_url]
attributes: dict of attributes for A()
Returns:
A instance
"""
empty = SPAN('')
if not book:
return empty
link_url = cbz_url(book)
if not link_url:
return empty
if not components:
name = '{n}.cbz'.format(n=book_name(book, use='url').lower())
components = [name]
kwargs = {}
kwargs.update(attributes)
if '_href' not in attributes:
kwargs['_href'] = link_url
return A(*components, **kwargs)
def cbz_url(book, **url_kwargs):
"""Return the url to the cbz file for the book.
Args:
book: Book instance
url_kwargs: dict of kwargs for URL(). Eg {'extension': False}
Returns:
string, url, eg
http://zco.mx/FirstLast/MyBook-001.cbz
"""
creator = Creator.from_id(book.creator_id)
name_of_creator = creator_name(creator, use='url')
if not name_of_creator:
return
name = book_name(book, use='url')
if not name:
return
kwargs = {}
kwargs.update(url_kwargs)
return URL(
c=name_of_creator,
f='{name}.cbz'.format(name=name),
**kwargs
)
def cc_licence_data(book):
"""Return data required for the cc licence for the book.
Args:
book: Book instance
Returns:
dict
"""
creator = Creator.from_id(book.creator_id)
year_list = book_pages_years(book)
if not year_list:
year_list = [datetime.date.today().year]
if len(year_list) == 1:
years = str(year_list[0])
else:
years = '{f}-{l}'.format(f=year_list[0], l=year_list[-1])
return dict(
owner=creator.name,
owner_url=creator_short_url(creator),
title=book.name,
title_url=short_url(book),
year=years,
place=book.cc_licence_place,
)
def complete_link(book, components=None, **attributes):
"""Return html code suitable for a 'set as complete' link/button/checkbox.
Args:
book: Book instance
components: list, passed to A(*components)
attributes: dict of attributes for A()
"""
empty = SPAN('')
if not book:
return empty
if not components:
components = [
DIV(
INPUT(_type='checkbox', _value='off'),
_class="checkbox_wrapper"
)
]
kwargs = {}
kwargs.update(attributes)
if '_href' not in attributes:
kwargs['_href'] = URL(
c='login', f='book_complete', args=book.id, extension=False)
return A(*components, **kwargs)
def contribute_link(book, components=None, **attributes):
"""Return html code suitable for a 'Contribute' link.
Args:
book: Book instance
components: list, passed to A(*components), default ['Contribute']
attributes: dict of attributes for A()
"""
empty = SPAN('')
if not book:
return empty
if not components:
components = ['Contribute']
kwargs = {}
kwargs.update(attributes)
if '_href' not in attributes:
kwargs['_href'] = URL(
c='contributions',
f='modal',
vars=dict(book_id=book.id),
extension=False
)
return A(*components, **kwargs)
def contributions_remaining_by_creator(creator):
"""Return the calculated contributions remaining for all books of the
creator.
Args:
creator: Creator instance
Returns:
float, dollar amount of contributions remaining.
"""
# invalid-name (C0103): *Invalid %%s name "%%s"%%s*
# pylint: disable=C0103
if not creator:
return 0.00
db = current.app.db
query = (db.book.creator_id == creator.id) & \
(db.book.status == BOOK_STATUS_ACTIVE)
total = 0
books = db(query).select()
for book_row in books:
book = Book.from_id(book_row.id)
amount = calc_contributions_remaining(book)
total = total + amount
return total
def contributions_target(book):
"""Return the contributions target for the book.
Args:
book: Book instance
Returns:
float, dollar amount of contributions target.
"""
rate_per_page = 10.00
if not book:
return 0.00
amount = int((rate_per_page * book.page_count()) + 0.5)
return amount
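# Illustrative: at 10.00 per page, a 24-page book gets a contributions target
# of 240 (the total is rounded to the nearest whole dollar).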
def cover_image(book, size='original', img_attributes=None):
"""Return html code suitable for the cover image.
Args:
book: Book instance
size: string, the size of the image. One of SIZES
img_attributes: dict of attributes for IMG
"""
image = None
if book:
try:
first_page = get_page(book, page_no='first')
except LookupError:
first_page = None
image = first_page.image if first_page else None
attributes = {'_alt': ''}
if img_attributes:
attributes.update(img_attributes)
return CachedImgTag(image, size=size, attributes=attributes)()
def default_contribute_amount(book):
"""Return the default amount for the contribute widget.
Args:
book: Book instance
"""
minimum
work out the
# auth chain difference of the unpersisted events.
unpersisted_ids.update(e for e in event_chain if e in event_map)
else:
set_ids.add(event_id)
# The auth chain difference of the unpersisted events of the state sets
# is calculated by taking the difference between the union and
# intersections.
union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:])
intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:])
difference_from_event_map: Collection[str] = union - intersection
else:
difference_from_event_map = ()
state_sets_ids = [set(state_set.values()) for state_set in state_sets]
difference = await state_res_store.get_auth_chain_difference(
room_id, state_sets_ids
)
difference.update(difference_from_event_map)
return difference
def _seperate(
state_sets: Iterable[StateMap[str]],
) -> Tuple[StateMap[str], StateMap[Set[str]]]:
"""Return the unconflicted and conflicted state. This is different than in
the original algorithm, as this defines a key to be conflicted if one of
the state sets doesn't have that key.
Args:
state_sets
Returns:
A tuple of unconflicted and conflicted state. The conflicted state dict
is a map from type/state_key to set of event IDs
"""
unconflicted_state = {}
conflicted_state = {}
for key in set(itertools.chain.from_iterable(state_sets)):
event_ids = {state_set.get(key) for state_set in state_sets}
if len(event_ids) == 1:
unconflicted_state[key] = event_ids.pop()
else:
event_ids.discard(None)
conflicted_state[key] = event_ids
# mypy doesn't understand that discarding None above means that conflicted
# state is StateMap[Set[str]], not StateMap[Set[Optional[Str]]].
return unconflicted_state, conflicted_state # type: ignore
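# Worked example (illustrative event ids): for the state sets
#     {("m.room.create", ""): "$A", ("m.room.member", "@u:hs"): "$B"}
#     {("m.room.create", ""): "$A", ("m.room.member", "@u:hs"): "$C"}
# the create key is unconflicted ({("m.room.create", ""): "$A"}) while the
# membership key is conflicted ({("m.room.member", "@u:hs"): {"$B", "$C"}}).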
def _is_power_event(event: EventBase) -> bool:
"""Return whether or not the event is a "power event", as defined by the
v2 state resolution algorithm
Args:
event
Returns:
True if the event is a power event.
"""
if (event.type, event.state_key) in (
(EventTypes.PowerLevels, ""),
(EventTypes.JoinRules, ""),
(EventTypes.Create, ""),
):
return True
if event.type == EventTypes.Member:
if event.membership in ("leave", "ban"):
return event.sender != event.state_key
return False
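# Illustrative: a ban or a kick (membership "ban" or "leave" where the sender
# is not the affected user) counts as a power event, whereas a user leaving
# the room themselves (sender == state_key) does not.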
async def _add_event_and_auth_chain_to_graph(
graph: Dict[str, Set[str]],
room_id: str,
event_id: str,
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
auth_diff: Set[str],
) -> None:
"""Helper function for _reverse_topological_power_sort that adds the event
and its auth chain (that is in the auth diff) to the graph
Args:
graph: A map from event ID to the events auth event IDs
room_id: the room we are working in
event_id: Event to add to the graph
event_map
state_res_store
auth_diff: Set of event IDs that are in the auth difference.
"""
state = [event_id]
while state:
eid = state.pop()
graph.setdefault(eid, set())
event = await _get_event(room_id, eid, event_map, state_res_store)
for aid in event.auth_event_ids():
if aid in auth_diff:
if aid not in graph:
state.append(aid)
graph.setdefault(eid, set()).add(aid)
async def _reverse_topological_power_sort(
clock: Clock,
room_id: str,
event_ids: Iterable[str],
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
auth_diff: Set[str],
) -> List[str]:
"""Returns a list of the event_ids sorted by reverse topological ordering,
and then by power level and origin_server_ts
Args:
clock
room_id: the room we are working in
event_ids: The events to sort
event_map
state_res_store
auth_diff: Set of event IDs that are in the auth difference.
Returns:
The sorted list
"""
graph: Dict[str, Set[str]] = {}
for idx, event_id in enumerate(event_ids, start=1):
await _add_event_and_auth_chain_to_graph(
graph, room_id, event_id, event_map, state_res_store, auth_diff
)
# We await occasionally when we're working with large data sets to
# ensure that we don't block the reactor loop for too long.
if idx % _AWAIT_AFTER_ITERATIONS == 0:
await clock.sleep(0)
event_to_pl = {}
for idx, event_id in enumerate(graph, start=1):
pl = await _get_power_level_for_sender(
room_id, event_id, event_map, state_res_store
)
event_to_pl[event_id] = pl
# We await occasionally when we're working with large data sets to
# ensure that we don't block the reactor loop for too long.
if idx % _AWAIT_AFTER_ITERATIONS == 0:
await clock.sleep(0)
def _get_power_order(event_id: str) -> Tuple[int, int, str]:
ev = event_map[event_id]
pl = event_to_pl[event_id]
return -pl, ev.origin_server_ts, event_id
# Note: graph is modified during the sort
it = lexicographical_topological_sort(graph, key=_get_power_order)
sorted_events = list(it)
return sorted_events
async def _iterative_auth_checks(
clock: Clock,
room_id: str,
room_version: RoomVersion,
event_ids: List[str],
base_state: StateMap[str],
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
) -> MutableStateMap[str]:
"""Sequentially apply auth checks to each event in given list, updating the
state as it goes along.
Args:
clock
room_id
room_version
event_ids: Ordered list of events to apply auth checks to
base_state: The set of state to start with
event_map
state_res_store
Returns:
Returns the final updated state
"""
resolved_state = dict(base_state)
for idx, event_id in enumerate(event_ids, start=1):
event = event_map[event_id]
auth_events = {}
for aid in event.auth_event_ids():
ev = await _get_event(
room_id, aid, event_map, state_res_store, allow_none=True
)
if not ev:
logger.warning(
"auth_event id %s for event %s is missing", aid, event_id
)
else:
if ev.rejected_reason is None:
auth_events[(ev.type, ev.state_key)] = ev
for key in event_auth.auth_types_for_event(room_version, event):
if key in resolved_state:
ev_id = resolved_state[key]
ev = await _get_event(room_id, ev_id, event_map, state_res_store)
if ev.rejected_reason is None:
auth_events[key] = event_map[ev_id]
try:
event_auth.check_auth_rules_for_event(
event,
auth_events.values(),
)
resolved_state[(event.type, event.state_key)] = event_id
except AuthError:
pass
# We await occasionally when we're working with large data sets to
# ensure that we don't block the reactor loop for too long.
if idx % _AWAIT_AFTER_ITERATIONS == 0:
await clock.sleep(0)
return resolved_state
async def _mainline_sort(
clock: Clock,
room_id: str,
event_ids: List[str],
resolved_power_event_id: Optional[str],
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
) -> List[str]:
"""Returns a sorted list of event_ids sorted by mainline ordering based on
the given event resolved_power_event_id
Args:
clock
room_id: room we're working in
event_ids: Events to sort
resolved_power_event_id: The final resolved power level event ID
event_map
state_res_store
Returns:
The sorted list
"""
if not event_ids:
# It's possible for there to be no event IDs here to sort, so we can
# skip calculating the mainline in that case.
return []
mainline = []
pl = resolved_power_event_id
idx = 0
while pl:
mainline.append(pl)
pl_ev = await _get_event(room_id, pl, event_map, state_res_store)
auth_events = pl_ev.auth_event_ids()
pl = None
for aid in auth_events:
ev = await _get_event(
room_id, aid, event_map, state_res_store, allow_none=True
)
if ev and (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""):
pl = aid
break
# We await occasionally when we're working with large data sets to
# ensure that we don't block the reactor loop for too long.
if idx != 0 and idx % _AWAIT_AFTER_ITERATIONS == 0:
await clock.sleep(0)
idx += 1
mainline_map = {ev_id: i + 1 for i, ev_id in enumerate(reversed(mainline))}
event_ids = list(event_ids)
order_map = {}
for idx, ev_id in enumerate(event_ids, start=1):
depth = await _get_mainline_depth_for_event(
event_map[ev_id], mainline_map, event_map, state_res_store
)
order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id)
# We await occasionally when we're working with large data sets to
# ensure that we don't block the reactor loop for too long.
if idx % _AWAIT_AFTER_ITERATIONS == 0:
await clock.sleep(0)
event_ids.sort(key=lambda ev_id: order_map[ev_id])
return event_ids
async def _get_mainline_depth_for_event(
event: EventBase,
mainline_map: Dict[str, int],
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
) -> int:
"""Get the mainline depths for the given event based on the mainline map
Args:
event
mainline_map: Map from event_id to mainline depth for events in the mainline.
event_map
state_res_store
Returns:
The mainline depth
"""
room_id = event.room_id
tmp_event: Optional[EventBase] = event
# We do an iterative search, replacing `event` with the power level event in its
# auth events (if any)
while tmp_event:
depth = mainline_map.get(tmp_event.event_id)
if depth is not None:
return depth
auth_events = tmp_event.auth_event_ids()
tmp_event = None
for aid in auth_events:
aev = await _get_event(
room_id, aid, event_map, state_res_store, allow_none=True
)
if aev and (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
tmp_event = aev
break
# Didn't find a power level auth event, so we just return 0
return 0
@overload
async def _get_event(
room_id: str,
event_id: str,
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
allow_none: Literal[False] = False,
) -> EventBase:
...
@overload
async def _get_event(
room_id: str,
event_id: str,
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
allow_none: Literal[True],
) -> Optional[EventBase]:
...
async def _get_event(
room_id: str,
event_id: str,
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
allow_none: bool = False,
) -> Optional[EventBase]:
"""Helper function to look up event in event_map, falling back to looking
it up in the store
Args:
room_id
event_id
event_map
state_res_store
allow_none: if the event is not found, return None rather than raising
an exception
Returns:
The event, or none if the event does not exist (and allow_none is True).
"""
if event_id not in event_map:
events = await state_res_store.get_events([event_id], allow_rejected=True)
event_map.update(events)
event = event_map.get(event_id)
if event is None:
if allow_none:
return None
"""
Baseline hierarchical configuration setup functions for Brokkr.
"""
# Standard library imports
import abc
import argparse
import collections.abc
import copy
import json
import logging
import os
from pathlib import Path
# Third party imports
import toml
import toml.decoder
# Local imports
from brokkr.constants import (
LEVEL_NAME_LOCAL,
LEVEL_NAME_REMOTE,
LEVEL_NAME_SYSTEM,
LEVEL_NAME_SYSTEM_CLIENT,
)
import brokkr.utils.misc
# General static constants
DEFAULT_CONFIG_TYPE_NAME = "config"
LEVEL_NAME_CLI_ARGS = "cli_args"
LEVEL_NAME_DEFAULTS = "defaults"
LEVEL_NAME_ENV_VARS = "env_vars"
LEVEL_NAME_FILE = "local"
LEVEL_NAME_OVERLAY = "overlay"
LEVEL_NAME_PRESETS = "presets"
EXTENSION_TOML = "toml"
EXTENSION_JSON = "json"
EXTENSION_DEFAULT = EXTENSION_TOML
EXTENSIONS_SUPPORTED = [EXTENSION_TOML, EXTENSION_JSON]
VERSION_KEY = "config_version"
EMPTY_CONFIG = ("config_is_empty", True)
JSON_SEPERATORS = (",", ":")
CONFIG_VERSION_DEFAULT = 1
LEVEL_CLASS = "level_class"
LEVEL_ARGS = "level_args"
# --- Utility functions --- #
def check_extension_supported(extension):
if extension not in EXTENSIONS_SUPPORTED:
raise ValueError("Extension must be one of "
f"{EXTENSIONS_SUPPORTED}, not {extension}")
def convert_paths(config_data, path_variables):
# Format string paths as pathlib paths with username expanded
for key_name in path_variables:
inner_dict = config_data
try:
inner_dict = brokkr.utils.misc.get_inner_dict(
obj=config_data, keys=key_name[:-1])
inner_dict[key_name[-1]] = brokkr.utils.misc.convert_path(
inner_dict[key_name[-1]])
# Ignore missing keys
except KeyError:
continue
return config_data
def read_config_file(path, extension=None, logger=None):
if logger is True:
logger = logging.getLogger(__name__)
path = Path(path)
if extension is None:
extension = path.suffix.strip(".")
check_extension_supported(extension)
if extension == EXTENSION_TOML:
try:
config_data = toml.load(path)
except toml.decoder.TomlDecodeError as e:
if logger is not None:
logger.error("%s reading TOML config file %r: %s",
type(e).__name__, path.as_posix(), e)
logger.info("Error details:", exc_info=True)
raise SystemExit(1) from e
raise
elif extension == EXTENSION_JSON:
with open(path, "r", encoding="utf-8") as config_file:
try:
config_data = json.load(config_file)
except Exception as e:
if logger is not None:
logger.error("%s reading JSON config file %r: %s",
type(e).__name__, path.as_posix(), e)
logger.info("Error details:", exc_info=True)
raise SystemExit(1) from e
raise
return config_data
def write_config_file(config_data, path, extension=None):
path = Path(path)
if extension is None:
extension = Path(path).suffix.strip(".")
check_extension_supported(extension)
os.makedirs(path.parent, exist_ok=True)
with open(path, mode="w", encoding="utf-8", newline="\n") as config_file:
if extension == EXTENSION_TOML:
toml.dump(config_data, config_file)
elif extension == EXTENSION_JSON:
json.dump(config_data, config_file,
allow_nan=False, separators=JSON_SEPERATORS)
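# Illustrative round trip (hypothetical file name): the extension is inferred
# from the suffix, so writing and re-reading a dict preserves it.
#
#     data = {"config_version": 1, "name": "example"}
#     write_config_file(data, "example.toml")
#     assert read_config_file("example.toml") == data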
def insert_values(config_data, insert_items, logger=None):
# pylint: disable=too-many-nested-blocks, too-many-branches
if logger is True:
logger = logging.getLogger(__name__)
# Insert the specified values into the given keys
for preset_name, preset_data in config_data.items():
for table_name, target_key in insert_items:
if (preset_data.get(table_name, None) is None
or preset_data.get(target_key, None) is None):
continue # Skip if source or target table is not present
if preset_data[table_name].get(
target_key, None) is not None:
# If target key is present at first level, use that
target_tables = {table_name: preset_data[table_name]}
else:
# Otherwise, check for the key in the table's subdicts
target_tables = preset_data[table_name]
for target_name, target_table in target_tables.items():
if target_table.get(target_key, None) is None:
continue # Skip target tables that lack the key entirely
if not target_table[target_key]:
# If key is empty, fill it with the entire source table
target_table[target_key] = preset_data[target_key]
continue
# Otherwise, do a lookup in the source table
try:
if brokkr.utils.misc.is_iterable(
target_table[target_key]):
if isinstance(preset_data[target_key],
collections.abc.Mapping):
# If the target is an iterable and the src a dict,
# look up each value in the source table
target_table[target_key] = {
inner_key: preset_data[target_key][inner_key]
for inner_key in target_table[target_key]}
else:
# Otherwise, if both are lists, merge them
target_table[target_key] = set(
target_table[target_key]
+ preset_data[target_key])
else:
# Otherwise, look up the value in the source table
# and merge them, keeping values in the original
merged_table = brokkr.utils.misc.update_dict_recursive(
preset_data[target_key][target_table[target_key]],
target_table)
target_table.update(merged_table)
# And remove the now-redundant item
del target_table[target_key]
except KeyError as e:
if not logger:
raise
logger.error(
"%s inserting value for preset %r: "
"Can't find inner key %s in key %r to insert into "
"table %r, subtable %r",
type(e).__name__, preset_name, e, target_key,
table_name, target_name)
logger.info("Error details:", exc_info=True)
logger.info("Possible keys: %r",
list(preset_data[target_key].keys()))
raise SystemExit(1) from e
return config_data
# --- Config type --- #
class ConfigType(brokkr.utils.misc.AutoReprMixin):
def __init__(
self,
name,
defaults=None,
overlay=None,
local_config_path=None,
preset_config_path=None,
path_variables=None,
config_version=CONFIG_VERSION_DEFAULT,
):
self.name = name
self.defaults = {} if defaults is None else defaults
self.overlay = overlay
self.local_config_path = (
None if local_config_path is None else Path(local_config_path))
self.preset_config_path = (
None if preset_config_path is None else Path(preset_config_path))
self.path_variables = [] if path_variables is None else path_variables
self.config_version = config_version
# --- Config level classes #
class ConfigLevel(brokkr.utils.misc.AutoReprMixin, metaclass=abc.ABCMeta):
def __init__(
self,
name,
config_type=None,
logger=None,
):
self.name = name
self.config_type = (ConfigType(DEFAULT_CONFIG_TYPE_NAME)
if config_type is None else config_type)
self.logger = logger
def generate_config(self):
if self.config_type.config_version is not None:
config_data = {VERSION_KEY: self.config_type.config_version}
else:
config_data = {}
return config_data
@abc.abstractmethod
def read_config(self, input_data=None):
config_data = convert_paths(
input_data, self.config_type.path_variables)
return config_data
class WritableConfigLevel(ConfigLevel, metaclass=abc.ABCMeta):
@abc.abstractmethod
def write_config(self, config_data=None):
pass
class DefaultsConfigLevel(ConfigLevel):
def __init__(self, name=LEVEL_NAME_DEFAULTS, **kwargs):
super().__init__(name=name, **kwargs)
def generate_config(self):
config_data = super().generate_config()
config_data = config_data.update(self.config_type.defaults)
return config_data
def read_config(self, input_data=None):
if input_data is None:
input_data = copy.deepcopy(self.config_type.defaults)
else:
input_data = copy.deepcopy(input_data)
return super().read_config(input_data)
class FileConfigLevel(WritableConfigLevel):
def __init__(
self,
name=LEVEL_NAME_FILE,
path=None,
extension=EXTENSION_DEFAULT,
preset=False,
append_level=False,
**kwargs,
):
check_extension_supported(extension)
super().__init__(name=name, **kwargs)
self.extension = extension
self.preset = preset
# Setup full config path given defaults
if path is not None:
self.path = Path(path)
elif self.preset:
self.path = self.config_type.preset_config_path
else:
self.path = self.config_type.local_config_path
# Generate filename and add to path if needed
if self.path.suffix.strip(".") != self.extension:
config_filename = self.config_type.name
if append_level:
config_filename = "_".join([config_filename, self.name])
config_filename += ("." + self.extension)
self.path = self.path / config_filename
def read_config(self, input_data=None):
if input_data is None:
try:
config_data = read_config_file(
path=self.path,
extension=self.extension,
logger=self.logger,
)
# Generate the config file if it does not yet exist; presets are treated as empty
except FileNotFoundError:
if not self.preset:
config_data = self.write_config()
else:
config_data = {}
else:
config_data = copy.deepcopy(input_data)
# Delete empty config key, added to avoid unreadable empty JSONs
try:
del config_data[EMPTY_CONFIG[0]]
except KeyError:
pass
config_data = super().read_config(config_data)
return config_data
def write_config(self, config_data=None):
# Prevent JSON errors from serializing/deserializing empty dict
if not config_data and self.extension == EXTENSION_JSON:
config_data = {EMPTY_CONFIG[0]: EMPTY_CONFIG[1]}
# Merge config data with generated baseline
if not config_data:
config_data = self.generate_config()
else:
config_data = {**self.generate_config(), **config_data}
write_config_file(config_data, self.path)
return config_data
class PresetsConfigLevel(ConfigLevel):
def __init__(
self,
name=LEVEL_NAME_PRESETS,
path=None,
filename_glob=f"*.preset.{EXTENSION_DEFAULT}",
key_name="name",
template=None,
insert_items=None,
**kwargs,
):
super().__init__(name=name, **kwargs)
self.filename_glob = filename_glob
self.key_name = key_name
self.template = {} if template is None else template
self.insert_items = {} if insert_items is None else insert_items
if path is not None:
self.path = Path(path)
else:
self.path = self.config_type.local_config_path
def read_config(self, input_data=None):
if input_data is None:
preset_paths = self.path.glob(self.filename_glob)
presets = {
path: brokkr.utils.misc.update_dict_recursive(
copy.deepcopy(self.template), read_config_file(
path=path, logger=self.logger))
for path in preset_paths}
config_data = {
preset.get(self.key_name, path.stem.split(".")[0]): preset
for path, preset in presets.items()}
config_data = insert_values(
config_data, self.insert_items, logger=self.logger)
else:
config_data = copy.deepcopy(input_data)
config_data = super().read_config(input_data=config_data)
return config_data
class MappingConfigLevel(ConfigLevel):
def __init__(
self,
name,
mapping,
**kwargs,
):
self.mapping = mapping
super().__init__(name=name, **kwargs)
def read_config(self, input_data=None):
config_data = {}
if input_data:
for src_key, config_keys in self.mapping.items():
config_value = input_data.get(src_key, None)
# Recursively set config keys
if config_value is not None:
inner_dict = config_data
for config_section in config_keys[:-1]:
try:
inner_dict = inner_dict[config_section]
except KeyError:
inner_dict[config_section] = {}
inner_dict = inner_dict[config_section]
inner_dict[config_keys[-1]] = config_value
return super().read_config(config_data)
class EnvVarsConfigLevel(MappingConfigLevel):
def __init__(self, name=LEVEL_NAME_ENV_VARS, mapping=None, **kwargs):
super().__init__(name=name, mapping=mapping, **kwargs)
def read_config(self, input_data=None):
if input_data is None:
input_data = os.environ
config_data = super().read_config(input_data)
return config_data
class CLIArgsConfigLevel(MappingConfigLevel):
def __init__(self, name=LEVEL_NAME_CLI_ARGS, mapping=None, **kwargs):
super().__init__(name=name, mapping=mapping, **kwargs)
def read_config(self, input_data=None):
if input_data is None:
arg_parser = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS,
usage=argparse.SUPPRESS,
add_help=False,
)
for arg_name in self.mapping.keys():
arg_parser.add_argument(f"--{arg_name.replace('_', '-')}")
cli_args, __ = arg_parser.parse_known_args()
else:
cli_args = input_data
# Convert to dict if cli_args is a namespace, ignoring errors
try:
cli_args = vars(cli_args)
except TypeError:
pass
config_data = super().read_config(cli_args)
return config_data
# --- Config handler classes #
class ConfigHandler(brokkr.utils.misc.AutoReprMixin):
def __init__(self, config_type=None, config_levels=None):
self.config_type = (ConfigType(DEFAULT_CONFIG_TYPE_NAME)
if config_type is None else config_type)
config_levels = [] if config_levels is None else config_levels
self.config_levels = {}
if (self.config_type.defaults is not None
and not any((isinstance(config_level, DefaultsConfigLevel)
for config_level in config_levels))):
defaults_config_level = DefaultsConfigLevel(
config_type=self.config_type)
config_levels = [defaults_config_level, *config_levels]
for config_level in config_levels:
self.config_levels[config_level.name] = config_level
def read_configs(self, config_names=None):
configs = {}
if config_names is None:
config_names = self.config_levels.keys()
configs = {config_name: self.config_levels[config_name].read_config()
for config_name in config_names}
if self.config_type.overlay is not None:
configs[LEVEL_NAME_OVERLAY] = copy.deepcopy(
self.config_type.overlay)
return configs
def render_config(self, configs=None):
if configs is None:
configs = self.read_configs()
# Recursively build final config dict from succession of loaded configs
rendered_config = copy.deepcopy(
configs[list(configs.keys())[0]])
for config_name in list(configs.keys())[1:]:
if configs[config_name]:
rendered_config = brokkr.utils.misc.update_dict_recursive(
rendered_config, configs[config_name])
return rendered_config
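# Illustrative usage sketch (hypothetical names and paths): layering built-in
# defaults, a local TOML file and environment variables; later levels override
# earlier ones when the config is rendered.
#
#     config_type = ConfigType(
#         "unit", defaults={"interval_s": 60},
#         local_config_path=Path("~/.config/brokkr"))
#     handler = ConfigHandler(config_type, config_levels=[
#         FileConfigLevel(config_type=config_type),
#         EnvVarsConfigLevel(
#             mapping={"BROKKR_INTERVAL_S": ("interval_s",)},
#             config_type=config_type),
#         ])
#     rendered = handler.render_config()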
CONFIG_LEVEL_PRESETS = {
LEVEL_NAME_SYSTEM: {LEVEL_ARGS: {
"preset": True}},
LEVEL_NAME_SYSTEM_CLIENT: {LEVEL_ARGS: {
"preset":
# Check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call.
elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
name, option
):
return True
return False
def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
if not getattr(self.obj, "__test__", True):
return []
# Avoid random getattrs and peek in the __dict__ instead.
dicts = [getattr(self.obj, "__dict__", {})]
if isinstance(self.obj, type):
for basecls in self.obj.__mro__:
dicts.append(basecls.__dict__)
# In each class, nodes should be definition ordered. Since Python 3.6,
# __dict__ is definition ordered.
seen: Set[str] = set()
dict_values: List[List[Union[nodes.Item, nodes.Collector]]] = []
ihook = self.ihook
for dic in dicts:
values: List[Union[nodes.Item, nodes.Collector]] = []
# Note: seems like the dict can change during iteration -
# be careful not to remove the list() without consideration.
for name, obj in list(dic.items()):
if name in IGNORED_ATTRIBUTES:
continue
if name in seen:
continue
seen.add(name)
res = ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj
)
if res is None:
continue
elif isinstance(res, list):
values.extend(res)
else:
values.append(res)
dict_values.append(values)
# Between classes in the class hierarchy, reverse-MRO order -- nodes
# inherited from base classes should come before subclasses.
result = []
for values in reversed(dict_values):
result.extend(values)
return result
def _genfunctions(self, name: str, funcobj) -> Iterator["Function"]:
modulecol = self.getparent(Module)
assert modulecol is not None
module = modulecol.obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
fixtureinfo = definition._fixtureinfo
# pytest_generate_tests impls call metafunc.parametrize() which fills
# metafunc._calls, the outcome of the hook.
metafunc = Metafunc(
definition=definition,
fixtureinfo=fixtureinfo,
config=self.config,
cls=cls,
module=module,
_ispytest=True,
)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if cls is not None and hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
if not metafunc._calls:
yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
else:
# Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs.
fm = self.session._fixturemanager
fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
# Add_funcarg_pseudo_fixture_def may have shadowed some fixtures
# with direct parametrization, so make sure we update what the
# function really needs.
fixtureinfo.prune_dependency_tree()
for callspec in metafunc._calls:
subname = f"{name}[{callspec.id}]"
yield Function.from_parent(
self,
name=subname,
callspec=callspec,
fixtureinfo=fixtureinfo,
keywords={callspec.id: True},
originalname=name,
)
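# Illustrative: a pytest_generate_tests hook such as
#
#     def pytest_generate_tests(metafunc):
#         if "value" in metafunc.fixturenames:
#             metafunc.parametrize("value", [1, 2])
#
# fills metafunc._calls, so _genfunctions yields one Function per call with
# the id appended to the original name, e.g. "test_answer[1]" and
# "test_answer[2]".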
class Module(nodes.File, PyCollector):
"""Collector for test classes and functions."""
def _getobj(self):
return self._importtestmodule()
def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
self._inject_setup_module_fixture()
self._inject_setup_function_fixture()
self.session._fixturemanager.parsefactories(self)
return super().collect()
def _inject_setup_module_fixture(self) -> None:
"""Inject a hidden autouse, module scoped fixture into the collected module object
that invokes setUpModule/tearDownModule if either or both are available.
Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
other fixtures (#517).
"""
has_nose = self.config.pluginmanager.has_plugin("nose")
setup_module = _get_first_non_fixture_func(
self.obj, ("setUpModule", "setup_module")
)
if setup_module is None and has_nose:
setup_module = _get_first_non_fixture_func(self.obj, ("setup",))
teardown_module = _get_first_non_fixture_func(
self.obj, ("tearDownModule", "teardown_module")
)
if teardown_module is None and has_nose:
teardown_module = _get_first_non_fixture_func(self.obj, ("teardown",))
if setup_module is None and teardown_module is None:
return
@fixtures.fixture(
autouse=True,
scope="module",
# Use a unique name to speed up lookup.
name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
)
def xunit_setup_module_fixture(request) -> Generator[None, None, None]:
if setup_module is not None:
_call_with_optional_argument(setup_module, request.module)
yield
if teardown_module is not None:
_call_with_optional_argument(teardown_module, request.module)
self.obj.__pytest_setup_module = xunit_setup_module_fixture
def _inject_setup_function_fixture(self) -> None:
"""Inject a hidden autouse, function scoped fixture into the collected module object
that invokes setup_function/teardown_function if either or both are available.
Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
other fixtures (#517).
"""
setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
teardown_function = _get_first_non_fixture_func(
self.obj, ("teardown_function",)
)
if setup_function is None and teardown_function is None:
return
@fixtures.fixture(
autouse=True,
scope="function",
# Use a unique name to speed up lookup.
name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
)
def xunit_setup_function_fixture(request) -> Generator[None, None, None]:
if request.instance is not None:
# in this case we are bound to an instance, so we need to let
# setup_method handle this
yield
return
if setup_function is not None:
_call_with_optional_argument(setup_function, request.function)
yield
if teardown_function is not None:
_call_with_optional_argument(teardown_function, request.function)
self.obj.__pytest_setup_function = xunit_setup_function_fixture
def _importtestmodule(self):
# We assume we are only called once per module.
importmode = self.config.getoption("--import-mode")
try:
mod = import_path(self.path, mode=importmode, root=self.config.rootpath)
except SyntaxError as e:
raise self.CollectError(
ExceptionInfo.from_current().getrepr(style="short")
) from e
except ImportPathMismatchError as e:
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules" % e.args
) from e
except ImportError as e:
exc_info = ExceptionInfo.from_current()
if self.config.getoption("verbose") < 2:
exc_info.traceback = exc_info.traceback.filter(filter_traceback)
exc_repr = (
exc_info.getrepr(style="short")
if exc_info.traceback
else exc_info.exconly()
)
formatted_tb = str(exc_repr)
raise self.CollectError(
"ImportError while importing test module '{path}'.\n"
"Hint: make sure your test modules/packages have valid Python names.\n"
"Traceback:\n"
"{traceback}".format(path=self.path, traceback=formatted_tb)
) from e
except skip.Exception as e:
if e.allow_module_level:
raise
raise self.CollectError(
"Using pytest.skip outside of a test will skip the entire module. "
"If that's your intention, pass `allow_module_level=True`. "
"If you want to skip a specific test or an entire class, "
"use the @pytest.mark.skip or @pytest.mark.skipif decorators."
) from e
self.config.pluginmanager.consider_module(mod)
return mod
class Package(Module):
def __init__(
self,
fspath: Optional[LEGACY_PATH],
parent: nodes.Collector,
# NOTE: following args are unused:
config=None,
session=None,
nodeid=None,
        path: Optional[Path] = None,
) -> None:
# NOTE: Could be just the following, but kept as-is for compat.
# nodes.FSCollector.__init__(self, fspath, parent=parent)
session = parent.session
nodes.FSCollector.__init__(
self,
fspath=fspath,
path=path,
parent=parent,
config=config,
session=session,
nodeid=nodeid,
)
self.name = self.path.parent.name
def setup(self) -> None:
# Not using fixtures to call setup_module here because autouse fixtures
# from packages are not called automatically (#4085).
setup_module = _get_first_non_fixture_func(
self.obj, ("setUpModule", "setup_module")
)
if setup_module is not None:
_call_with_optional_argument(setup_module, self.obj)
teardown_module = _get_first_non_fixture_func(
self.obj, ("tearDownModule", "teardown_module")
)
if teardown_module is not None:
func = partial(_call_with_optional_argument, teardown_module, self.obj)
self.addfinalizer(func)
def gethookproxy(self, fspath: "os.PathLike[str]"):
warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
return self.session.gethookproxy(fspath)
def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool:
warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
return self.session.isinitpath(path)
def _recurse(self, direntry: "os.DirEntry[str]") -> bool:
if direntry.name == "__pycache__":
return False
fspath = Path(direntry.path)
ihook = self.session.gethookproxy(fspath.parent)
if ihook.pytest_ignore_collect(fspath=fspath, config=self.config):
return False
norecursepatterns = self.config.getini("norecursedirs")
if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns):
return False
return True
def _collectfile(
self, fspath: Path, handle_dupes: bool = True
) -> Sequence[nodes.Collector]:
assert (
fspath.is_file()
), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink()
)
ihook = self.session.gethookproxy(fspath)
if not self.session.isinitpath(fspath):
if ihook.pytest_ignore_collect(fspath=fspath, config=self.config):
return ()
if handle_dupes:
keepduplicates = self.config.getoption("keepduplicates")
if not keepduplicates:
duplicate_paths = self.config.pluginmanager._duplicatepaths
if fspath in duplicate_paths:
return ()
else:
duplicate_paths.add(fspath)
return ihook.pytest_collect_file(fspath=fspath, parent=self) # type: ignore[no-any-return]
def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
this_path = self.path.parent
init_module = this_path / "__init__.py"
if init_module.is_file() and path_matches_patterns(
init_module, self.config.getini("python_files")
):
yield Module.from_parent(self, path=init_module)
pkg_prefixes: Set[Path] = set()
for direntry in visit(str(this_path), recurse=self._recurse):
path = Path(direntry.path)
# We will visit our own __init__.py file, in which case we skip it.
if direntry.is_file():
if direntry.name == "__init__.py" and path.parent == this_path:
continue
parts_ = parts(direntry.path)
if any(
str(pkg_prefix) in parts_ and pkg_prefix / "__init__.py" != path
for pkg_prefix in pkg_prefixes
):
continue
if direntry.is_file():
yield from self._collectfile(path)
elif not direntry.is_dir():
# Broken symlink or invalid/missing file.
continue
elif path.joinpath("__init__.py").is_file():
pkg_prefixes.add(path)
def _call_with_optional_argument(func, arg) -> None:
"""Call the given function with the given argument if func accepts one argument, otherwise
calls func without arguments."""
arg_count = func.__code__.co_argcount
if inspect.ismethod(func):
arg_count -= 1
if arg_count:
func(arg)
else:
func()
def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> Optional[object]:
"""Return the attribute from the given object to be used as a setup/teardown
xunit-style function, but only if not marked as a fixture to avoid calling it twice."""
for name in names:
meth: Optional[object] = getattr(obj, name, None)
if meth is not None and fixtures.getfixturemarker(meth) is None:
return meth
return None
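# Illustrative sketch (not part of pytest itself): how _call_with_optional_argument
# dispatches on a hook's signature.  Both hook functions below are made up for the
# example; pytest would discover real ones via _get_first_non_fixture_func above.
def _example_optional_argument_dispatch():
    def setup_module(module):   # accepts one argument -> called with it
        print("setting up", module)
    def teardown_module():      # accepts no arguments -> called bare
        print("tearing down")
    _call_with_optional_argument(setup_module, "fake_module")
    _call_with_optional_argument(teardown_module, "fake_module")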
class Class(PyCollector):
"""Collector for test methods."""
@classmethod
def from_parent(cls, parent, *, name, obj=None, **kw):
"""The public constructor."""
return super().from_parent(name=name, parent=parent, **kw)
def newinstance(self):
return self.obj()
def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
if not safe_getattr(self.obj, "__test__", True):
return []
if hasinit(self.obj):
assert self.parent is not None
self.warn(
PytestCollectionWarning(
"cannot collect test class | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DhcpOptionsSetArgs', 'DhcpOptionsSet']
@pulumi.input_type
class DhcpOptionsSetArgs:
def __init__(__self__, *,
associate_vpcs: Optional[pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]]] = None,
dhcp_options_set_description: Optional[pulumi.Input[str]] = None,
dhcp_options_set_name: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
domain_name_servers: Optional[pulumi.Input[str]] = None,
dry_run: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a DhcpOptionsSet resource.
:param pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]] associate_vpcs: AssociateVpcs. Number of VPCs that can be associated with each DHCP options set is 10.
:param pulumi.Input[str] dhcp_options_set_description: The description of the DHCP options set. The description must be 2 to 256 characters in length and cannot start with `http://` or `https://`.
:param pulumi.Input[str] dhcp_options_set_name: The name of the DHCP options set. The name must be 2 to 128 characters in length and can contain letters, Chinese characters, digits, underscores (_), and hyphens (-). It must start with a letter or a Chinese character.
:param pulumi.Input[str] domain_name: The root domain, for example, example.com. After a DHCP options set is associated with a Virtual Private Cloud (VPC) network, the root domain in the DHCP options set is automatically synchronized to the ECS instances in the VPC network.
:param pulumi.Input[str] domain_name_servers: The DNS server IP addresses. Up to four DNS server IP addresses can be specified. IP addresses must be separated with commas (,).Before you specify any DNS server IP address, all ECS instances in the associated VPC network use the IP addresses of the Alibaba Cloud DNS servers, which are `172.16.58.3` and `192.168.127.12`.
:param pulumi.Input[bool] dry_run: Specifies whether to precheck this request only. Valid values: `true` or `false`.
"""
if associate_vpcs is not None:
pulumi.set(__self__, "associate_vpcs", associate_vpcs)
if dhcp_options_set_description is not None:
pulumi.set(__self__, "dhcp_options_set_description", dhcp_options_set_description)
if dhcp_options_set_name is not None:
pulumi.set(__self__, "dhcp_options_set_name", dhcp_options_set_name)
if domain_name is not None:
pulumi.set(__self__, "domain_name", domain_name)
if domain_name_servers is not None:
pulumi.set(__self__, "domain_name_servers", domain_name_servers)
if dry_run is not None:
pulumi.set(__self__, "dry_run", dry_run)
@property
@pulumi.getter(name="associateVpcs")
def associate_vpcs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]]]:
"""
AssociateVpcs. Number of VPCs that can be associated with each DHCP options set is 10.
"""
return pulumi.get(self, "associate_vpcs")
@associate_vpcs.setter
def associate_vpcs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]]]):
pulumi.set(self, "associate_vpcs", value)
@property
@pulumi.getter(name="dhcpOptionsSetDescription")
def dhcp_options_set_description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the DHCP options set. The description must be 2 to 256 characters in length and cannot start with `http://` or `https://`.
"""
return pulumi.get(self, "dhcp_options_set_description")
@dhcp_options_set_description.setter
def dhcp_options_set_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dhcp_options_set_description", value)
@property
@pulumi.getter(name="dhcpOptionsSetName")
def dhcp_options_set_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the DHCP options set. The name must be 2 to 128 characters in length and can contain letters, Chinese characters, digits, underscores (_), and hyphens (-). It must start with a letter or a Chinese character.
"""
return pulumi.get(self, "dhcp_options_set_name")
@dhcp_options_set_name.setter
def dhcp_options_set_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dhcp_options_set_name", value)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> Optional[pulumi.Input[str]]:
"""
The root domain, for example, example.com. After a DHCP options set is associated with a Virtual Private Cloud (VPC) network, the root domain in the DHCP options set is automatically synchronized to the ECS instances in the VPC network.
"""
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="domainNameServers")
def domain_name_servers(self) -> Optional[pulumi.Input[str]]:
"""
The DNS server IP addresses. Up to four DNS server IP addresses can be specified. IP addresses must be separated with commas (,).Before you specify any DNS server IP address, all ECS instances in the associated VPC network use the IP addresses of the Alibaba Cloud DNS servers, which are `172.16.58.3` and `192.168.127.12`.
"""
return pulumi.get(self, "domain_name_servers")
@domain_name_servers.setter
def domain_name_servers(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_name_servers", value)
@property
@pulumi.getter(name="dryRun")
def dry_run(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether to precheck this request only. Valid values: `true` or `false`.
"""
return pulumi.get(self, "dry_run")
@dry_run.setter
def dry_run(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "dry_run", value)
@pulumi.input_type
class _DhcpOptionsSetState:
def __init__(__self__, *,
associate_vpcs: Optional[pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]]] = None,
dhcp_options_set_description: Optional[pulumi.Input[str]] = None,
dhcp_options_set_name: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
domain_name_servers: Optional[pulumi.Input[str]] = None,
dry_run: Optional[pulumi.Input[bool]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DhcpOptionsSet resources.
:param pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]] associate_vpcs: AssociateVpcs. Number of VPCs that can be associated with each DHCP options set is 10.
:param pulumi.Input[str] dhcp_options_set_description: The description of the DHCP options set. The description must be 2 to 256 characters in length and cannot start with `http://` or `https://`.
:param pulumi.Input[str] dhcp_options_set_name: The name of the DHCP options set. The name must be 2 to 128 characters in length and can contain letters, Chinese characters, digits, underscores (_), and hyphens (-). It must start with a letter or a Chinese character.
:param pulumi.Input[str] domain_name: The root domain, for example, example.com. After a DHCP options set is associated with a Virtual Private Cloud (VPC) network, the root domain in the DHCP options set is automatically synchronized to the ECS instances in the VPC network.
:param pulumi.Input[str] domain_name_servers: The DNS server IP addresses. Up to four DNS server IP addresses can be specified. IP addresses must be separated with commas (,).Before you specify any DNS server IP address, all ECS instances in the associated VPC network use the IP addresses of the Alibaba Cloud DNS servers, which are `172.16.58.3` and `192.168.127.12`.
:param pulumi.Input[bool] dry_run: Specifies whether to precheck this request only. Valid values: `true` or `false`.
:param pulumi.Input[str] owner_id: The ID of the account to which the DHCP options set belongs.
:param pulumi.Input[str] status: The status of the DHCP options set. Valid values: `Available`, `InUse` or `Pending`. `Available`: The DHCP options set is available for use. `InUse`: The DHCP options set is in use. `Pending`: The DHCP options set is being configured.
"""
if associate_vpcs is not None:
pulumi.set(__self__, "associate_vpcs", associate_vpcs)
if dhcp_options_set_description is not None:
pulumi.set(__self__, "dhcp_options_set_description", dhcp_options_set_description)
if dhcp_options_set_name is not None:
pulumi.set(__self__, "dhcp_options_set_name", dhcp_options_set_name)
if domain_name is not None:
pulumi.set(__self__, "domain_name", domain_name)
if domain_name_servers is not None:
pulumi.set(__self__, "domain_name_servers", domain_name_servers)
if dry_run is not None:
pulumi.set(__self__, "dry_run", dry_run)
if owner_id is not None:
pulumi.set(__self__, "owner_id", owner_id)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="associateVpcs")
def associate_vpcs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]]]:
"""
AssociateVpcs. Number of VPCs that can be associated with each DHCP options set is 10.
"""
return pulumi.get(self, "associate_vpcs")
@associate_vpcs.setter
def associate_vpcs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DhcpOptionsSetAssociateVpcArgs']]]]):
pulumi.set(self, "associate_vpcs", value)
@property
@pulumi.getter(name="dhcpOptionsSetDescription")
def dhcp_options_set_description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the DHCP options set. The description must be 2 to 256 characters in length and cannot start with `http://` or `https://`.
"""
return pulumi.get(self, "dhcp_options_set_description")
@dhcp_options_set_description.setter
def dhcp_options_set_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dhcp_options_set_description", value)
@property
@pulumi.getter(name="dhcpOptionsSetName")
def dhcp_options_set_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the DHCP options set. The name must be 2 to 128 characters in length and can contain letters, Chinese characters, digits, underscores (_), and hyphens (-). It must start with a letter or a Chinese character.
"""
return pulumi.get(self, "dhcp_options_set_name")
@dhcp_options_set_name.setter
def dhcp_options_set_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dhcp_options_set_name", value)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> Optional[pulumi.Input[str]]:
"""
The root domain, for example, example.com. After a DHCP options set is associated with a Virtual Private Cloud (VPC) network, the root domain in the DHCP options set is automatically synchronized to the ECS instances in the VPC network.
"""
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="domainNameServers")
def domain_name_servers(self) -> Optional[pulumi.Input[str]]:
"""
The DNS server IP addresses. Up to four DNS server IP addresses can be specified. IP addresses must be separated with commas (,).Before you specify any DNS server IP address, all ECS instances in the associated VPC network use the IP addresses of the Alibaba Cloud DNS servers, which are `172.16.58.3` and `192.168.127.12`.
"""
return pulumi.get(self, "domain_name_servers")
@domain_name_servers.setter
def domain_name_servers(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_name_servers", value)
@property
@pulumi.getter(name="dryRun")
def dry_run(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether | |
if word in temp_res and not temp_res[word]:
                #Python dict pop() removes the given key and its value and returns the removed value; the key must be given, otherwise the default value is returned.
temp_res.pop(word)
if word in temp_lda and not temp_lda[word]:
temp_lda.pop(word)
if not temp_res and not temp_lda:
return []
        #math.pow(x, y) returns x raised to the power y.
cof = math.pow(10e-6, len(words) - max(len(temp_res), len(temp_lda)))
level = math.pow(10e-6, len(words)+1)
rank = self.cal_rank(temp_res,temp_lda,cof)
sortrk = sorted(rank.items(), key=lambda item: item[1], reverse=True)
result = [(r[0],r[1]) for r in sortrk if r[1]>level]
        #result: [(teacher_id, weight), ...]
return result
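# Illustrative sketch (standalone, made-up sizes; relies on this module's existing
# math import): the penalty and cut-off level computed above.  Note that 10e-6 as
# written equals 1e-05.
def _example_rank_threshold(num_query_words=3, num_matched_words=2):
    cof = math.pow(10e-6, num_query_words - num_matched_words)  # penalty for unmatched words
    level = math.pow(10e-6, num_query_words + 1)                # minimum score kept in results
    return cof, level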
class Query_for_institution:
def __init__(self,subs):
# [{"code": '01', "k": 46}, {"code": '02', "k": 98}]
self.subs=subs
#{teacher_id1:{id:xx,name:xxx},...}
self.id_name = pickle.load(open(root + '/InstitutionName', 'rb'))
#self.Subject_for_institution {code1:Subject_for_institution(sub1),sode2:Subject_for_institution2(sub2)}
self.Subject_for_institution={sub['code']:Subject_for_institution(sub,self.id_name) for sub in self.subs}
self.stop=[]
stopword = [line.strip() for line in open('fenci/stopwords.txt', encoding='utf-8').readlines()]
stopword1 = [line.strip() for line in open('fenci/stop_word_4.txt', encoding='utf-8').readlines()]
stopwords = [i.split(':')[0] for i in stopword1]
self.stop.extend(stopword)
self.stop.extend(stopwords)
self.fill = ['vn', 'n', 'nr', 'nr1', 'nr2', 'nrj', 'nrf', 'ns', 'nsf',
'nt', 'nz', 'nl', 'ng']
jieba.load_userdict('fenci/userdict.txt')
def prints(self,result):
# {'0828': [(23711, 0.031088879496921837), (23721, 0.003430221466157156), (143479, 0.00010151384288551602)],
# '0829': [],
# '0830': [(126955, 0.0007021102104810927), (68129, 0.00013266169457311943), (22286, 0.00011640344697493587),
# (5818, 1.821814740424121e-05)]}
for code in result:
size=len(result[code])
if size==0:
continue
            #number of matching institutions
            print("Subject: %s, number of related institutions: %d" % (code, size))
teacher=result[code]
print(code)
for t in teacher:
                #institution name: (id, weight)
print(self.id_name[t[0]]["SCHOOL_NAME"]+self.id_name[t[0]]["NAME"]+":"+str(t))
print()
def do_query(self,text,filer):
        #segment the input text into words
seg_list = pseg.cut(text)
words = []
for word, flag in seg_list:
if flag in self.fill and word not in self.stop:
                #keep nouns that are not stopwords as search terms
words.append(word)
print(words)
if "school" in filer and len(filer["school"])>0:
teacher_id={t for t in self.id_name if self.id_name[t]['SCHOOL_NAME'] in filer['school']}
else:
teacher_id=None
if "name" in filer and len(filer["name"])>0:
if teacher_id:
teacher_id={t for t in teacher_id if self.id_name[t]['name'].find(filer["name"])>=0}
else:
teacher_id = {t for t in self.id_name if self.id_name[t]['name'].find(filer["name"])>=0}
result={}
#teacher_id dict None
for sub in self.Subject_for_institution:
if "code" in filer and len(filer['code'])>0 and sub not in filer['code']:
continue
else:
# self.Subject_for_institution {code1:Subject_for_institution(sub1),sode2:Subject_for_institution2(sub2)}
result[sub]=self.Subject_for_institution[sub].do_query(words,teacher_id)
#result {code:[teacher_id:value,].,code:[],...}
return result
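# Illustrative usage sketch (the subject code, k value and query text are assumptions,
# and the pickled index files under `root` must already exist for this to run):
def _example_institution_query():
    q = Query_for_institution([{"code": "0828", "k": 36}])
    # result maps each subject code to a ranked list of (institution_id, score) pairs
    result = q.do_query("催化剂", {"school": [], "name": "", "code": []})
    q.prints(result)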
class Subject_for_school():
# subs = [{"code": '0828', "k": 36}, {"code": '0829', "k": 14}]
# self.subs = subs
# self.Subject_for_school = {sub['code']: Subject_for_school(sub, self.id_name) for sub in self.subs}
# self.Subject_for_school = {0828:Subject_for_school({"code": '0828', "k": 36},self.id_name),0829:Subject_for_school({"code": '0829', "k": 14}}
def __init__(self, sub, id_name):
# ({"code": '0828', "k": 36},self.id_name)
self.sub = sub
        # self.id_name is a dict
self.id_name = id_name
code = self.sub['code']
k = self.sub['k']
print("load:" + code)
self.path = root + '/' + code + '/k' + str(k)
        # word index: wordIndex {word1: {teacher_id1: count of word1 / total words, ..., col_fre: word[w] / length}, word2: {...}}
self.lmindex = pickle.load(open(self.path + '/s_wordIndex', 'rb'))
        # word-to-topic mapping: wordToTopic, e.g. {'催化剂': {0: 0.104, 1: 0.0, 2: 0.0, 3: 0.0}}
self.ldaword = pickle.load(open(self.path + '/wordToTopic', 'rb'))
        # teacher-to-topic mapping {teacher1: {topic1: p1, topic2: p2}, ...}
self.ldaexp = pickle.load(open(self.path + '/schoolTopic', 'rb'))
        # teacher PageRank scores {teacher_id: value, ...}
self.pagerank = pickle.load(open(self.path + '/schoolRank', 'rb'))
self.cal = 0.9
def cal_lda_one_word(self, word, teacher_id):
'''
        Not used.
:param word:
:param teacher_id:
:return:
'''
"""计算单个词的专家lda得分"""
# self.ldaword wordToTopi {'催化剂': {0: 0.104, 1: 0.0, 2: 0.0, 3: 0.0, }
ld = self.ldaword.get(word)
        # sort: {topic_id: value, ...}, keeping values > 1.0e-06, in descending order of value
sort = {}
# res {teacher_id1:value,...}
res = {}
# ld '催化剂': {0: 0.104, 1: 0.0, 2: 0.0, 3: 0.0, }
if ld != None:
if teacher_id is not None:
ld = {k: ld[k] for k in ld if k in teacher_id}
            # sort the dict items by value in descending order, returning a list [(topic_id, value), ...]
sortld = sorted(ld.items(), key=lambda item: item[1], reverse=True)
a = [r for r in sortld if r[1] > 1.0e-06]
for i in a:
sort[i[0]] = i[1]
for j in sort.keys():
            # j is a topic_id
            # ldaexp: teacher-to-topic mapping {teacher1: {topic1: p1, topic2: p2}, ...}
for m in self.ldaexp.keys():
                # m is a teacher_id
if j in self.ldaexp[m]:
                    # multiply teacher m's weight for topic j by topic j's weight for this word
res[m] = self.ldaexp[m][j] * sort[j]
return res
def cal_one_word(self, word, teacher_id):
'''
        :param word: a single query word
:param teacher_id:
:return:
'''
"""计算单个词的专家语言模型得分"""
# lm = wordIndex {word1: {teacher_id1: word1出现的次数 / 总词数,teacher_id1: word1出现的次数 / 总词数, col_fre: word[w] / length}, word1: {teacher_id1: word1出现的次数 / 总词数,teacher_id1: word1出现的次数 / 总词数}}
lm = self.lmindex.get(word) # type dict
res = {}
        # apply the smoothing coefficient
        # lm may be None: this is one subject's inverted index, which may not contain the word
if lm != None:
# lm {teacher_id1: word1出现的次数 / 总词数,teacher_id2: word1出现的次数 / 总词数, col_fre: word[w] / length}
if teacher_id is not None:
lm = {k: lm[k] for k in lm if k in teacher_id or k == "col_fre"}
for l in lm.keys():
# l teacher_id
if l != 'col_fre':
res[l] = self.cal * lm[l] + (1 - self.cal) * lm['col_fre']
res['col'] = lm['col_fre']
# res = {'teacher_id':value,...,col:lm['col_fre']}
return res
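        # Illustrative sketch (standalone numbers, not from the data): the score above is
        # Jelinek-Mercer smoothing, mixing a per-teacher term frequency with the collection
        # frequency.  With self.cal = 0.9:
        #     0.9 * 0.02 + (1 - 0.9) * 0.001 == 0.0181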
def cal_rank(self, res, lda, cof):
'''
:param res: {word1:{'teacher_id':value,...,col:lm['col_fre'],...}
:param lda: {word1:{teacher_id1:value,...},...}
:param cof: cof = math.pow(10e-6, len(words) - max(len(temp_res), len(temp_lda)))
:return:
'''
"""计算专家排序"""
rank = {}
        # wd iterates over the words in res; res[wd] maps teacher ids to that word's score; r is a teacher id from res[wd]
# exp_list [teacher_id1,teacher_id2,...]
exp_list = [r for wd in res.keys() for r in res[wd]]
exp_list = set(exp_list)
if 'col' in exp_list:
            # exp_list should hold only teacher ids, so drop the 'col' entry
exp_list.remove('col')
# rank {'teacher_id1':cof,teacher_id2:cof}
# r teacher_id
for r in exp_list:
rank[r] = cof
# wd word
for wd in res.keys():
if len(res[wd]) != 0:
                # if res[wd] has an entry for teacher r, use it; otherwise fall back to the collection frequency
if res[wd].get(r):
rank[r] *= res[wd][r]
else:
rank[r] *= res[wd]['col']
if wd in lda and lda[wd].get(r):
adjust = lda[wd][r]
rank[r] *= adjust
else:
rank[r] *= 10e-6
for wd in lda:
if wd not in res:
rank[r] *= lda[wd][r]
if self.pagerank.get(r) and r in self.id_name and self.id_name[r]['total'] is not None:
print(rank[r])
print("pagerank ", self.pagerank[r])
print("id_name[r] ", self.id_name[r])
print("total ", self.id_name[r]["total"])
rank[r] *= self.pagerank[r] * self.id_name[r]["total"]
return rank
def do_query(self, words, teacher_id):
        # words is the list of query keywords; teacher_id defaults to None
# temp_res {word1:{'teacher_id':value,...,col:lm['col_fre']},word2:{'teacher_id':value,...,col:lm['col_fre']},...}
temp_res = {}
# res {word1:{teacher_id1:value,...},...}
temp_lda = {}
for word in words:
temp_res[word] = self.cal_one_word(word, teacher_id)
temp_lda[word] = self.cal_lda_one_word(word, teacher_id)
for word in words:
if word in temp_res and not temp_res[word]:
                # Python dict pop() removes the given key and its value and returns the removed value; the key must be given, otherwise the default value is returned.
temp_res.pop(word)
if word in temp_lda and not temp_lda[word]:
temp_lda.pop(word)
if not temp_res and not temp_lda:
return []
        # math.pow(x, y) returns x raised to the power y.
cof = math.pow(10e-6, len(words) - max(len(temp_res), len(temp_lda)))
level = math.pow(10e-6, len(words) + 1)
rank = self.cal_rank(temp_res, temp_lda, cof)
sortrk = sorted(rank.items(), key=lambda item: item[1], reverse=True)
result = [(r[0], r[1]) for r in sortrk if r[1] > level]
        # result: [(teacher_id, weight), ...]
return result
class Query_for_school():
def __init__(self, subs):
# [{"code": '01', "k": 46}, {"code": '02', "k": 98}]
self.subs = subs
# {teacher_id1:{id:xx,name:xxx},...}
self.id_name = pickle.load(open(root + '/SchoolName', 'rb'))
# self.Subject_for_school {code1:Subject_for_school(sub1),sode2:Subject_for_school2(sub2)}
self.Subject_for_school = {sub['code']: Subject_for_school(sub, self.id_name) for sub in self.subs}
self.stop = []
stopword = [line.strip() for line in open('fenci/stopwords.txt', encoding='utf-8').readlines()]
stopword1 = [line.strip() for line in open('fenci/stop_word_4.txt', encoding='utf-8').readlines()]
stopwords = [i.split(':')[0] for i in stopword1]
self.stop.extend(stopword)
self.stop.extend(stopwords)
self.fill = ['vn', 'n', 'nr', 'nr1', 'nr2', 'nrj', 'nrf', 'ns', 'nsf',
'nt', 'nz', 'nl', 'ng']
jieba.load_userdict('fenci/userdict.txt')
def prints(self, result):
# {'0828': [(23711, 0.031088879496921837), (23721, 0.003430221466157156), (143479, 0.00010151384288551602)],
# '0829': [],
# '0830': [(126955, 0.0007021102104810927), (68129, 0.00013266169457311943), (22286, 0.00011640344697493587),
# (5818, 1.821814740424121e-05)]}
for code in result:
size = len(result[code])
if size == 0:
continue
            # number of matching schools
            print("Subject: %s, number of related schools: %d" % (code, size))
teacher = result[code]
print(code)
for t in teacher:
                # school name: (id, weight)
print(self.id_name[t[0]]["NAME"] + ":" + str(t))
print()
def prints_for_city(self, result, province):
# {'0828': [(23711, 0.031088879496921837), (23721, 0.003430221466157156), (143479, 0.00010151384288551602)],
# '0829': [],
# '0830': [(126955, 0.0007021102104810927), (68129, 0.00013266169457311943), (22286, 0.00011640344697493587),
# (5818, 1.821814740424121e-05)]}
city_info = {}
        #SchoolAddress {school_id: (province_id, city_id), }
schoolAddress = pickle.load(open(root + '/schooladdress', 'rb'))
for code in result:
size = len(result[code])
if size == 0:
continue
            # number of results for this subject
            print("Subject: %s, number of related cities: %d" % (code, size))
#[(23711, 0.031088879496921837), (23721, 0.003430221466157156), (143479, 0.00010151384288551602)]
school_info = result[code]
print(code)
print(school_info)
for t in school_info:
#print(self.id_name[t[0]]["NAME"] + ":" + str(t))
school_id = t[0]
if school_id in schoolAddress:
city_id = schoolAddress[school_id][1]
else:
continue
if city_id in city_info:
city_info[city_id] += t[1]
else:
city_info[city_id] = t[1]
city_rank = dict(sorted(city_info.items(), key=lambda x: x[1], reverse=True))
print("city rank ", city_rank)
for city in city_rank:
if city in provinceCity[province]:
print(city+' '+str(city_rank[city]))
print()
def prints_for_province(self, result):
# {'0828': [(23711, 0.031088879496921837), (23721, 0.003430221466157156), (143479, 0.00010151384288551602)],
# '0829': [],
# '0830': [(126955, 0.0007021102104810927), (68129, 0.00013266169457311943), (22286, 0.00011640344697493587),
# (5818, 1.821814740424121e-05)]}
province_info = {}
        #SchoolAddress {school_id: (province_id, city_id), }
schoolAddress = pickle.load(open(root + '/schooladdress', 'rb'))
for code in result:
size = len(result[code])
if size == 0:
continue
            # number of results for this subject
            print("Subject: %s, number of related provinces: %d" % (code, size))
#[(23711, 0.031088879496921837), (23721, 0.003430221466157156), (143479, 0.00010151384288551602)]
school_info = result[code]
print(code)
for t in school_info:
                # school name: (id, weight)
                #print(self.id_name[t[0]]["NAME"] + ":" + str(t))
can be to stop the loop'
' (raise PulseLoopStop in callback or event_loop_stop() from another thread),'
' doing whatever pulse calls synchronously and then resuming event_listen() loop.' )
self._loop_running, self._loop_stop = True, False
try: yield self._loop
finally:
self._loop_running = False
if self._loop_closed: self.close() # to free() after stopping it
def _pulse_run(self):
with self._pulse_loop() as loop: c.pa.mainloop_run(loop, self._ret)
def _pulse_iterate(self, block=True):
with self._pulse_loop() as loop: c.pa.mainloop_iterate(loop, int(block), self._ret)
@contextmanager
def _pulse_op_cb(self, raw=False):
act_id = next(self._action_ids)
self._actions[act_id] = None
try:
cb = lambda s=True,k=act_id: self._actions.update({k: bool(s)})
if not raw: cb = c.PA_CONTEXT_SUCCESS_CB_T(lambda ctx,s,d,cb=cb: cb(s))
yield cb
while self.connected and self._actions[act_id] is None: self._pulse_iterate()
if not self._actions[act_id]: raise PulseOperationFailed(act_id)
finally: self._actions.pop(act_id, None)
def _pulse_poll(self, timeout=None):
'''timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout.'''
with self._pulse_loop() as loop:
ts = c.mono_time()
ts_deadline = timeout and (ts + timeout)
while True:
delay = max(0, int((ts_deadline - ts) * 1000)) if ts_deadline else -1
c.pa.mainloop_prepare(loop, delay) # usec
c.pa.mainloop_poll(loop)
if self._loop_closed: break # interrupted by close() or such
c.pa.mainloop_dispatch(loop)
if self._loop_stop: break
ts = c.mono_time()
if ts_deadline and ts >= ts_deadline: break
def _pulse_info_cb(self, info_cls, data_list, done_cb, ctx, info, eof, userdata):
# No idea where callbacks with "userdata != NULL" come from,
# but "info" pointer in them is always invalid, so they are discarded here.
# Looks like some kind of mixup or corruption in libpulse memory?
# See also: https://github.com/mk-fg/python-pulse-control/issues/35
if userdata is not None: return
# Empty result list and conn issues are checked elsewhere.
# Errors here are non-descriptive (errno), so should not be useful anyway.
# if eof < 0: done_cb(s=False)
if eof: done_cb()
else: data_list.append(info_cls(info[0]))
def _pulse_get_list(cb_t, pulse_func, info_cls, singleton=False, index_arg=True):
def _wrapper_method(self, index=None):
data = list()
with self._pulse_op_cb(raw=True) as cb:
cb = cb_t(
ft.partial(self._pulse_info_cb, info_cls, data, cb) if not singleton else
lambda ctx, info, userdata, cb=cb: data.append(info_cls(info[0])) or cb() )
pa_op = pulse_func( self._ctx,
*([index, cb, None] if index is not None else [cb, None]) )
c.pa.operation_unref(pa_op)
data = data or list()
if index is not None or singleton:
if not data: raise PulseIndexError(index)
data, = data
return data
_wrapper_method.__name__ = '...'
_wrapper_method.__doc__ = 'Signature: func({})'.format(
'' if pulse_func.__name__.endswith('_list') or singleton or not index_arg else 'index' )
return _wrapper_method
get_sink_by_name = _pulse_get_list(
c.PA_SINK_INFO_CB_T,
c.pa.context_get_sink_info_by_name, PulseSinkInfo )
get_source_by_name = _pulse_get_list(
c.PA_SOURCE_INFO_CB_T,
c.pa.context_get_source_info_by_name, PulseSourceInfo )
get_card_by_name = _pulse_get_list(
c.PA_CARD_INFO_CB_T,
c.pa.context_get_card_info_by_name, PulseCardInfo )
sink_input_list = _pulse_get_list(
c.PA_SINK_INPUT_INFO_CB_T,
c.pa.context_get_sink_input_info_list, PulseSinkInputInfo )
sink_input_info = _pulse_get_list(
c.PA_SINK_INPUT_INFO_CB_T,
c.pa.context_get_sink_input_info, PulseSinkInputInfo )
source_output_list = _pulse_get_list(
c.PA_SOURCE_OUTPUT_INFO_CB_T,
c.pa.context_get_source_output_info_list, PulseSourceOutputInfo )
source_output_info = _pulse_get_list(
c.PA_SOURCE_OUTPUT_INFO_CB_T,
c.pa.context_get_source_output_info, PulseSourceOutputInfo )
sink_list = _pulse_get_list(
c.PA_SINK_INFO_CB_T, c.pa.context_get_sink_info_list, PulseSinkInfo )
sink_info = _pulse_get_list(
c.PA_SINK_INFO_CB_T, c.pa.context_get_sink_info_by_index, PulseSinkInfo )
source_list = _pulse_get_list(
c.PA_SOURCE_INFO_CB_T, c.pa.context_get_source_info_list, PulseSourceInfo )
source_info = _pulse_get_list(
c.PA_SOURCE_INFO_CB_T, c.pa.context_get_source_info_by_index, PulseSourceInfo )
card_list = _pulse_get_list(
c.PA_CARD_INFO_CB_T, c.pa.context_get_card_info_list, PulseCardInfo )
card_info = _pulse_get_list(
c.PA_CARD_INFO_CB_T, c.pa.context_get_card_info_by_index, PulseCardInfo )
client_list = _pulse_get_list(
c.PA_CLIENT_INFO_CB_T, c.pa.context_get_client_info_list, PulseClientInfo )
client_info = _pulse_get_list(
c.PA_CLIENT_INFO_CB_T, c.pa.context_get_client_info, PulseClientInfo )
server_info = _pulse_get_list(
c.PA_SERVER_INFO_CB_T, c.pa.context_get_server_info, PulseServerInfo, singleton=True )
module_info = _pulse_get_list(
c.PA_MODULE_INFO_CB_T, c.pa.context_get_module_info, PulseModuleInfo )
module_list = _pulse_get_list(
c.PA_MODULE_INFO_CB_T, c.pa.context_get_module_info_list, PulseModuleInfo )
def _pulse_method_call(pulse_op, func=None, index_arg=True):
'''Creates following synchronous wrapper for async pa_operation callable:
wrapper(index, ...) -> pulse_op(index, [*]args_func(...))
index_arg=False: wrapper(...) -> pulse_op([*]args_func(...))'''
def _wrapper(self, *args, **kws):
if index_arg:
if 'index' in kws: index = kws.pop('index')
else: index, args = args[0], args[1:]
pulse_args = func(*args, **kws) if func else list()
if not is_list(pulse_args): pulse_args = [pulse_args]
if index_arg: pulse_args = [index] + list(pulse_args)
with self._pulse_op_cb() as cb:
try: pulse_op(self._ctx, *(list(pulse_args) + [cb, None]))
except c.ArgumentError as err: raise TypeError(err.args)
except c.pa.CallError as err: raise PulseOperationInvalid(err.args[-1])
func_args = list(inspect.getargspec(func or (lambda: None)))
func_args[0] = list(func_args[0])
if index_arg: func_args[0] = ['index'] + func_args[0]
_wrapper.__name__ = '...'
_wrapper.__doc__ = 'Signature: func' + inspect.formatargspec(*func_args)
if func.__doc__: _wrapper.__doc__ += '\n\n' + func.__doc__
return _wrapper
card_profile_set_by_index = _pulse_method_call(
c.pa.context_set_card_profile_by_index, lambda profile_name: profile_name )
sink_default_set = _pulse_method_call(
c.pa.context_set_default_sink, index_arg=False,
func=lambda sink: sink.name if isinstance(sink, PulseSinkInfo) else sink )
source_default_set = _pulse_method_call(
c.pa.context_set_default_source, index_arg=False,
func=lambda source: source.name if isinstance(source, PulseSourceInfo) else source )
sink_input_mute = _pulse_method_call(
c.pa.context_set_sink_input_mute, lambda mute=True: mute )
sink_input_move = _pulse_method_call(
c.pa.context_move_sink_input_by_index, lambda sink_index: sink_index )
sink_mute = _pulse_method_call(
c.pa.context_set_sink_mute_by_index, lambda mute=True: mute )
sink_input_volume_set = _pulse_method_call(
c.pa.context_set_sink_input_volume, lambda vol: vol.to_struct() )
sink_volume_set = _pulse_method_call(
c.pa.context_set_sink_volume_by_index, lambda vol: vol.to_struct() )
sink_suspend = _pulse_method_call(
c.pa.context_suspend_sink_by_index, lambda suspend=True: suspend )
sink_port_set = _pulse_method_call(
c.pa.context_set_sink_port_by_index,
lambda port: port.name if isinstance(port, PulsePortInfo) else port )
source_output_mute = _pulse_method_call(
c.pa.context_set_source_output_mute, lambda mute=True: mute )
source_output_move = _pulse_method_call(
c.pa.context_move_source_output_by_index, lambda sink_index: sink_index )
source_mute = _pulse_method_call(
c.pa.context_set_source_mute_by_index, lambda mute=True: mute )
source_output_volume_set = _pulse_method_call(
c.pa.context_set_source_output_volume, lambda vol: vol.to_struct() )
source_volume_set = _pulse_method_call(
c.pa.context_set_source_volume_by_index, lambda vol: vol.to_struct() )
source_suspend = _pulse_method_call(
c.pa.context_suspend_source_by_index, lambda suspend=True: suspend )
source_port_set = _pulse_method_call(
c.pa.context_set_source_port_by_index,
lambda port: port.name if isinstance(port, PulsePortInfo) else port )
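    # Illustrative usage sketch (kept as comments so this module stays importable;
    # assumes a connected client `pulse` with at least one sink input): the wrappers
    # generated above are called synchronously, e.g.
    #   streams = pulse.sink_input_list()
    #   if streams: pulse.sink_input_mute(streams[0].index, True)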
def module_load(self, name, args=''):
if is_list(args): args = ' '.join(args)
name, args = map(c.force_bytes, [name, args])
data = list()
with self._pulse_op_cb(raw=True) as cb:
cb = c.PA_CONTEXT_INDEX_CB_T(
lambda ctx, index, userdata, cb=cb: data.append(index) or cb() )
try: c.pa.context_load_module(self._ctx, name, args, cb, None)
except c.pa.CallError as err: raise PulseOperationInvalid(err.args[-1])
index, = data
return index
module_unload = _pulse_method_call(c.pa.context_unload_module, None)
def stream_restore_test(self):
'Returns module-stream-restore version int (e.g. 1) or None if it is unavailable.'
data = list()
with self._pulse_op_cb(raw=True) as cb:
cb = c.PA_EXT_STREAM_RESTORE_TEST_CB_T(
lambda ctx, version, userdata, cb=cb: data.append(version) or cb() )
try: c.pa.ext_stream_restore_test(self._ctx, cb, None)
except c.pa.CallError as err: raise PulseOperationInvalid(err.args[-1])
version, = data
return version if version != c.PA_INVALID else None
stream_restore_read = _pulse_get_list(
c.PA_EXT_STREAM_RESTORE_READ_CB_T,
c.pa.ext_stream_restore_read, PulseExtStreamRestoreInfo, index_arg=False )
stream_restore_list = stream_restore_read # for consistency with other *_list methods
@ft.partial(_pulse_method_call, c.pa.ext_stream_restore_write, index_arg=False)
def stream_restore_write( obj_name_or_list,
mode='merge', apply_immediately=False, **obj_kws ):
'''Update module-stream-restore db entry for specified name.
Can be passed PulseExtStreamRestoreInfo object or list of them as argument,
or name string there and object init keywords (e.g. volume, mute, channel_list, etc).
"mode" is PulseUpdateEnum value of
'merge' (default), 'replace' or 'set' (replaces ALL entries!!!).'''
mode = PulseUpdateEnum[mode]._c_val
if is_str(obj_name_or_list):
obj_name_or_list = PulseExtStreamRestoreInfo(obj_name_or_list, **obj_kws)
if isinstance(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
# obj_array is an array of structs, laid out contiguously in memory, not pointers
obj_array = (c.PA_EXT_STREAM_RESTORE_INFO * len(obj_name_or_list))()
for n, obj in enumerate(obj_name_or_list):
obj_struct, dst_struct = obj.to_struct(), obj_array[n]
for k,t in obj_struct._fields_: setattr(dst_struct, k, getattr(obj_struct, k))
return mode, obj_array, len(obj_array), int(bool(apply_immediately))
@ft.partial(_pulse_method_call, c.pa.ext_stream_restore_delete, index_arg=False)
def stream_restore_delete(obj_name_or_list):
'''Can be passed string name,
PulseExtStreamRestoreInfo object or a list of any of these.'''
if is_str(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
name_list = list((obj.name if isinstance( obj,
PulseExtStreamRestoreInfo ) else obj) for obj in obj_name_or_list)
name_struct = (c.c_char_p * len(name_list))()
name_struct[:] = list(map(c.force_bytes, name_list))
return [name_struct]
def default_set(self, obj):
'Set passed sink or source to be used as default one by pulseaudio server.'
assert_pulse_object(obj)
method = {
PulseSinkInfo: self.sink_default_set,
PulseSourceInfo: self.source_default_set }.get(type(obj))
if not method: raise NotImplementedError(type(obj))
method(obj)
def mute(self, obj, mute=True):
assert_pulse_object(obj)
method = {
PulseSinkInfo: self.sink_mute,
PulseSinkInputInfo: self.sink_input_mute,
PulseSourceInfo: self.source_mute,
PulseSourceOutputInfo: self.source_output_mute }.get(type(obj))
if not method: raise NotImplementedError(type(obj))
method(obj.index, mute)
obj.mute = mute
def port_set(self, obj, port):
assert_pulse_object(obj)
method = {
PulseSinkInfo: self.sink_port_set,
PulseSourceInfo: self.source_port_set }.get(type(obj))
if not method: raise NotImplementedError(type(obj))
method(obj.index, port)
obj.port_active = port
def card_profile_set(self, card, profile):
assert_pulse_object(card)
if is_str(profile):
profile_dict = dict((p.name, p) for p in card.profile_list)
if profile not in profile_dict:
raise PulseIndexError( 'Card does not have'
' profile with specified name: {!r}'.format(profile) )
profile = profile_dict[profile]
self.card_profile_set_by_index(card.index, profile.name)
card.profile_active = profile
def volume_set(self, obj, vol):
assert_pulse_object(obj)
method = {
PulseSinkInfo: self.sink_volume_set,
PulseSinkInputInfo: self.sink_input_volume_set,
PulseSourceInfo: self.source_volume_set,
PulseSourceOutputInfo: self.source_output_volume_set }.get(type(obj))
if not method: raise NotImplementedError(type(obj))
method(obj.index, vol)
obj.volume = vol
def volume_set_all_chans(self, obj, vol):
assert_pulse_object(obj)
obj.volume.value_flat = vol
self.volume_set(obj, obj.volume)
def volume_change_all_chans(self, obj, inc):
assert_pulse_object(obj)
obj.volume.values = [max(0, v + inc) for v in obj.volume.values]
self.volume_set(obj, obj.volume)
def volume_get_all_chans(self, obj):
# Purpose of this func can be a bit confusing, being here next to set/change ones
'''Get "flat" volume float value for info-object as a mean of all channel values.
Note that this DOES NOT query any kind of updated values from libpulse,
and simply returns value(s) stored in passed object, i.e. same ones for same object.'''
assert_pulse_object(obj)
return obj.volume.value_flat
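    # Illustrative usage sketch (as comments; assumes a connected client `pulse`):
    # lower every sink by 10% using the helpers above:
    #   for sink in pulse.sink_list():
    #       pulse.volume_change_all_chans(sink, -0.10)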
def event_mask_set(self, *masks):
mask = 0
for m in masks: mask |= PulseEventMaskEnum[m]._c_val
with self._pulse_op_cb() as cb:
c.pa.context_subscribe(self._ctx, mask, cb, None)
def event_callback_set(self, func):
'''Call event_listen() to start receiving these,
and be sure to raise PulseLoopStop in a callback to stop the loop.
Passing None will disable the thing.'''
self.event_callback = func
def event_listen(self, timeout=None, raise_on_disconnect=True):
'''Does not return until PulseLoopStop
gets raised in event callback or timeout passes.
timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout.
raise_on_disconnect causes PulseDisconnected exceptions by default.
Do not run any pulse operations from these callbacks.'''
assert self.event_callback
try: self._pulse_poll(timeout)
except c.pa.CallError: pass # e.g. from mainloop_dispatch() on disconnect
if raise_on_disconnect and not self.connected: raise PulseDisconnected()
def event_listen_stop(self):
'''Stop event_listen() loop from e.g. another thread.
		Does nothing if libpulse poll is not running yet, so this might be racy with
		event_listen() - be sure to call it in a loop until event_listen() returns.'''
self._loop_stop = True
c.pa.mainloop_wakeup(self._loop)
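    # Illustrative usage sketch (as comments; assumes a connected client `pulse`, and
    # that 'sink'/'sink_input' are valid event mask names):
    #   def on_event(ev):
    #       print('event:', ev)
    #       raise PulseLoopStop         # stops event_listen() from inside the callback
    #   pulse.event_mask_set('sink', 'sink_input')
    #   pulse.event_callback_set(on_event)
    #   pulse.event_listen(timeout=10)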
def set_poll_func(self, func, func_err_handler=None):
'''Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that.'''
if not func_err_handler: func_err_handler = traceback.print_exception
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None)
def get_peak_sample(self, source, timeout, stream_idx=None):
'''Returns peak (max) value in 0-1.0 range for samples in source/stream within timespan.
"source" can be either int index of pulseaudio source
(i.e. source.index), its name (source.name), or None to use default source.
Resulting value is what pulseaudio returns as
PA_SAMPLE_FLOAT32BE float after "timeout" seconds.
		If specified source does not exist, 0 will be returned after the timeout.'''
# common/gamesrv.py
from __future__ import generators
from socket import *
from select import select
from struct import pack, unpack
import zlib, os, random, struct, md5, sys
from time import time, ctime
from msgstruct import *
from errno import EWOULDBLOCK
SERVER_TIMEOUT = 7200 # 2 hours without any connection or port activity
def protofilepath(filename):
dirpath = filename
path = []
while dirpath:
dirpath, component = os.path.split(dirpath)
assert component, "invalid file path %r" % (filename,)
path.insert(0, component)
path.insert(0, game.FnBasePath)
return '/'.join(path)
class Icon:
count = 0
def __init__(self, bitmap, code, x,y,w,h, alpha=255):
self.w = w
self.h = h
self.origin = (bitmap, x, y)
self.code = code
if alpha == 255:
self.msgdef = message(MSG_DEF_ICON, bitmap.code, code, x,y,w,h)
else:
self.msgdef = message(MSG_DEF_ICON, bitmap.code, code, x,y,w,h, alpha)
framemsgappend(self.msgdef)
def getimage(self):
import pixmap
bitmap, x, y = self.origin
image = pixmap.decodepixmap(bitmap.read())
return pixmap.cropimage(image, (x, y, self.w, self.h))
def getorigin(self):
bitmap, x, y = self.origin
return bitmap, (x, y, self.w, self.h)
class DataChunk:
def __init__(self):
for c in clients:
if c.initialized == 2:
self.defall(c)
if recording and game:
self.defall(recording)
def read(self, slice=None):
f = open(self.filename, "rb")
data = f.read()
f.close()
if slice:
start, length = slice
data = data[start:start+length]
return data
def defall(self, client):
if client.proto == 1 or not self.filename:
# protocol 1
try:
msgdef = self.msgdef
except AttributeError:
data = zlib.compress(self.read())
msgdef = self.msgdef = self.getmsgdef(data)
else:
# protocol >= 2
try:
msgdef = self.sendmsgdef
except AttributeError:
fileid = len(filereaders)
filereaders[fileid] = self.read
data = self.read()
msgdef = self.sendmsgdef = (self.getmd5def(fileid, data) +
self.getmsgdef(fileid))
client.msgl.append(msgdef)
def getmd5def(self, fileid, data, offset=0):
checksum = md5.new(data).digest()
return message(MSG_MD5_FILE, fileid, protofilepath(self.filename),
offset, len(data), checksum)
class Bitmap(DataChunk):
def __init__(self, code, filename, colorkey=None):
self.code = code
self.filename = filename
self.icons = {}
self.colorkey = colorkey
DataChunk.__init__(self)
def geticon(self, x,y,w,h, alpha=255):
rect = (x,y,w,h)
try:
return self.icons[rect]
except:
ico = Icon(self, Icon.count, x,y,w,h, alpha)
Icon.count += 1
self.icons[rect] = ico
return ico
def geticonlist(self, w, h, count):
return map(lambda i, fn=self.geticon, w=w, h=h: fn(i*w, 0, w, h), range(count))
def getmsgdef(self, data):
if self.colorkey is not None:
return message(MSG_DEF_BITMAP, self.code, data, self.colorkey)
else:
return message(MSG_DEF_BITMAP, self.code, data)
def defall(self, client):
DataChunk.defall(self, client)
for i in self.icons.values():
client.msgl.append(i.msgdef)
class MemoryBitmap(Bitmap):
def __init__(self, code, data, colorkey=None):
self.data = data
Bitmap.__init__(self, code, None, colorkey)
def read(self, slice=None):
data = self.data
if slice:
start, length = slice
data = data[start:start+length]
return data
class Sample(DataChunk):
def __init__(self, code, filename, freqfactor=1):
self.code = code
self.filename = filename
self.freqfactor = freqfactor
DataChunk.__init__(self)
def defall(self, client):
if client.has_sound > 0:
DataChunk.defall(self, client)
def getmsgdef(self, data):
return message(MSG_DEF_SAMPLE, self.code, data)
def read(self, slice=None):
f = open(self.filename, "rb")
data = f.read()
f.close()
if self.freqfactor != 1:
freq, = unpack("<i", data[24:28])
freq = int(freq * self.freqfactor)
data = data[:24] + pack("<i", freq) + data[28:]
if slice:
start, length = slice
data = data[start:start+length]
return data
def getmd5def(self, fileid, data):
if self.freqfactor == 1:
return DataChunk.getmd5def(self, fileid, data)
else:
datahead = data[:28]
datatail = data[28:]
return (message(MSG_PATCH_FILE, fileid, 0, datahead) +
DataChunk.getmd5def(self, fileid, datatail, offset=28))
def play(self, lvolume=1.0, rvolume=None, pad=0.5, singleclient=None):
if rvolume is None:
rvolume = lvolume
lvolume *= 2.0*(1.0-pad)
rvolume *= 2.0*pad
if lvolume < 0.0:
lvolume = 0.0
elif lvolume > 1.0:
lvolume = 1.0
if rvolume < 0.0:
rvolume = 0.0
elif rvolume > 1.0:
rvolume = 1.0
message = pack("!hBBh", self.code, int(lvolume*255.0),
int(rvolume*255.0), -1)
if singleclient is None:
clist = clients[:]
else:
clist = [singleclient]
for c in clist:
if c.has_sound:
c.sounds.setdefault(message, 4)
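# Illustrative sketch (standalone arithmetic, values made up): how the "pad" argument
# of Sample.play() above pans a sample; pad=0.0 is full left, pad=1.0 is full right.
def _example_pan_volumes(volume=1.0, pad=0.25):
    lvolume = min(1.0, max(0.0, volume * 2.0 * (1.0 - pad)))  # -> 1.0 (clipped from 1.5)
    rvolume = min(1.0, max(0.0, volume * 2.0 * pad))          # -> 0.5
    return lvolume, rvolume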
class Music(DataChunk):
def __init__(self, filename, filerate=44100):
self.filename = filename
self.filerate = filerate
self.f = open(filename, 'rb')
self.f.seek(0, 2)
filesize = self.f.tell()
self.endpos = max(self.filerate, filesize - self.filerate)
self.fileid = len(filereaders)
filereaders[self.fileid] = self.read
self.md5msgs = {}
DataChunk.__init__(self)
def read(self, (start, length)):
self.f.seek(start)
return self.f.read(length)
def msgblock(self, position, limited=1):
blocksize = self.filerate
if limited and position+blocksize > self.endpos:
blocksize = self.endpos-position
if blocksize <= 0:
return ''
#self.f.seek(position)
#return message(MSG_DEF_MUSIC, self.code, position, self.f.read(blocksize))
try:
msg = self.md5msgs[position]
except KeyError:
data = self.read((position, blocksize))
checksum = md5.new(data).digest()
msg = message(MSG_MD5_FILE, self.fileid, protofilepath(self.filename),
position, blocksize, checksum)
self.md5msgs[position] = msg
return msg
def clientsend(self, clientpos):
msg = self.msgblock(clientpos)
#print 'clientsend:', self.code, len(msg), clientpos
if msg:
return [msg], clientpos + self.filerate
else:
return [], None
def initialsend(self, c):
return [self.msgblock(0), self.msgblock(self.endpos, 0)], self.filerate
def defall(self, client):
pass
def clearsprites():
sprites_by_n.clear()
sprites[:] = ['']
def compactsprites(insert_new=None, insert_before=None):
global sprites, sprites_by_n
if insert_before is not None:
if insert_new.alive:
insert_before = insert_before.alive
else:
insert_before = None
newsprites = ['']
newd = {}
l = sprites_by_n.items()
l.sort()
for n, s in l:
if n == insert_before:
prevn = insert_new.alive
newn = insert_new.alive = len(newsprites)
newsprites.append(sprites[prevn])
newd[newn] = insert_new
l.remove((prevn, insert_new))
newn = s.alive = len(newsprites)
newsprites.append(sprites[n])
newd[newn] = s
sprites = newsprites
sprites_by_n = newd
class Sprite:
def __init__(self, ico, x,y):
self.x = x
self.y = y
self.ico = ico
self.alive = len(sprites)
if (-ico.w < x < game.width and
-ico.h < y < game.height):
sprites.append(pack("!hhh", x, y, ico.code))
else:
sprites.append('') # starts off-screen
sprites_by_n[self.alive] = self
def move(self, x,y, ico=None):
self.x = x
self.y = y
if ico is not None:
self.ico = ico
sprites[self.alive] = pack("!hhh", x, y, self.ico.code)
def setdisplaypos(self, x, y):
# special use only (self.x,y are not updated)
s = sprites[self.alive]
if len(s) == 6:
sprites[self.alive] = pack("!hh", x, y) + s[4:]
def setdisplayicon(self, ico):
# special use only (self.ico is not updated)
s = sprites[self.alive]
if len(s) == 6:
sprites[self.alive] = s[:4] + pack("!h", ico.code)
#sizeof_displaypos = struct.calcsize("!hh")
def getdisplaypos(self):
# special use only (normally, read self.x,y,ico directly)
s = sprites[self.alive]
if self.alive and len(s) == 6:
return unpack("!hh", s[:4])
else:
return None, None
def step(self, dx,dy):
x = self.x = self.x + dx
y = self.y = self.y + dy
sprites[self.alive] = pack("!hhh", x, y, self.ico.code)
def seticon(self, ico):
self.ico = ico
sprites[self.alive] = pack("!hhh", self.x, self.y, ico.code)
def hide(self):
sprites[self.alive] = ''
def kill(self):
if self.alive:
del sprites_by_n[self.alive]
sprites[self.alive] = ''
self.alive = 0
def prefix(self, n, m=0):
pass #sprites[self.alive] = pack("!hhh", n, m, 32767) + sprites[self.alive]
def to_front(self):
if self.alive and self.alive < len(sprites)-1:
self._force_to_front()
def _force_to_front(self):
info = sprites[self.alive]
sprites[self.alive] = ''
del sprites_by_n[self.alive]
self.alive = len(sprites)
sprites_by_n[self.alive] = self
sprites.append(info)
def to_back(self, limit=None):
assert self is not limit
if limit:
n1 = limit.alive + 1
else:
n1 = 1
if self.alive > n1:
if n1 in sprites_by_n:
keys = sprites_by_n.keys()
keys.remove(self.alive)
keys.sort()
keys = keys[keys.index(n1):]
reinsert = [sprites_by_n[n] for n in keys]
for s1 in reinsert:
s1._force_to_front()
assert n1 not in sprites_by_n
info = sprites[self.alive]
sprites[self.alive] = ''
del sprites_by_n[self.alive]
self.alive = n1
sprites_by_n[n1] = self
sprites[n1] = info
def __repr__(self):
if self.alive:
return "<sprite %d at %d,%d>" % (self.alive, self.x, self.y)
else:
return "<killed sprite>"
class Player:
standardplayericon = None
def playerjoin(self):
pass
def playerleaves(self):
pass
def _playerleaves(self):
if self.isplaying():
self._client.killplayer(self)
del self._client
self.playerleaves()
def isplaying(self):
return hasattr(self, "_client")
class Client:
SEND_BOUND_PER_FRAME = 0x6000 # bytes
KEEP_ALIVE = 2.2 # seconds
def __init__(self, socket, addr):
socket.setblocking(0)
self.socket = socket
self.addr = addr
self.udpsocket = None
self.udpsockcounter = 0
self.initialdata = MSG_WELCOME
self.initialized = 0
self.msgl = [message(MSG_PING)]
self.buf = ""
self.players = { }
self.sounds = None
self.has_sound = 0
self.has_music = 0
self.musicpos = { }
self.proto = 1
self.dyncompress = None
addsocket('CLIENT', self.socket, self.input_handler)
clients.append(self)
self.log('connected')
self.send_buffer(self.initialdata)
def opengame(self, game):
if self.initialized == 0:
self.initialdata += game.FnDesc + '\n'
self.initialized = 1
if self.initialized == 1:
if game.broadcast_port:
self.initialdata += message(MSG_BROADCAST_PORT, game.broadcast_port)
game.trigger_broadcast()
self.initialdata += game.deffieldmsg()
else:
self.msgl.append(game.deffieldmsg())
self.activity = self.last_ping = time()
self.force_ping_delay = 0.6
for c in clients:
for id in c.players.keys():
self.msgl.append(message(MSG_PLAYER_JOIN, id, c is self))
def emit(self, udpdata, broadcast_extras):
if self.initialdata:
self.send_buffer(self.initialdata)
elif self.initialized == 2:
buffer = ''.join(self.msgl)
if buffer:
self.send_buffer(buffer)
if self.udpsocket is not None:
if self.sounds:
if broadcast_extras is None or self not in broadcast_clients:
udpdata = ''.join(self.sounds.keys() + [udpdata])
else:
broadcast_extras.update(self.sounds)
for key, value in self.sounds.items():
if value:
self.sounds[key] = value-1
else:
del self.sounds[key]
if broadcast_extras is None or self not in broadcast_clients:
                if self.dyncompress is not None:
#!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Event handling module.
@see: U{http://apps.sourceforge.net/trac/winappdbg/wiki/Debugging}
@group Debugging:
EventHandler, EventSift
@group Debug events:
EventFactory,
EventDispatcher,
Event,
NoEvent,
CreateProcessEvent,
CreateThreadEvent,
ExitProcessEvent,
ExitThreadEvent,
LoadDLLEvent,
UnloadDLLEvent,
OutputDebugStringEvent,
RIPEvent,
ExceptionEvent
@group Warnings:
EventCallbackWarning
"""
__revision__ = "$Id$"
__all__ = [
# Factory of Event objects and all of its subclasses.
# Users should not need to instance Event objects directly.
'EventFactory',
# Event dispatcher used internally by the Debug class.
'EventDispatcher',
# Base classes for user-defined event handlers.
'EventHandler',
'EventSift',
# Warning for uncaught exceptions on event callbacks.
'EventCallbackWarning',
# Dummy event object that can be used as a placeholder.
# It's never returned by the EventFactory.
'NoEvent',
# Base class for event objects.
'Event',
# Event objects.
'CreateProcessEvent',
'CreateThreadEvent',
'ExitProcessEvent',
'ExitThreadEvent',
'LoadDLLEvent',
'UnloadDLLEvent',
'OutputDebugStringEvent',
'RIPEvent',
'ExceptionEvent'
]
from winappdbg import win32
from winappdbg import compat
from winappdbg.win32 import FileHandle, ProcessHandle, ThreadHandle
from winappdbg.breakpoint import ApiHook
from winappdbg.module import Module
from winappdbg.thread import Thread
from winappdbg.process import Process
from winappdbg.textio import HexDump
from winappdbg.util import StaticClass, PathOperations
import sys
import ctypes
import warnings
import traceback
#==============================================================================
class EventCallbackWarning (RuntimeWarning):
"""
This warning is issued when an uncaught exception was raised by a
user-defined event handler.
"""
#==============================================================================
class Event (object):
"""
Event object.
@type eventMethod: str
@cvar eventMethod:
Method name to call when using L{EventHandler} subclasses.
Used internally.
@type eventName: str
@cvar eventName:
User-friendly name of the event.
@type eventDescription: str
@cvar eventDescription:
User-friendly description of the event.
@type debug: L{Debug}
@ivar debug:
Debug object that received the event.
@type raw: L{DEBUG_EVENT}
@ivar raw:
Raw DEBUG_EVENT structure as used by the Win32 API.
@type continueStatus: int
@ivar continueStatus:
Continue status to pass to L{win32.ContinueDebugEvent}.
"""
eventMethod = 'unknown_event'
eventName = 'Unknown event'
eventDescription = 'A debug event of an unknown type has occurred.'
def __init__(self, debug, raw):
"""
@type debug: L{Debug}
@param debug: Debug object that received the event.
@type raw: L{DEBUG_EVENT}
@param raw: Raw DEBUG_EVENT structure as used by the Win32 API.
"""
self.debug = debug
self.raw = raw
self.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
## @property
## def debug(self):
## """
## @rtype debug: L{Debug}
## @return debug:
## Debug object that received the event.
## """
## return self.__debug()
def get_event_name(self):
"""
@rtype: str
@return: User-friendly name of the event.
"""
return self.eventName
def get_event_description(self):
"""
@rtype: str
@return: User-friendly description of the event.
"""
return self.eventDescription
def get_event_code(self):
"""
@rtype: int
@return: Debug event code as defined in the Win32 API.
"""
return self.raw.dwDebugEventCode
## # Compatibility with version 1.0
## # XXX to be removed in version 1.4
## def get_code(self):
## """
## Alias of L{get_event_code} for backwards compatibility
## with WinAppDbg version 1.0.
## Will be phased out in the next version.
##
## @rtype: int
## @return: Debug event code as defined in the Win32 API.
## """
## return self.get_event_code()
def get_pid(self):
"""
@see: L{get_process}
@rtype: int
@return: Process global ID where the event occurred.
"""
return self.raw.dwProcessId
def get_tid(self):
"""
@see: L{get_thread}
@rtype: int
@return: Thread global ID where the event occurred.
"""
return self.raw.dwThreadId
def get_process(self):
"""
@see: L{get_pid}
@rtype: L{Process}
@return: Process where the event occurred.
"""
pid = self.get_pid()
system = self.debug.system
if system.has_process(pid):
process = system.get_process(pid)
else:
# XXX HACK
# The process object was missing for some reason, so make a new one.
process = Process(pid)
system._add_process(process)
## process.scan_threads() # not needed
process.scan_modules()
return process
def get_thread(self):
"""
@see: L{get_tid}
@rtype: L{Thread}
@return: Thread where the event occurred.
"""
tid = self.get_tid()
process = self.get_process()
if process.has_thread(tid):
thread = process.get_thread(tid)
else:
# XXX HACK
# The thread object was missing for some reason, so make a new one.
thread = Thread(tid)
process._add_thread(thread)
return thread
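# A hedged usage sketch, not taken from winappdbg itself: the Event docstring
# above notes that each event type carries an ``eventMethod`` name which an
# EventHandler subclass implements as a callback.  Assuming the EventHandler
# class defined later in this module, a minimal handler could look like:
#
#     class MyEventHandler (EventHandler):
#         def create_process(self, event):     # callback name is illustrative
#             print "process %d started" % event.get_pid()
#         def exception(self, event):
#             print event.get_event_description()
#
# The authoritative callback names are the eventMethod attributes declared on
# the Event subclasses in this file.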
#==============================================================================
class NoEvent (Event):
"""
No event.
Dummy L{Event} object that can be used as a placeholder when no debug
event has occurred yet. It's never returned by the L{EventFactory}.
"""
eventMethod = 'no_event'
eventName = 'No event'
eventDescription = 'No debug event has occurred.'
def __init__(self, debug, raw = None):
Event.__init__(self, debug, raw)
def __len__(self):
"""
Always returns C{0}, so when evaluating the object as a boolean it's
always C{False}. This prevents L{Debug.cont} from trying to continue
a dummy event.
"""
return 0
def get_event_code(self):
return -1
def get_pid(self):
return -1
def get_tid(self):
return -1
def get_process(self):
return Process(self.get_pid())
def get_thread(self):
return Thread(self.get_tid())
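# A minimal sketch (not part of the original module) of the NoEvent contract
# described above: the placeholder is falsy and reports -1 identifiers, so
# code such as Debug.cont() can safely skip it.  The Debug object is stubbed
# with None here purely for illustration.
def _sketch_noevent_placeholder():
    placeholder = NoEvent(debug=None)
    assert not placeholder                  # __len__ returns 0
    assert placeholder.get_pid() == -1      # no real process behind it
    assert placeholder.get_tid() == -1      # no real thread either
    return placeholder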
#==============================================================================
class ExceptionEvent (Event):
"""
Exception event.
@type exceptionName: dict( int S{->} str )
@cvar exceptionName:
Mapping of exception constants to their names.
@type exceptionDescription: dict( int S{->} str )
@cvar exceptionDescription:
Mapping of exception constants to user-friendly strings.
@type breakpoint: L{Breakpoint}
@ivar breakpoint:
If the exception was caused by one of our breakpoints, this member
contains a reference to the breakpoint object. Otherwise it's not
defined. It should only be used from the condition or action callback
routines, instead of the event handler.
@type hook: L{Hook}
@ivar hook:
If the exception was caused by a function hook, this member contains a
reference to the hook object. Otherwise it's not defined. It should
only be used from the hook callback routines, instead of the event
handler.
"""
eventName = 'Exception event'
eventDescription = 'An exception was raised by the debugee.'
__exceptionMethod = {
win32.EXCEPTION_ACCESS_VIOLATION : 'access_violation',
win32.EXCEPTION_ARRAY_BOUNDS_EXCEEDED : 'array_bounds_exceeded',
win32.EXCEPTION_BREAKPOINT : 'breakpoint',
win32.EXCEPTION_DATATYPE_MISALIGNMENT : 'datatype_misalignment',
win32.EXCEPTION_FLT_DENORMAL_OPERAND : 'float_denormal_operand',
win32.EXCEPTION_FLT_DIVIDE_BY_ZERO : 'float_divide_by_zero',
win32.EXCEPTION_FLT_INEXACT_RESULT : 'float_inexact_result',
win32.EXCEPTION_FLT_INVALID_OPERATION : 'float_invalid_operation',
win32.EXCEPTION_FLT_OVERFLOW : 'float_overflow',
win32.EXCEPTION_FLT_STACK_CHECK : 'float_stack_check',
win32.EXCEPTION_FLT_UNDERFLOW : 'float_underflow',
win32.EXCEPTION_ILLEGAL_INSTRUCTION : 'illegal_instruction',
win32.EXCEPTION_IN_PAGE_ERROR : 'in_page_error',
win32.EXCEPTION_INT_DIVIDE_BY_ZERO : 'integer_divide_by_zero',
win32.EXCEPTION_INT_OVERFLOW : 'integer_overflow',
win32.EXCEPTION_INVALID_DISPOSITION : 'invalid_disposition',
win32.EXCEPTION_NONCONTINUABLE_EXCEPTION : 'noncontinuable_exception',
win32.EXCEPTION_PRIV_INSTRUCTION : 'privileged_instruction',
win32.EXCEPTION_SINGLE_STEP : 'single_step',
win32.EXCEPTION_STACK_OVERFLOW : 'stack_overflow',
win32.EXCEPTION_GUARD_PAGE : 'guard_page',
win32.EXCEPTION_INVALID_HANDLE : 'invalid_handle',
win32.EXCEPTION_POSSIBLE_DEADLOCK : 'possible_deadlock',
win32.EXCEPTION_WX86_BREAKPOINT : 'wow64_breakpoint',
win32.CONTROL_C_EXIT : 'control_c_exit',
win32.DBG_CONTROL_C : 'debug_control_c',
win32.MS_VC_EXCEPTION : 'ms_vc_exception',
}
__exceptionName = {
win32.EXCEPTION_ACCESS_VIOLATION : 'EXCEPTION_ACCESS_VIOLATION',
win32.EXCEPTION_ARRAY_BOUNDS_EXCEEDED : 'EXCEPTION_ARRAY_BOUNDS_EXCEEDED',
win32.EXCEPTION_BREAKPOINT : 'EXCEPTION_BREAKPOINT',
win32.EXCEPTION_DATATYPE_MISALIGNMENT : 'EXCEPTION_DATATYPE_MISALIGNMENT',
win32.EXCEPTION_FLT_DENORMAL_OPERAND : 'EXCEPTION_FLT_DENORMAL_OPERAND',
win32.EXCEPTION_FLT_DIVIDE_BY_ZERO : 'EXCEPTION_FLT_DIVIDE_BY_ZERO',
win32.EXCEPTION_FLT_INEXACT_RESULT : 'EXCEPTION_FLT_INEXACT_RESULT',
win32.EXCEPTION_FLT_INVALID_OPERATION : 'EXCEPTION_FLT_INVALID_OPERATION',
win32.EXCEPTION_FLT_OVERFLOW : 'EXCEPTION_FLT_OVERFLOW',
win32.EXCEPTION_FLT_STACK_CHECK : 'EXCEPTION_FLT_STACK_CHECK',
win32.EXCEPTION_FLT_UNDERFLOW : 'EXCEPTION_FLT_UNDERFLOW',
win32.EXCEPTION_ILLEGAL_INSTRUCTION : 'EXCEPTION_ILLEGAL_INSTRUCTION',
win32.EXCEPTION_IN_PAGE_ERROR : 'EXCEPTION_IN_PAGE_ERROR',
win32.EXCEPTION_INT_DIVIDE_BY_ZERO : 'EXCEPTION_INT_DIVIDE_BY_ZERO',
win32.EXCEPTION_INT_OVERFLOW : 'EXCEPTION_INT_OVERFLOW',
win32.EXCEPTION_INVALID_DISPOSITION : 'EXCEPTION_INVALID_DISPOSITION',
win32.EXCEPTION_NONCONTINUABLE_EXCEPTION : 'EXCEPTION_NONCONTINUABLE_EXCEPTION',
win32.EXCEPTION_PRIV_INSTRUCTION : 'EXCEPTION_PRIV_INSTRUCTION',
win32.EXCEPTION_SINGLE_STEP : 'EXCEPTION_SINGLE_STEP',
win32.EXCEPTION_STACK_OVERFLOW : 'EXCEPTION_STACK_OVERFLOW',
win32.EXCEPTION_GUARD_PAGE : 'EXCEPTION_GUARD_PAGE',
win32.EXCEPTION_INVALID_HANDLE : 'EXCEPTION_INVALID_HANDLE',
win32.EXCEPTION_POSSIBLE_DEADLOCK : 'EXCEPTION_POSSIBLE_DEADLOCK',
win32.EXCEPTION_WX86_BREAKPOINT : 'EXCEPTION_WX86_BREAKPOINT',
win32.CONTROL_C_EXIT : 'CONTROL_C_EXIT',
win32.DBG_CONTROL_C : 'DBG_CONTROL_C',
win32.MS_VC_EXCEPTION : 'MS_VC_EXCEPTION',
}
__exceptionDescription = {
win32.EXCEPTION_ACCESS_VIOLATION : 'Access violation',
win32.EXCEPTION_ARRAY_BOUNDS_EXCEEDED : 'Array bounds exceeded',
win32.EXCEPTION_BREAKPOINT : 'Breakpoint',
win32.EXCEPTION_DATATYPE_MISALIGNMENT : 'Datatype misalignment',
win32.EXCEPTION_FLT_DENORMAL_OPERAND : 'Float denormal operand',
win32.EXCEPTION_FLT_DIVIDE_BY_ZERO : 'Float divide by zero',
win32.EXCEPTION_FLT_INEXACT_RESULT : 'Float inexact result',
win32.EXCEPTION_FLT_INVALID_OPERATION : 'Float invalid operation',
win32.EXCEPTION_FLT_OVERFLOW : 'Float overflow',
win32.EXCEPTION_FLT_STACK_CHECK : 'Float stack check',
win32.EXCEPTION_FLT_UNDERFLOW : 'Float underflow',
win32.EXCEPTION_ILLEGAL_INSTRUCTION : 'Illegal instruction',
win32.EXCEPTION_IN_PAGE_ERROR : 'In-page error',
win32.EXCEPTION_INT_DIVIDE_BY_ZERO : 'Integer divide by zero',
win32.EXCEPTION_INT_OVERFLOW : 'Integer overflow',
win32.EXCEPTION_INVALID_DISPOSITION : 'Invalid disposition',
win32.EXCEPTION_NONCONTINUABLE_EXCEPTION : 'Noncontinuable exception',
win32.EXCEPTION_PRIV_INSTRUCTION : 'Privileged instruction',
win32.EXCEPTION_SINGLE_STEP : 'Single step event',
| |
# Source repository: larson-group/clubb_release
"""
-------------------------------------------------------------------------------
G E N E R A L I N F O R M A T I O N
-------------------------------------------------------------------------------
This file contains general constants and information about the variables saved
in the netCDF file needed for plotgen.py.
The list variables sortPlots, plotNames and lines are sorted identically in
order to relate the individual variables.
"""
#-------------------------------------------------------------------------------
# C O N S T A N T S
#-------------------------------------------------------------------------------
DAY = 24
HOUR = 3600
KG = 1000.
g_per_second_to_kg_per_day = 1. / (DAY * HOUR * KG)
kg_per_second_to_kg_per_day = 1. / (DAY * HOUR)
#-------------------------------------------------------------------------------
# P L O T S
#-------------------------------------------------------------------------------
# Names of the variables
sortPlots = ['theta_l', 'r_t', 'theta_l_flux', 'r_t_flux', 'cloudliq_frac', 'r_c', 'w_var', 'w3', 'theta_l_var', 'r_t_var', 'covar_thetal_rt', 'wobs', 'U', 'V', 'covar_uw', 'covar_vw', 'u_var', 'v_var',\
'QR', 'QR_IP', 'QRP2', 'QRP2_QRIP', \
'Nrm', 'Nrm_IP', 'Nrp2', 'Nrp2_NrmIP', \
'Ncm', 'Ncm_IP', 'Ncp2', 'Ncp2_NcmIP', \
'Ngm', 'Ngm_IP', 'Ngp2', 'Ngp2_NgmIP', \
'Qgm', 'Qgm_IP', 'Qgp2', 'Qgp2_QgmIP', \
'Qim', 'Qim_IP', 'Qip2', 'Qip2_QimIP', \
'Nim', 'Nim_IP', 'Nip2', 'Nip2_NimIP', \
'Qsm', 'Qsm_IP', 'Qsp2', 'Qsp2_QsmIP', \
'Nsm', 'Nsm_IP', 'Nsp2', 'Nsp2_NsmIP', \
'MicroFractions', 'Buoy_Flux', \
'uprcp', 'uprtp', 'upthlp', \
'vprcp', 'vprtp', 'vpthlp', \
]
# settings of each plot:
# plot title, axis label (the plot number is implied by the position in the list)
plotNames = [\
['Liquid Water Potential Temperature, theta_l', 'thetal [K]'],\
['Total Water Mixing Ratio, r_t', 'rtm / qt [kg/kg]'],\
['Turbulent Flux of theta_l', 'wpthlp / thflux(s) [K m/s]'],\
['Turbulent Flux of r_t', 'wprtp / qtflux(s) [(kg/kg) m/s]'],\
['Cloud Liquid Fraction', ' [%/100]'],\
['Cloud Water Mixing Ratio, r_c', 'rcm / qcl [kg/kg]'],\
['Variance of w', 'wp2 / w2 [m^2/s^2]'],\
['Third-order Moment of w', 'wp3 / w3 [m^3/s^3]'],\
['Variance of theta_l', 'thlp2 / tl2 [K^2]'],\
['Variance of r_t', 'rtp2 / qtp2 [(kg/kg)^2]'],\
['Covariance of r_t & theta_l', 'rtpthlp [(kg/kg) K]'],\
['Vertical Wind Component, w (subsidence)', 'wobs [m/s]'],\
['Zonal Wind Component, u', 'um / u [m/s]'],\
['Meridional Wind Component, v', 'vm / v [m/s]'],\
['Covariance of u & w', 'upwp / uw [m^2/s^2]'],\
['Covariance of v & w', 'vpwp / vw [m^2/s^2]'],\
['Variance of u wind', 'up2 / u2 [m^2/s^2]'],\
['Variance of v wind', 'vp2 / v2 [m^2/s^2]'],\
# Rain Water Mixing Ratio
['Rain Water Mixing Ratio', 'qrm [kg/kg]'],\
['Rain Water Mixing Ratio in Rain', 'qrm_ip [kg/kg]'],\
['Domain-wide Variance\nof Rain Water Mixing Ratio', 'qrp2 [(kg/kg)^2]'],\
['Within-rain Variance\nof Rain Water Mixing Ratio', 'qrp2_ip / qrm_ip^2 [-]'],\
#Rain Drop Number Concentration
['Rain Drop Concentration', 'Nrm [num/kg]'],\
['Rain Drop Concentration in Rain', 'Nrm_ip [num/kg]'],\
['Domain-wide Variance\nof Rain Drop Concentration', 'Nrp2 [(num/kg)^2]'],\
['Within-rain Variance\nof Rain Drop Concentration', 'Nrp2_ip / Nrm_ip^2 [-]'],\
#Cloud Droplet Number Concentration
['Cloud Droplet Number Concentration', 'Ncm [num/kg]'],\
['Cloud Droplet Number Concentration in Cloud', 'Ncm_ip [num/kg]'],\
['Domain-wide Variance\nof Cloud Droplet Number Concentration', 'Ncp2 [(#/kg)^2]'],\
['Within-cloud Variance\nof Cloud Droplet Number Concentration', 'Ncp2_ip / Ncm_ip^2 [-]'],\
#Graupel Number Concentration
['Graupel Number Concentration', 'Ngm [kg/kg]'],\
['Graupel Number Concentration in Graupel', 'Ngm_ip [num/kg]'],\
['Domain-wide Variance\nof Graupel Number Concentration', 'Ngp2 [(kg/kg)^2]'],\
['Within-graupel Variance\nof Graupel Number Concentration', 'Ngp2_ip / Ngm_ip^2 [-]'],\
#Graupel Mixing Ratio
['Graupel Mixing Ratio', 'qgm [kg/kg]'],\
['Graupel Mixing Ratio in Graupel', 'qgm_ip [kg/kg]'],\
['Domain-wide Variance\nof Graupel Mixing Ratio', 'qgp2 [(kg/kg)^2]'],\
['Within-graupel Variance\nof Graupel Mixing Ratio', 'qgp2_ip / qgm_ip^2 [-]'],\
#Cloud Ice Mixing Ratio
['Cloud Ice Mixing Ratio', 'qim [kg/kg]'],\
['Cloud Ice Mixing Ratio in Cloud Ice', 'qim_ip [kg/kg]'],\
['Domain-wide Variance\nof Cloud Ice Mixing Ratio', 'qip2 [(kg/kg)^2]'],\
['Within-cloud-ice Variance\nof Cloud Ice Mixing Ratio', 'qip2_ip / qim_ip^2 [-]'],\
#Cloud Ice Number Concentration
['Cloud Ice Concentration', 'Nim [num/kg]'],\
['Cloud Ice Number Concentration in Cloud Ice', 'Ni_ip [num/kg]'],\
['Domain-wide Variance\nof Cloud Ice Number Concentration', 'Nip2 [(num/kg)^2]'],\
['Within-cloud-ice Variance\nof Cloud Ice Number Concentration', 'Nip2_ip / Nim_ip^2 [-]'],\
#Snow Mixing Ratio
['Snow Mixing Ratio ', 'qsm [kg/kg]'],\
['Snow Mixing Ratio in Snow', 'qsm_ip [kg/kg]'],\
['Domain-wide Variance\nof Snow Mixing Ratio', 'qsp2 [(kg/kg)^2]'],\
['Within-snow Variance\nof Snow Mixing Ratio ', 'qsp2_ip / qsm_ip^2 [-]'],\
#Snow Number Concentration
['Snow Number Concentration', 'Nsm [num/kg]'],\
['Snow Number Concentration in Snow', 'Nsm_ip [num/kg]'],\
['Domain-wide Variance\nof Snow Number Concentration', 'Nsp2 [(#/kg)^2]'],\
['Within-snow Variance\nof Snow Number Concentration', 'Nsp2_ip / Nsm_ip^2 [-]'],\
['Micro Fractions', '[%/100]'],\
['Buoyancy flux', 'wpthvp / tlflux [K m/s]'],\
#['Liquid Water Path', 'lwp [kg/m^2]'],\
#['Surface rainfall rate', 'rain_rate_sfc[mm/day]'],\
#['Density-Weighted Vertically Averaged wp2', 'wp2 / w2 [m^2/s^2]'],\
#['Cloud Ice Water Path', 'iwp [kg/m^2]'],\
#['Snow Water Path', 'swp [kg/m^2]'],\
# buoyancy sub-terms for parameterization in upwp budget
['Covariance of u & rc', 'uprcp / urc [m^2/s^2]'],\
['Covariance of u & rt', 'uprtp / urt [m^2/s^2]'],\
['Covariance of u & thl', 'upthlp / uthl [m^2/s^2]'],\
# buoyancy sub-terms for parameterization in upwp budget
['Covariance of v & rc', 'vprcp / urc [m^2/s^2]'],\
['Covariance of v & rt', 'vprtp / urt [m^2/s^2]'],\
['Covariance of v & thl', 'vpthlp / uthl [m^2/s^2]'],\
]
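# A small sketch (not part of the original case file) of how the parallel lists
# described in the header are meant to be related: sortPlots and plotNames are
# index-aligned, so a lookup table can be built directly from them.
def _sketch_plot_lookup():
    # maps plot key -> (plot title, axis label)
    return dict(zip(sortPlots, (tuple(entry) for entry in plotNames)))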
# lines of each plot:
# variable name within python, shall this variable be plotted?, variable name or expression in SAM output, conversion factor, and a trailing integer field (0 for every entry in this file)
thetal = [\
# variables of thetal
['THETAL', False, 'THETAL', 1., 0],\
['THETA', False, 'THETA', 1., 0],\
['TABS', False, 'TABS', 1., 0],\
['QI', False, 'QI', 1./KG, 0],\
['THETAL', True, 'THETAL + 2500.4 * (THETA/TABS) * QI', 1., 0],\
]
rt = [\
# variables of rt
['QI', False, 'QI', 1., 0],\
['QT', False, 'QT', 1., 0],\
['RT', True, '(QT-QI)', 1./KG, 0],\
]
thetalflux = [\
# variables of thetalflux
['TLFLUX', False, 'TLFLUX', 1., 0],\
['RHO', False, 'RHO', 1., 0],\
['WPTHLP_SGS', False, 'WPTHLP_SGS', 1., 0],\
['THETALFLUX', True, '((TLFLUX) / (RHO * 1004.)) + WPTHLP_SGS', 1., 0],\
]
rtflux = [\
# variables of rtflux
['QTFLUX', False, 'TLFLUX', 1., 0],\
['RHO', False, 'RHO', 1., 0],\
['WPRTP_SGS', False, 'WPRTP_SGS', 1., 0],\
['RTFLUX', True, '(QTFLUX / (RHO * 2.5104e+6)) + WPRTP_SGS', 1., 0],\
]
cloudliqfrac = [\
# variables of cloudliqfrac
['cloudliq_frac_em6', True, 'cloudliq_frac_em6', 1., 0],\
]
qcl = [\
# variables of qcl
['QCL', True, 'QCL', 1./KG, 0],\
]
wVar = [\
# variables of wVar
['WP2_SGS', False, 'WP2_SGS', 1., 0],\
['W2', False, 'W2', 1., 0],\
['WVAR', True, 'WP2_SGS + W2', 1., 0],\
]
w3 = [\
# variables of wVar
['WP3_SGS', False, 'WP3_SGS', 1., 0],\
['W3', False, 'W3', 1., 0],\
['W3', True, 'WP3_SGS + W3', 1., 0],\
]
thetalVar = [\
# variables of thetalVar
['THLP2_SGS', False, 'THLP2_SGS', 1., 0],\
['TL2', False, 'TL2', 1., 0],\
['THETALVAR', True, 'THLP2_SGS + TL2', 1., 0],\
]
rtVar = [\
# variables of rtVar
['RTP2_SGS', False, 'RTP2_SGS', 1., 0],\
['QT2', False, 'QT2', 1., 0],\
['RTVAR', True, '(QT2 / 1e+6) + RTP2_SGS', 1., 0],\
]
covarThetalRt = [\
# variables of covarThetalRt
['CovarThetaLRT', True, 'RTPTHLP_SGS', 1., 0],\
]
wobs = [\
# variables of wobs
['WOBS', True, 'WOBS', 1., 0],\
]
U = [\
# variables of U
['U', True, 'U', 1., 0],\
]
V = [\
# variables of V
['V', True, 'V', 1., 0],\
]
covarUW = [\
# variables of covarUV
['UPWP_SGS', False, 'UPWP_SGS', 1., 0],\
['UW', False, 'UW', 1., 0],\
['UW', True, 'UW + UPWP_SGS', 1., 0],\
]
covarVW = [\
# variables of covarVW
['VPWP_SGS', False, 'VPWP_SGS', 1., 0],\
['VW', False, 'VW', 1., 0],\
['VW', True, 'VW + VPWP_SGS', 1., 0],\
]
uVar = [\
# variables of uVar
['UP2_SGS', False, 'UP2_SGS', 1., 0],\
['U2', False, 'U2', 1., 0],\
['UVAR', True, 'UP2_SGS + U2', 1., 0],\
]
vVar = [\
# variables of vVar
['VP2_SGS', False, 'VP2_SGS', 1., 0],\
['V2', False, 'V2', 1., 0],\
['VVAR', True, 'VP2_SGS + V2', 1., 0],\
]
# Rain Water Mixing Ratio
QR = [\
# variables of QR
['QR', True, 'QR', 1./KG, 0],\
]
QRIP = [\
# variables of QRIP
['qrainm_ip', True, 'qrainm_ip', 1., 0],\
]
QRP2 = [\
# variables of QRP2
['qrainp2', True, 'qrainp2', 1., 0],\
]
QRP2_QRIP = [\
# variables of QRP2_QRIP
['qrainp2_ip', False, 'qrainp2_ip', 1., 0],\
['qrainm_ip', False, 'qrainm_ip', 1., 0],\
['QRP2_QRIP', True, '(qrainp2_ip / (np.maximum(np.full(n,1e-5),qrainm_ip)**2))', 1., 0],\
]
#Rain Drop Number Concentration
Nrm = [\
# variables of Nrm
['NR', False, 'NR', 1., 0],\
['RHO', False, 'RHO', 1., 0],\
['NRM', True, '(NR * 1e+6) / RHO', 1., 0],\
]
Nrm_IP = [\
# variables of Nrm_IP
['nrainm_ip', True, 'nrainm_ip', 1., 0],\
]
Nrp2 = [\
# variables of Nrp2
['nrainp2', True, | |
args = Arguments(agent_class, env_func=env_func, env_args=env_args)
args.num_layer = 3
args.net_dim = 2 ** 7
args.batch_size = int(args.net_dim * 2)
args.worker_num = 2
args.target_step = args.max_step
args.repeat_times = 2 ** -1
args.reward_scale = 2 ** -4
args.learning_rate = 2 ** -15
args.clip_grad_norm = 1.0
args.gamma = 0.99
args.if_act_target = False
args.explore_noise_std = 0.1 # for DPG
args.h_term_sample_rate = 2 ** -2
args.h_term_drop_rate = 2 ** -3
args.h_term_lambda = 2 ** -6
args.h_term_k_step = 4
args.h_term_update_gap = 2
args.eval_times = 2 ** 1
args.eval_gap = 2 ** 8
args.if_allow_break = False
args.break_step = int(2e6)
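# (Hedged note, added here: the log below appears to follow the ElegantRL
#  evaluator format; as far as can be inferred, ID is the worker/GPU id, Step
#  the cumulative environment steps, maxR the best evaluation return so far,
#  avgR/stdR and avgS/stdS the mean/std of evaluation returns and episode
#  lengths, expR the average exploration reward, and objC the critic
#  objective; the remaining columns are left unlabeled as "etc." in the
#  original output.)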
"""
| Arguments Remove cwd: ./Walker2d-v3_ReSAC_2
################################################################################
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
2 4.02e+03 -18.04 |
2 4.02e+03 -18.04 | -18.04 0.1 86 0 | -0.01 0.79 0.82 0.17
2 2.41e+05 88.29 |
2 2.41e+05 88.29 | 88.29 12.8 164 22 | 0.08 0.71 22.65 0.08
2 3.54e+05 228.02 |
2 3.54e+05 228.02 | 228.02 112.3 340 126 | 0.07 0.59 21.77 0.06
2 4.43e+05 228.02 | 109.31 0.0 160 0 | 0.07 0.67 24.23 0.06
2 5.19e+05 228.02 | 178.04 0.0 104 0 | 0.08 1.18 43.40 0.11
2 5.87e+05 228.02 | 71.07 0.0 207 0 | 0.07 1.56 61.99 0.15
2 6.51e+05 321.31 |
2 6.51e+05 321.31 | 321.31 29.7 226 7 | 0.08 1.08 57.19 0.11
2 7.05e+05 388.19 |
2 7.05e+05 388.19 | 388.19 20.8 244 16 | 0.05 0.53 33.81 0.07
2 7.54e+05 512.89 |
2 7.54e+05 512.89 | 512.89 24.3 486 20 | 0.10 0.32 20.92 0.04
2 8.02e+05 512.89 | 469.10 0.0 373 0 | 0.08 0.20 16.18 0.03
2 8.46e+05 512.89 | 411.87 0.0 256 0 | 0.10 0.14 13.30 0.03
2 8.85e+05 512.89 | 452.51 0.0 188 0 | 0.11 0.10 11.45 0.02
2 9.21e+05 1381.34 |
2 9.21e+05 1381.34 | 1381.34 530.7 608 226 | 0.13 0.08 11.06 0.02
2 9.58e+05 1381.34 | 475.98 0.0 226 0 | 0.12 0.07 11.06 0.02
2 9.92e+05 1381.34 | 1270.17 680.6 570 330 | 0.13 0.07 11.37 0.02
2 1.03e+06 1908.40 |
2 1.03e+06 1908.40 | 1908.40 8.6 1000 0 | 0.13 0.07 11.50 0.02
2 1.06e+06 1908.40 | 1761.35 254.2 908 92 | 0.14 0.07 12.03 0.02
2 1.10e+06 1908.40 | 938.58 0.0 449 0 | 0.13 0.07 12.54 0.02
2 1.13e+06 1908.40 | 716.68 0.0 359 0 | 0.14 0.07 12.93 0.02
2 1.16e+06 1908.40 | 1674.89 0.0 584 0 | 0.14 0.08 13.18 0.02
2 1.19e+06 2332.65 |
2 1.19e+06 2332.65 | 2332.65 66.9 887 113 | 0.16 0.08 13.55 0.02
2 1.22e+06 2332.65 | 1775.35 0.0 644 0 | 0.15 0.08 13.35 0.02
2 1.25e+06 2534.59 |
2 1.25e+06 2534.59 | 2534.59 10.1 1000 0 | 0.19 0.08 13.48 0.02
2 1.28e+06 3074.55 |
2 1.28e+06 3074.55 | 3074.55 276.9 902 98 | 0.18 0.08 14.28 0.03
2 1.31e+06 3134.52 |
2 1.31e+06 3134.52 | 3134.52 110.4 1000 0 | 0.16 0.09 14.33 0.03
2 1.33e+06 3134.52 | 1103.63 0.0 422 0 | 0.18 0.09 15.92 0.03
2 1.36e+06 3134.52 | 3106.59 58.3 1000 0 | 0.17 0.10 17.18 0.03
2 1.39e+06 3218.23 |
2 1.39e+06 3218.23 | 3218.23 148.1 1000 0 | 0.19 0.10 17.36 0.03
2 1.42e+06 3218.23 | 1566.81 0.0 491 0 | 0.18 0.10 17.50 0.03
2 1.45e+06 3218.23 | 2916.22 552.7 884 116 | 0.17 0.10 18.53 0.03
2 1.47e+06 3218.23 | 1098.57 0.0 371 0 | 0.19 0.10 18.71 0.03
2 1.50e+06 3218.23 | 3145.03 144.2 1000 0 | 0.19 0.10 18.69 0.03
2 1.52e+06 3278.94 |
2 1.52e+06 3278.94 | 3278.94 39.9 1000 0 | 0.20 0.10 19.04 0.03
2 1.55e+06 3278.94 | 3230.16 0.0 1000 0 | 0.20 0.11 19.33 0.03
2 1.57e+06 3278.94 | 3173.60 0.0 1000 0 | 0.19 0.10 19.20 0.03
2 1.59e+06 3278.94 | 3014.22 0.0 1000 0 | 0.19 0.10 20.24 0.03
2 1.61e+06 3278.94 | 3029.58 0.0 1000 0 | 0.20 0.10 19.66 0.03
2 1.64e+06 3278.94 | 3097.23 0.0 1000 0 | 0.21 0.10 20.53 0.03
2 1.66e+06 3339.45 |
2 1.66e+06 3339.45 | 3339.45 150.7 1000 0 | 0.19 0.11 20.27 0.03
2 1.68e+06 3602.28 |
2 1.68e+06 3602.28 | 3602.28 74.2 1000 0 | 0.20 0.11 20.61 0.03
2 1.70e+06 3921.36 |
2 1.70e+06 3921.36 | 3921.36 14.4 1000 0 | 0.20 0.11 20.76 0.03
2 1.72e+06 3921.36 | 3685.55 0.0 1000 0 | 0.20 0.11 20.81 0.03
2 1.74e+06 3921.36 | 3656.15 0.0 1000 0 | 0.20 0.11 20.80 0.03
2 1.76e+06 3921.36 | 1968.12 0.0 558 0 | 0.20 0.11 22.02 0.03
2 1.78e+06 3921.36 | 3768.89 0.0 1000 0 | 0.22 0.11 21.58 0.03
2 1.80e+06 3921.36 | 3855.74 0.0 1000 0 | 0.20 0.11 21.79 0.03
2 1.82e+06 3921.36 | 3623.61 0.0 1000 0 | 0.20 0.11 22.17 0.03
2 1.83e+06 3921.36 | 3441.80 810.2 819 169 | 0.21 0.11 22.05 0.03
2 1.85e+06 3921.36 | 173.80 0.0 99 0 | 0.21 0.11 22.30 0.03
2 1.87e+06 3921.36 | 185.29 0.0 105 0 | 0.21 0.11 22.83 0.03
2 1.89e+06 3921.36 | 3368.81 0.0 866 0 | 0.21 0.11 21.37 0.03
2 1.91e+06 3921.36 | 3681.30 0.0 1000 0 | 0.21 0.11 22.37 0.03
2 1.93e+06 3921.36 | 1009.22 0.0 312 0 | 0.21 0.11 22.56 0.03
2 1.95e+06 3921.36 | 3765.83 0.0 1000 0 | 0.21 0.12 22.10 0.03
2 1.97e+06 3921.36 | 260.32 0.0 118 0 | 0.23 0.11 22.96 0.03
2 1.99e+06 4099.22 |
2 1.99e+06 4099.22 | 4099.22 93.3 1000 0 | 0.22 0.12 22.81 0.03
| UsedTime: 14981 | SavedDir: ./Walker2d-v3_ReSAC_2
| Learner: Save in ./Walker2d-v3_ReSAC_2
| Arguments Remove cwd: ./Walker2d-v3_ReSACHtermK_5
################################################################################
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
5 4.11e+03 322.61 |
5 4.11e+03 322.61 | 322.61 0.2 210 0 | 0.01 0.78 0.68 0.17
5 2.38e+05 322.61 | 160.26 0.0 274 0 | 0.10 0.72 24.15 0.08
5 3.54e+05 322.61 | 236.19 149.1 218 40 | 0.09 0.77 25.16 0.06
5 4.43e+05 322.61 | 246.58 0.0 136 0 | 0.10 0.85 27.90 0.06
5 5.18e+05 322.61 | 277.61 0.0 152 0 | 0.08 0.69 29.26 0.06
5 5.81e+05 322.61 | 11.49 0.0 30 0 | 0.07 0.61 30.52 0.06
5 6.36e+05 322.61 | 166.38 0.0 88 0 | 0.09 0.66 36.18 0.06
5 6.88e+05 322.61 | 218.94 0.0 111 0 | 0.11 0.59 36.22 0.06
5 7.36e+05 322.61 | 9.33 0.0 26 0 | 0.09 0.38 31.12 0.05
5 7.81e+05 401.07 |
5 7.81e+05 401.07 | 401.07 54.4 177 15 | 0.10 0.25 22.31 0.04
5 8.24e+05 1200.05 |
5 8.24e+05 1200.05 | 1200.05 126.0 496 84 | 0.10 0.18 16.64 0.04
5 8.64e+05 1200.05 | 798.68 479.4 444 276 | 0.11 0.15 15.25 0.03
5 9.01e+05 1200.05 | 211.15 0.0 143 0 | 0.13 0.11 14.54 0.03
5 9.38e+05 1200.05 | 1178.97 629.1 533 241 | 0.13 0.08 12.28 0.02
5 9.75e+05 1200.05 | 518.32 0.0 209 0 | 0.15 0.07 12.11 0.02
5 1.01e+06 1973.11 |
5 1.01e+06 1973.11 | 1973.11 50.9 910 90 | 0.14 0.07 11.95 0.02
5 1.05e+06 1973.11 | 966.91 0.0 400 0 | 0.15 0.07 12.90 0.02
5 1.08e+06 1973.11 | 496.69 0.0 219 0 | 0.12 0.08 13.90 0.02
5 1.11e+06 2382.95 |
5 1.11e+06 2382.95 | 2382.95 21.7 1000 0 | 0.14 0.08 14.13 0.02
5 1.15e+06 2694.36 |
5 1.15e+06 2694.36 | 2694.36 31.9 1000 0 | 0.16 0.08 14.56 0.02
5 1.17e+06 2694.36 | 2109.05 774.1 736 264 | 0.17 0.09 15.10 0.03
5 1.20e+06 2694.36 | 2630.14 0.0 1000 0 | 0.16 0.09 16.15 0.03
5 1.22e+06 2823.11 |
5 1.22e+06 2823.11 | 2823.11 59.1 1000 0 | 0.17 0.09 16.54 0.03
5 1.24e+06 2823.11 | 2646.45 0.0 1000 0 | 0.16 0.09 17.18 0.03
5 1.26e+06 2823.11 | 2574.78 0.0 1000 0 | 0.16 0.09 17.60 0.03
5 1.29e+06 2880.16 |
5 1.29e+06 2880.16 | 2880.16 29.0 1000 0 | 0.16 0.09 17.81 0.03
5 1.31e+06 2880.16 | 2799.66 0.0 1000 0 | 0.16 0.09 17.94 0.03
5 1.34e+06 2880.16 | 2812.48 96.3 1000 0 | 0.17 0.09 18.53 0.03
5 1.37e+06 2923.55 |
5 1.37e+06 2923.55 | 2923.55 143.8 1000 0 | 0.18 0.09 18.45 0.03
5 1.39e+06 3720.88 |
5 1.39e+06 3720.88 | 3720.88 107.5 1000 0 | 0.19 0.09 18.90 0.03
5 1.42e+06 3720.88 | 3571.06 0.0 1000 0 | 0.21 0.09 19.19 0.03
5 1.44e+06 3720.88 | 3650.66 0.0 1000 0 | 0.20 0.09 19.57 0.03
5 1.47e+06 3720.88 | 3354.98 0.0 1000 0 | 0.19 0.09 20.57 | |
)
self.colorsCanvas = Tk.Canvas( mainFrame, borderwidth=2, relief='ridge', background='white', width=197, height=canvasHeight )
self.colorsCanvas.pack( pady=4 )
x = 10
y = 9
for i, rgbaColor in enumerate( reversed(self.recentColors) ):
# Prepare and store an image object for the color
colorSwatchImage = Image.new( 'RGBA', (8, 8), rgbaColor )
colorSwatchWithBorder = ImageOps.expand( colorSwatchImage, border=1, fill='black' )
self.recentColorImages.append( ImageTk.PhotoImage(colorSwatchWithBorder) )
# Draw the image onto the canvas.
itemId = self.colorsCanvas.create_image( x, y, image=self.recentColorImages[i], anchor='nw', tags='swatches' )
self.itemColors[itemId] = rgbaColor
x += 16
if i == 11: # Start a new line
x = 10
y += 16
self.colorsCanvas.tag_bind( 'swatches', '<1>', self.restoreColor )
def onMouseEnter(e): self.colorsCanvas['cursor']='hand2'
def onMouseLeave(e): self.colorsCanvas['cursor']=''
self.colorsCanvas.tag_bind( 'swatches', '<Enter>', onMouseEnter )
self.colorsCanvas.tag_bind( 'swatches', '<Leave>', onMouseLeave )
# RGB Channels
ttk.Label( mainFrame, text='Choose the RGB Channel values:' ).pack( anchor='w', padx=16, pady=4 )
curtainFrame = Tk.Frame( mainFrame, borderwidth=2, relief='ridge', width=250, height=50, cursor='hand2' )
whiteCurtain = Tk.Frame( curtainFrame, bg='white', width=25, height=50 )
whiteCurtain.pack( side='left' )
focusColorsFrame = Tk.Frame( curtainFrame, width=200, height=50 )
# Combine the initial color with the default background color, to simulate alpha on the colored frame (since Frames don't support alpha)
bgColor16Bit = Gui.root.winfo_rgb( focusColorsFrame['bg'] )
self.nativeBgColor = ( bgColor16Bit[0]/256, bgColor16Bit[1]/256, bgColor16Bit[2]/256 ) # Reduce it to an 8-bit colorspace
newColors = []
alphaBlending = round( self.currentRGBA[-1] / 255.0, 2 )
for i, colorChannel in enumerate( self.nativeBgColor ):
newColors.append( int(round( (alphaBlending * self.currentRGBA[i]) + (1-alphaBlending) * colorChannel )) )
originalColorBg = rgb2hex( newColors )
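# (Comment added for clarity, not in the original tool: the loop above is the
#  standard "source over" alpha blend, out = alpha*fg + (1 - alpha)*bg per
#  channel, used because Tk frames cannot render a true alpha channel.)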
if getLuminance( originalColorBg + 'ff' ) > 127: fontColor = 'black'
else: fontColor = 'white'
self.originalColor = Tk.Frame( focusColorsFrame, bg=originalColorBg, width=200, height=25 )
Tk.Label( self.originalColor, text='Original Color', bg=originalColorBg, foreground=fontColor ).pack()
self.currentRgbDisplay = Tk.Frame( focusColorsFrame, width=200, height=25 ) # , bg='#ACACAC'
Tk.Label( self.currentRgbDisplay, text='New Color' ).pack()
focusColorsFrame.pack( side='left' )
for frame in [ self.originalColor, self.currentRgbDisplay ]:
frame.pack()
frame.pack_propagate( False )
frame.bind( '<1>', self.pickRGB )
frame.winfo_children()[0].bind( '<1>', self.pickRGB )
blackCurtain = Tk.Frame( curtainFrame, bg='black', width=25, height=50 )
blackCurtain.pack( side='left' )
curtainFrame.pack( padx=5, pady=4 )
curtainFrame.pack_propagate( False )
for frame in curtainFrame.winfo_children(): frame.pack_propagate( False )
# Alpha Channel
ttk.Label( mainFrame, text='Choose the Alpha Channel value:' ).pack( anchor='w', padx=16, pady=4 )
alphaRowFrame = Tk.Frame( mainFrame )
self.alphaEntry = ttk.Entry( alphaRowFrame, width=3 )
self.alphaEntry.pack( side='left', padx=4 )
self.alphaEntry.bind( '<KeyRelease>', self.alphaUpdated )
self.alphaSlider = ttk.Scale( alphaRowFrame, orient='horizontal', from_=0, to=255, length=260, command=self.alphaUpdated )
self.alphaSlider.pack( side='left' , padx=4 )
alphaRowFrame.pack( padx=5, pady=4 )
# Color Value Conversions
ttk.Label( mainFrame, text='Color Space Comparisons:' ).pack( anchor='w', padx=16, pady=4 )
colorEntryFieldsFrame = Tk.Frame( mainFrame )
# RGBA (decimal and hex forms)
ttk.Label( colorEntryFieldsFrame, text='RGBA:' ).grid( column=0, row=0, padx=5 )
self.rgbaStringVar = Tk.StringVar()
self.rgbaEntry = ttk.Entry( colorEntryFieldsFrame, textvariable=self.rgbaStringVar, width=16, justify='center' )
self.rgbaEntry.grid( column=1, row=0, padx=5 )
self.rgbaEntry.bind( '<KeyRelease>', self.rgbaEntryUpdated )
ttk.Label( colorEntryFieldsFrame, text='RGBA Hex:' ).grid( column=2, row=0, padx=5, pady=5 )
self.hexColorStringVar = Tk.StringVar()
self.rgbaHexEntry = ttk.Entry( colorEntryFieldsFrame, textvariable=self.hexColorStringVar, width=10, justify='center' )
self.rgbaHexEntry.grid( column=3, row=0, padx=5 )
self.rgbaHexEntry.bind( '<KeyRelease>', self.hexEntryUpdated )
# TPL Formats
ttk.Label( colorEntryFieldsFrame, text='TPL Format:' ).grid( column=0, row=1, padx=5 )
self.tplFormat = Tk.StringVar()
if 'Palette' in self.title: # Limit the selection of formats to just those used for palettes.
formatList = userFriendlyFormatList[3:-4]
else: formatList = userFriendlyFormatList[:-4]
self.tplFormat.set( formatList[defaultTplFormat] )
self.tplFormatOptionMenu = ttk.OptionMenu( colorEntryFieldsFrame, self.tplFormat, formatList[defaultTplFormat], *formatList, command=self.updateColorDisplays )
self.tplFormatOptionMenu.grid( column=1, row=1, padx=5, pady=5 )
if 'Palette' in self.title: self.tplFormatOptionMenu['state'] = 'disabled'
self.tplFormatStringVar = Tk.StringVar()
self.tplFormatEntry = ttk.Entry( colorEntryFieldsFrame, textvariable=self.tplFormatStringVar, width=13, justify='center' )
self.tplFormatEntry.grid( column=2, columnspan=2, row=1, padx=5, sticky='w' )
self.tplFormatEntry.bind( '<KeyRelease>', self.tplEntryUpdated )
colorEntryFieldsFrame.pack( padx=5, pady=4 )
self.updateColorDisplays( updateImage=False )
#self.alphaSlider.set( self.currentRGBA[-1] )
# Buttons! For use when this isn't just a comparison tool, but being used as a color picker to replace a value in a game/file
if self.title != 'Color Converter':
buttonsFrame = Tk.Frame( mainFrame )
ttk.Button( buttonsFrame, text='Submit', command=self.submit ).pack( side='left', ipadx=4, padx=20 )
ttk.Button( buttonsFrame, text='Cancel', command=self.cancel ).pack( side='left', ipadx=4, padx=20 )
buttonsFrame.pack( pady=8 )
mainFrame.pack()
self.updateEntryBorders( None )
self.window.bind( '<FocusIn>', self.updateEntryBorders ) # Allows for switching between multiple open windows to move the highlighting around
def updateEntryBorders( self, event ): # Updates the border color of palette entries to indicate whether they're selected
if 'Palette' in self.title:
# If any items are currently selected, change their border color back to normal
for item in Gui.paletteCanvas.find_withtag( 'selected' ):
Gui.paletteCanvas.itemconfig( item, fill='black' )
Gui.paletteCanvas.dtag( item, 'selected' ) # Removes this tag from the canvas item
# Use the paletteEntryOffset tag to locate the border item (getting its canvas ID)
if self.datDataOffsets != ():
borderIids = Gui.paletteCanvas.find_withtag( 't'+str(self.datDataOffsets[2]) )
if borderIids:
Gui.paletteCanvas.itemconfig( borderIids[0], fill=Gui.paletteCanvas.entryBorderColor, tags=('selected', 't'+str(self.datDataOffsets[2])) )
def updateColorDisplays( self, updateImage=True, setAlphaEntry=True ): # Updates the visual representation, alpha value/slider, and colorspace Entry values
currentTplFormat = int( self.tplFormat.get().split()[0][1:] )
if currentTplFormat in [ 0, 1, 4 ]: alphaSupported = False
else: alphaSupported = True
# Combine the newly selected color with the default background color, to simulate alpha on the colored frame (since Frames don't support transparency)
newColors = []
alphaBlending = round( self.currentRGBA[-1] / 255.0, 2 )
for i, color in enumerate( self.nativeBgColor ):
newColors.append( int(round( (alphaBlending * self.currentRGBA[i]) + (1-alphaBlending) * color )) )
currentColorLabel = self.currentRgbDisplay.winfo_children()[0]
currentColorBg = rgb2hex( newColors )
self.currentRgbDisplay['bg'] = currentColorBg
currentColorLabel['bg'] = currentColorBg
if getLuminance( currentColorBg + 'ff' ) > 127: currentColorLabel['fg'] = 'black'
else: currentColorLabel['fg'] = 'white'
# Set the alpha components of the GUI
self.preventNextSliderCallback = True # Prevents an infinite loop where the programmatic setting of the slider causes another update for this function
self.alphaEntry['state'] = 'normal'
self.alphaSlider.state(['!disabled'])
currentAlphaLevel = self.currentRGBA[-1]
if not alphaSupported: # These formats do not support alpha; max the alpha channel display and disable the widgets
self.alphaEntry.delete( 0, 'end' )
self.alphaEntry.insert( 0, '255' )
self.alphaSlider.set( 255 )
self.alphaEntry['state'] = 'disabled'
self.alphaSlider.state(['disabled'])
elif setAlphaEntry: # Prevents moving the cursor position if the user is typing into this field
self.alphaEntry.delete( 0, 'end' )
self.alphaEntry.insert( 0, str(currentAlphaLevel) ) #.lstrip('0')
self.alphaSlider.set( currentAlphaLevel )
else: self.alphaSlider.set( currentAlphaLevel ) # User entered a value into the alphaEntry; don't modify that
# Set the RGBA fields
if alphaSupported:
self.rgbaStringVar.set( ', '.join([ str(channel) for channel in self.currentRGBA ]) )
self.hexColorStringVar.set( self.currentHexColor )
else:
self.rgbaStringVar.set( ', '.join([ str(channel) for channel in self.currentRGBA[:-1] ]) )
self.hexColorStringVar.set( self.currentHexColor[:-2] )
# Set the TPL Entry field
self.tplHex = tplEncoder.encodeColor( currentTplFormat, self.currentRGBA )
if currentTplFormat < 6:
self.tplFormatStringVar.set( self.tplHex.upper() )
elif currentTplFormat == 6: # In this case, the value will actually be a tuple of the color parts
self.tplFormatStringVar.set( self.tplHex[0].upper() + ' | ' + self.tplHex[1].upper() )
else: self.tplFormatStringVar.set( 'N/A' )
if 'Palette' in self.title and updateImage:
# Validate the encoded color
if len( self.tplHex ) != 4 or not validHex( self.tplHex ):
msg( 'The newly generated color was not two bytes!' )
else:
self.updateTexture( self.tplHex )
def pickRGB( self, event ):
try: rgbValues, hexColor = askcolor( initialcolor='#'+self.currentHexColor[:-2], parent=self.window )
except: rgbValues, hexColor = '', ''
if rgbValues:
# Get the current alpha value, and combine it with the colors chosen above.
currentAlphaLevel = int( round(self.alphaSlider.get()) )
self.currentRGBA = ( rgbValues[0], rgbValues[1], rgbValues[2], currentAlphaLevel )
self.currentHexColor = hexColor.replace('#', '').upper() + "{0:0{1}X}".format( currentAlphaLevel, 2 )
self.updateColorDisplays()
def alphaUpdated( self, event ):
if self.preventNextSliderCallback:
self.preventNextSliderCallback = False
return
else:
setAlphaEntry = True
if isinstance( event, str ): newAlphaValue = int( float(event) ) # Means the slider passed its new value (as a string)
else:
newAlphaValue = int( round(float( event.widget.get() )) )
setAlphaEntry = False
self.currentRGBA = self.currentRGBA[:-1] + ( newAlphaValue, )
self.currentHexColor = self.currentHexColor[:-2] + "{0:0{1}X}".format( newAlphaValue, 2 )
self.updateColorDisplays( setAlphaEntry=setAlphaEntry )
def rgbaEntryUpdated( self, event ):
# Parse and validate the input
channels = event.widget.get().split(',')
channelsList = []
parsingError = False
for channelValue in channels:
try:
newInt = int( float(channelValue) )
if newInt > -1 and newInt < 256: channelsList.append( newInt )
except:
parsingError = True
break
else: # Got through the above loop with no break. Still got one more check.
if len( channelsList ) != 4:
parsingError = True
if parsingError:
if event.keysym == 'Return': # User hit the "Enter" key in a confused attempt to force an update
msg( 'The input should be in the form, "r, g, b, a", where each value is within the range of 0 - 255.', 'Invalid input or formatting.' )
else: # Everything checks out, update the color and GUI
self.currentRGBA = tuple( channelsList )
self.currentHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in self.currentRGBA ] )
self.updateColorDisplays()
def hexEntryUpdated( self, event ):
# Parse and validate the input
inputStr = event.widget.get()
channelsList, parsingError = hex2rgb( inputStr )
if parsingError:
if event.keysym == 'Return': # User hit the "Enter" key in a confused attempt to force an update
msg( 'The input should be in the form, "RRGGBBAA", where each value is within the hexadecimal range of 00 - FF.', 'Invalid input or formatting.' )
else: # Everything checks out, update the color and GUI
self.currentRGBA = tuple( channelsList )
self.currentHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in self.currentRGBA ] )
self.updateColorDisplays()
def tplEntryUpdated( self, event ):
tplHex = self.tplFormatStringVar.get().replace('0x', '').replace('|', '')
nibbleCount = { 0:1, 1:2, 2:2, 3:4, 4:4, 5:4, 6:8, 8:1, 9:2, 10:4, 14:1 } # How many characters should be present in the string
currentTplFormat = int( self.tplFormat.get().split()[0][1:] )
if len( tplHex ) == nibbleCount[currentTplFormat] and validHex( tplHex ):
self.currentRGBA = tplDecoder.decodeColor( currentTplFormat, tplHex )
self.currentHexColor = ''.join( [ "{0:0{1}X}".format( channel, 2 ) for channel in self.currentRGBA ] )
self.updateColorDisplays()
def restoreColor( self, | |
# Source: Aorjoa/aiyara-ceph-dash, file .tox/py27/lib/python2.7/site-packages/ceph_argparse.py
"""
Types and routines used by the ceph CLI as well as the RESTful
interface. These have to do with querying the daemons for
command-description information, validating user command input against
those descriptions, and submitting the command to the appropriate
daemon.
Copyright (C) 2013 Inktank Storage, Inc.
LGPL2. See file COPYING.
"""
import copy
import json
import os
import pprint
import re
import socket
import stat
import sys
import types
import uuid
class ArgumentError(Exception):
"""
Something wrong with arguments
"""
pass
class ArgumentNumber(ArgumentError):
"""
Wrong number of a repeated argument
"""
pass
class ArgumentFormat(ArgumentError):
"""
Argument value has wrong format
"""
pass
class ArgumentValid(ArgumentError):
"""
Argument value is otherwise invalid (doesn't match choices, for instance)
"""
pass
class ArgumentTooFew(ArgumentError):
"""
Fewer arguments than descriptors in signature; may mean to continue
the search, so gets a special exception type
"""
class ArgumentPrefix(ArgumentError):
"""
Special for mismatched prefix; less severe, don't report by default
"""
pass
class JsonFormat(Exception):
"""
some syntactic or semantic issue with the JSON
"""
pass
class CephArgtype(object):
"""
Base class for all Ceph argument types
Instantiating an object sets any validation parameters
(allowable strings, numeric ranges, etc.). The 'valid'
method validates a string against that initialized instance,
throwing ArgumentError if there's a problem.
"""
def __init__(self, **kwargs):
"""
set any per-instance validation parameters here
from kwargs (fixed string sets, integer ranges, etc)
"""
pass
def valid(self, s, partial=False):
"""
Run validation against given string s (generally one word);
partial means to accept partial string matches (begins-with).
If cool, set self.val to the value that should be returned
(a copy of the input string, or a numeric or boolean interpretation
thereof, for example)
if not, throw ArgumentError(msg-as-to-why)
"""
self.val = s
def __repr__(self):
"""
return string representation of description of type. Note,
this is not a representation of the actual value. Subclasses
probably also override __str__() to give a more user-friendly
'name/type' description for use in command format help messages.
"""
a = ''
if hasattr(self, 'typeargs'):
a = self.typeargs
return '{0}(\'{1}\')'.format(self.__class__.__name__, a)
def __str__(self):
"""
where __repr__ (ideally) returns a string that could be used to
reproduce the object, __str__ returns one you'd like to see in
print messages. Use __str__ to format the argtype descriptor
as it would be useful in a command usage message.
"""
return '<{0}>'.format(self.__class__.__name__)
class CephInt(CephArgtype):
"""
range-limited integers, [+|-][0-9]+ or 0x[0-9a-f]+
range: list of 1 or 2 ints, [min] or [min,max]
"""
def __init__(self, range=''):
if range == '':
self.range = list()
else:
self.range = list(range.split('|'))
self.range = map(long, self.range)
def valid(self, s, partial=False):
try:
val = long(s)
except ValueError:
raise ArgumentValid("{0} doesn't represent an int".format(s))
if len(self.range) == 2:
if val < self.range[0] or val > self.range[1]:
raise ArgumentValid("{0} not in range {1}".format(val, self.range))
elif len(self.range) == 1:
if val < self.range[0]:
raise ArgumentValid("{0} not in range {1}".format(val, self.range))
self.val = val
def __str__(self):
r = ''
if len(self.range) == 1:
r = '[{0}-]'.format(self.range[0])
if len(self.range) == 2:
r = '[{0}-{1}]'.format(self.range[0], self.range[1])
return '<int{0}>'.format(r)
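# A short sketch, not part of ceph_argparse itself, of the validation pattern
# documented on CephArgtype: instantiate a type with its constraints, then
# call valid() and read back .val or catch ArgumentError.
def _sketch_cephint_validation():
    checker = CephInt(range='0|100')    # accept integers in [0, 100]
    checker.valid('42')                 # OK: sets checker.val to 42
    try:
        checker.valid('150')            # out of range
    except ArgumentValid:
        pass                            # raised exactly as documented above
    return checker.val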
class CephFloat(CephArgtype):
"""
range-limited float type
range: list of 1 or 2 floats, [min] or [min, max]
"""
def __init__(self, range=''):
if range == '':
self.range = list()
else:
self.range = list(range.split('|'))
self.range = map(float, self.range)
def valid(self, s, partial=False):
try:
val = float(s)
except ValueError:
raise ArgumentValid("{0} doesn't represent a float".format(s))
if len(self.range) == 2:
if val < self.range[0] or val > self.range[1]:
raise ArgumentValid("{0} not in range {1}".format(val, self.range))
elif len(self.range) == 1:
if val < self.range[0]:
raise ArgumentValid("{0} not in range {1}".format(val, self.range))
self.val = val
def __str__(self):
r = ''
if len(self.range) == 1:
r = '[{0}-]'.format(self.range[0])
if len(self.range) == 2:
r = '[{0}-{1}]'.format(self.range[0], self.range[1])
return '<float{0}>'.format(r)
class CephString(CephArgtype):
"""
String; pretty generic. goodchars is a RE char class of valid chars
"""
def __init__(self, goodchars=''):
from string import printable
try:
re.compile(goodchars)
except:
raise ValueError('CephString(): "{0}" is not a valid RE'.\
format(goodchars))
self.goodchars = goodchars
self.goodset = frozenset(
[c for c in printable if re.match(goodchars, c)]
)
def valid(self, s, partial=False):
sset = set(s)
if self.goodset and not sset <= self.goodset:
raise ArgumentFormat("invalid chars {0} in {1}".\
format(''.join(sset - self.goodset), s))
self.val = s
def __str__(self):
b = ''
if self.goodchars:
b += '(goodchars {0})'.format(self.goodchars)
return '<string{0}>'.format(b)
class CephSocketpath(CephArgtype):
"""
Admin socket path; check that it's readable and S_ISSOCK
"""
def valid(self, s, partial=False):
mode = os.stat(s).st_mode
if not stat.S_ISSOCK(mode):
raise ArgumentValid('socket path {0} is not a socket'.format(s))
self.val = s
def __str__(self):
return '<admin-socket-path>'
class CephIPAddr(CephArgtype):
"""
IP address (v4 or v6) with optional port
"""
def valid(self, s, partial=False):
# parse off port, use socket to validate addr
type = 6
if s.startswith('['):
type = 6
elif s.find('.') != -1:
type = 4
if type == 4:
port = s.find(':')
if (port != -1):
a = s[:port]
p = s[port+1:]
if int(p) > 65535:
raise ArgumentValid('{0}: invalid IPv4 port'.format(p))
else:
a = s
p = None
try:
socket.inet_pton(socket.AF_INET, a)
except:
raise ArgumentValid('{0}: invalid IPv4 address'.format(a))
else:
# v6
if s.startswith('['):
end = s.find(']')
if end == -1:
raise ArgumentFormat('{0} missing terminating ]'.format(s))
if s[end+1] == ':':
try:
p = int(s[end+2])
except:
raise ArgumentValid('{0}: bad port number'.format(s))
a = s[1:end]
else:
a = s
p = None
try:
socket.inet_pton(socket.AF_INET6, a)
except:
raise ArgumentValid('{0} not valid IPv6 address'.format(s))
if p is not None and long(p) > 65535:
raise ArgumentValid("{0} not a valid port number".format(p))
self.val = s
self.addr = a
self.port = p
def __str__(self):
return '<IPaddr[:port]>'
class CephEntityAddr(CephIPAddr):
"""
EntityAddress, that is, IP address[/nonce]
"""
def valid(self, s, partial=False):
nonce = None
if '/' in s:
ip, nonce = s.split('/')
else:
ip = s
super(self.__class__, self).valid(ip)
if nonce:
nonce_long = None
try:
nonce_long = long(nonce)
except ValueError:
pass
if nonce_long is None or nonce_long < 0:
raise ArgumentValid(
'{0}: invalid entity, nonce {1} not integer > 0'.\
format(s, nonce)
)
self.val = s
def __str__(self):
return '<EntityAddr>'
class CephPoolname(CephArgtype):
"""
Pool name; very little utility
"""
def __str__(self):
return '<poolname>'
class CephObjectname(CephArgtype):
"""
Object name. Maybe should be combined with Pool name as they're always
present in pairs, and then could be checked for presence
"""
def __str__(self):
return '<objectname>'
class CephPgid(CephArgtype):
"""
pgid, in form N.xxx (N = pool number, xxx = hex pgnum)
"""
def valid(self, s, partial=False):
if s.find('.') == -1:
raise ArgumentFormat('pgid has no .')
poolid, pgnum = s.split('.')
if poolid < 0:
raise ArgumentFormat('pool {0} < 0'.format(poolid))
try:
pgnum = int(pgnum, 16)
except:
raise ArgumentFormat('pgnum {0} not hex integer'.format(pgnum))
self.val = s
def __str__(self):
return '<pgid>'
class CephName(CephArgtype):
"""
Name (type.id) where:
type is osd|mon|client|mds
id is a base10 int, if type == osd, or a string otherwise
Also accept '*'
"""
def __init__(self):
self.nametype = None
self.nameid = None
def valid(self, s, partial=False):
if s == '*':
self.val = s
return
if s.find('.') == -1:
raise ArgumentFormat('CephName: no . in {0}'.format(s))
else:
t, i = s.split('.')
if not t in ('osd', 'mon', 'client', 'mds'):
raise ArgumentValid('unknown type ' + t)
if t == 'osd':
if i != '*':
try:
i = int(i)
except:
raise ArgumentFormat('osd id ' + i + ' not integer')
self.nametype = t
self.val = s
self.nameid = i
def __str__(self):
return '<name (type.id)>'
class CephOsdName(CephArgtype):
"""
Like CephName, but specific to osds: allow <id> alone
osd.<id>, or <id>, or *, where id is a base10 int
"""
def __init__(self):
self.nametype = None
self.nameid = None
def valid(self, s, partial=False):
if s == '*':
self.val = s
return
if s.find('.') != -1:
t, i = s.split('.')
if t != 'osd':
raise ArgumentValid('unknown type ' + t)
else:
t = 'osd'
i = s
try:
i = int(i)
except:
raise ArgumentFormat('osd id ' + i + ' not integer')
self.nametype = t
self.nameid = i
self.val = i
def __str__(self):
return '<osdname (id|osd.id)>'
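# Another short sketch, not from the original module: CephOsdName accepts
# either the bare id or the osd.<id> form and normalises both to an integer.
def _sketch_osdname_forms():
    by_name = CephOsdName()
    by_name.valid('osd.3')      # nametype 'osd', nameid 3
    by_id = CephOsdName()
    by_id.valid('3')            # same result without the prefix
    return by_name.nameid == by_id.nameid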
class CephChoices(CephArgtype):
"""
Set of string literals; init with valid choices
"""
def __init__(self, strings='', **kwargs):
self.strings = strings.split('|')
def valid(self, s, partial=False):
if not partial:
if not s in self.strings:
# show as __str__ does: {s1|s2..}
raise ArgumentValid("{0} | |
# Complex type {avm}Port with content type EMPTY
class Port_ (PortMapTarget_):
"""Complex type {avm}Port with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Port')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 175, 2)
_ElementMap = PortMapTarget_._ElementMap.copy()
_AttributeMap = PortMapTarget_._AttributeMap.copy()
# Base type is PortMapTarget_
# Attribute Notes uses Python identifier Notes
__Notes = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Notes'), 'Notes', '__avm_Port__Notes', pyxb.binding.datatypes.string)
__Notes._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 178, 8)
__Notes._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 178, 8)
Notes = property(__Notes.value, __Notes.set, None, None)
# Attribute XPosition uses Python identifier XPosition
__XPosition = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'XPosition'), 'XPosition', '__avm_Port__XPosition', pyxb.binding.datatypes.unsignedInt)
__XPosition._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 179, 8)
__XPosition._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 179, 8)
XPosition = property(__XPosition.value, __XPosition.set, None, None)
# Attribute Definition uses Python identifier Definition
__Definition = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Definition'), 'Definition', '__avm_Port__Definition', pyxb.binding.datatypes.anyURI)
__Definition._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 180, 8)
__Definition._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 180, 8)
Definition = property(__Definition.value, __Definition.set, None, None)
# Attribute YPosition uses Python identifier YPosition
__YPosition = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'YPosition'), 'YPosition', '__avm_Port__YPosition', pyxb.binding.datatypes.unsignedInt)
__YPosition._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 181, 8)
__YPosition._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 181, 8)
YPosition = property(__YPosition.value, __YPosition.set, None, None)
# Attribute Name uses Python identifier Name
__Name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Name'), 'Name', '__avm_Port__Name', pyxb.binding.datatypes.string)
__Name._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 182, 8)
__Name._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 182, 8)
Name = property(__Name.value, __Name.set, None, None)
# Attribute ID inherited from {avm}PortMapTarget
# Attribute PortMap inherited from {avm}PortMapTarget
_ElementMap.update({
})
_AttributeMap.update({
__Notes.name() : __Notes,
__XPosition.name() : __XPosition,
__Definition.name() : __Definition,
__YPosition.name() : __YPosition,
__Name.name() : __Name
})
Namespace.addCategoryObject('typeBinding', u'Port', Port_)
# Complex type {avm}NormalDistribution with content type ELEMENT_ONLY
class NormalDistribution_ (ProbabilisticValue_):
"""Complex type {avm}NormalDistribution with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'NormalDistribution')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 214, 2)
_ElementMap = ProbabilisticValue_._ElementMap.copy()
_AttributeMap = ProbabilisticValue_._AttributeMap.copy()
# Base type is ProbabilisticValue_
# Element Mean uses Python identifier Mean
__Mean = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Mean'), 'Mean', '__avm_NormalDistribution__Mean', False, pyxb.utils.utility.Location(u'avm.xsd', 218, 10), )
Mean = property(__Mean.value, __Mean.set, None, None)
# Element StandardDeviation uses Python identifier StandardDeviation
__StandardDeviation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'StandardDeviation'), 'StandardDeviation', '__avm_NormalDistribution__StandardDeviation', False, pyxb.utils.utility.Location(u'avm.xsd', 219, 10), )
StandardDeviation = property(__StandardDeviation.value, __StandardDeviation.set, None, None)
_ElementMap.update({
__Mean.name() : __Mean,
__StandardDeviation.name() : __StandardDeviation
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'NormalDistribution', NormalDistribution_)
# Complex type {avm}UniformDistribution with content type EMPTY
class UniformDistribution_ (ProbabilisticValue_):
"""Complex type {avm}UniformDistribution with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'UniformDistribution')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 276, 2)
_ElementMap = ProbabilisticValue_._ElementMap.copy()
_AttributeMap = ProbabilisticValue_._AttributeMap.copy()
# Base type is ProbabilisticValue_
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'UniformDistribution', UniformDistribution_)
# Complex type {avm}Optional with content type ELEMENT_ONLY
class Optional_ (DesignSpaceContainer_):
"""Complex type {avm}Optional with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Optional')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 361, 2)
_ElementMap = DesignSpaceContainer_._ElementMap.copy()
_AttributeMap = DesignSpaceContainer_._AttributeMap.copy()
# Base type is DesignSpaceContainer_
# Element Container (Container) inherited from {avm}Container
# Element Property (Property) inherited from {avm}Container
# Element ComponentInstance (ComponentInstance) inherited from {avm}Container
# Element Port (Port) inherited from {avm}Container
# Element Connector (Connector) inherited from {avm}Container
# Element JoinData (JoinData) inherited from {avm}Container
# Element Formula (Formula) inherited from {avm}Container
# Element ContainerFeature (ContainerFeature) inherited from {avm}Container
# Element ResourceDependency (ResourceDependency) inherited from {avm}Container
# Element DomainModel (DomainModel) inherited from {avm}Container
# Element Resource (Resource) inherited from {avm}Container
# Element Classifications (Classifications) inherited from {avm}Container
# Attribute XPosition inherited from {avm}Container
# Attribute Name inherited from {avm}Container
# Attribute YPosition inherited from {avm}Container
# Attribute ID inherited from {avm}Container
# Attribute Description inherited from {avm}Container
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'Optional', Optional_)
# Complex type {avm}Alternative with content type ELEMENT_ONLY
class Alternative_ (DesignSpaceContainer_):
"""Complex type {avm}Alternative with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Alternative')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 366, 2)
_ElementMap = DesignSpaceContainer_._ElementMap.copy()
_AttributeMap = DesignSpaceContainer_._AttributeMap.copy()
# Base type is DesignSpaceContainer_
# Element Container (Container) inherited from {avm}Container
# Element Property (Property) inherited from {avm}Container
# Element ComponentInstance (ComponentInstance) inherited from {avm}Container
# Element Port (Port) inherited from {avm}Container
# Element Connector (Connector) inherited from {avm}Container
# Element JoinData (JoinData) inherited from {avm}Container
# Element Formula (Formula) inherited from {avm}Container
# Element ContainerFeature (ContainerFeature) inherited from {avm}Container
# Element ResourceDependency (ResourceDependency) inherited from {avm}Container
# Element DomainModel (DomainModel) inherited from {avm}Container
# Element Resource (Resource) inherited from {avm}Container
# Element Classifications (Classifications) inherited from {avm}Container
# Element ValueFlowMux uses Python identifier ValueFlowMux
__ValueFlowMux = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ValueFlowMux'), 'ValueFlowMux', '__avm_Alternative__ValueFlowMux', True, pyxb.utils.utility.Location(u'avm.xsd', 370, 10), )
ValueFlowMux = property(__ValueFlowMux.value, __ValueFlowMux.set, None, None)
# Attribute XPosition inherited from {avm}Container
# Attribute Name inherited from {avm}Container
# Attribute YPosition inherited from {avm}Container
# Attribute ID inherited from {avm}Container
# Attribute Description inherited from {avm}Container
_ElementMap.update({
__ValueFlowMux.name() : __ValueFlowMux
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'Alternative', Alternative_)
# Complex type {avm}ComponentPortInstance with content type EMPTY
class ComponentPortInstance_ (PortMapTarget_):
"""Complex type {avm}ComponentPortInstance with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ComponentPortInstance')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 388, 2)
_ElementMap = PortMapTarget_._ElementMap.copy()
_AttributeMap = PortMapTarget_._AttributeMap.copy()
# Base type is PortMapTarget_
# Attribute IDinComponentModel uses Python identifier IDinComponentModel
__IDinComponentModel = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'IDinComponentModel'), 'IDinComponentModel', '__avm_ComponentPortInstance__IDinComponentModel', pyxb.binding.datatypes.string, required=True)
__IDinComponentModel._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 391, 8)
__IDinComponentModel._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 391, 8)
IDinComponentModel = property(__IDinComponentModel.value, __IDinComponentModel.set, None, None)
# Attribute ID inherited from {avm}PortMapTarget
# Attribute PortMap inherited from {avm}PortMapTarget
_ElementMap.update({
})
_AttributeMap.update({
__IDinComponentModel.name() : __IDinComponentModel
})
Namespace.addCategoryObject('typeBinding', u'ComponentPortInstance', ComponentPortInstance_)
# Complex type {avm}ComponentConnectorInstance with content type EMPTY
class ComponentConnectorInstance_ (ConnectorCompositionTarget_):
"""Complex type {avm}ComponentConnectorInstance with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ComponentConnectorInstance')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 414, 2)
_ElementMap = ConnectorCompositionTarget_._ElementMap.copy()
_AttributeMap = ConnectorCompositionTarget_._AttributeMap.copy()
# Base type is ConnectorCompositionTarget_
# Attribute IDinComponentModel uses Python identifier IDinComponentModel
__IDinComponentModel = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'IDinComponentModel'), 'IDinComponentModel', '__avm_ComponentConnectorInstance__IDinComponentModel', pyxb.binding.datatypes.string, required=True)
__IDinComponentModel._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 417, 8)
__IDinComponentModel._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 417, 8)
IDinComponentModel = property(__IDinComponentModel.value, __IDinComponentModel.set, None, None)
# Attribute ConnectorComposition inherited from {avm}ConnectorCompositionTarget
# Attribute ID inherited from {avm}ConnectorCompositionTarget
# Attribute ApplyJoinData inherited from {avm}ConnectorCompositionTarget
_ElementMap.update({
})
_AttributeMap.update({
__IDinComponentModel.name() : __IDinComponentModel
})
Namespace.addCategoryObject('typeBinding', u'ComponentConnectorInstance', ComponentConnectorInstance_)
# Complex type {avm}SimpleFormula with content type EMPTY
class SimpleFormula_ (Formula_):
"""Complex type {avm}SimpleFormula with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'SimpleFormula')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 443, 2)
_ElementMap = Formula_._ElementMap.copy()
_AttributeMap = Formula_._AttributeMap.copy()
# Base type is Formula_
# Attribute Name inherited from {avm}Formula
# Attribute XPosition inherited from {avm}Formula
# Attribute YPosition inherited from {avm}Formula
# Attribute Operation uses Python identifier Operation
__Operation = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Operation'), 'Operation', '__avm_SimpleFormula__Operation', SimpleFormulaOperation)
__Operation._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 446, 8)
__Operation._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 446, 8)
Operation = property(__Operation.value, __Operation.set, None, None)
# Attribute Operand uses Python identifier Operand
__Operand = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Operand'), 'Operand', '__avm_SimpleFormula__Operand', STD_ANON_4, required=True)
__Operand._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 447, 8)
__Operand._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 447, 8)
Operand = property(__Operand.value, __Operand.set, None, None)
# Attribute ID inherited from {avm}ValueNode
_ElementMap.update({
})
_AttributeMap.update({
__Operation.name() : __Operation,
__Operand.name() : __Operand
})
Namespace.addCategoryObject('typeBinding', u'SimpleFormula', SimpleFormula_)
# Complex type {avm}ComplexFormula with content type ELEMENT_ONLY
class ComplexFormula_ (Formula_):
"""Complex type {avm}ComplexFormula with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ComplexFormula')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 468, 2)
_ElementMap = Formula_._ElementMap.copy()
_AttributeMap = Formula_._AttributeMap.copy()
# Base type is Formula_
# Element Operand uses Python identifier Operand
__Operand = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Operand'), 'Operand', '__avm_ComplexFormula__Operand', True, pyxb.utils.utility.Location(u'avm.xsd', 472, 10), )
Operand = property(__Operand.value, __Operand.set, None, None)
# Attribute Name inherited from {avm}Formula
# Attribute XPosition inherited from {avm}Formula
# Attribute YPosition inherited from {avm}Formula
# Attribute ID inherited from {avm}ValueNode
# Attribute Expression uses Python identifier Expression
__Expression = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Expression'), 'Expression', '__avm_ComplexFormula__Expression', pyxb.binding.datatypes.string, required=True)
__Expression._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 474, 8)
__Expression._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 474, 8)
length of each dash and the gap length between
the dashes.
"""
self._cmd.distance(
name,
self._into_selection(selection1),
self._into_selection(selection2),
cutoff,
mode,
label=int(show_label),
width=width,
length=length,
gap=gap
)
@validate
def dss(self, selection=None, state=None):
"""
Determine the secondary structure of the selected atoms.
This method is a thin wrapper around the *PyMOL* ``dss()``
command.
Parameters
----------
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
state : int, optional
The state to apply the command on.
By default, the command is applied on all states of this
*PyMOL* object.
"""
state = 0 if state is None else state
self._cmd.dss(self._into_selection(selection), state)
@validate
def enable(self, selection=None):
"""
Turn on display of the selected atoms.
This method is a thin wrapper around the *PyMOL* ``enable()``
command.
Parameters
----------
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
"""
self._cmd.enable(self._into_selection(selection))
@validate
def hide(self, representation, selection=None):
"""
Turn off an atom representation (e.g. sticks, spheres, etc.).
This method is a thin wrapper around the *PyMOL* ``hide()``
command.
Parameters
----------
representation : str
One of
- ``'lines'``,
- ``'spheres'``,
- ``'mesh'``,
- ``'ribbon'``,
- ``'cartoon'``,
- ``'sticks'``,
- ``'dots'``,
- ``'surface'``,
- ``'label'``,
- ``'extent'``,
- ``'nonbonded'``,
- ``'nb_spheres'``,
- ``'slice'`` or
- ``'cell'``.
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
"""
self._cmd.hide(representation, self._into_selection(selection))
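# Usage sketch (names are illustrative): assuming `pymol_obj` is an instance
# of this wrapper and `atom_array` is the Biotite AtomArray it was created
# from, a representation can be switched off for a subset of atoms:
#
#   pymol_obj.hide("cartoon", atom_array.chain_id == "B")  # only chain B
#   pymol_obj.hide("lines")                                 # all atoms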
@validate
def indicate(self, selection=None):
"""
Show a visual representation of the selected atoms.
This method is a thin wrapper around the *PyMOL* ``indicate()``
command.
Parameters
----------
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
"""
self._cmd.indicate(self._into_selection(selection))
@validate
def label(self, selection, text):
"""
Label the selected atoms.
This method is a thin wrapper around the *PyMOL* ``label()``
command.
Parameters
----------
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
text : str
The label text.
"""
self._cmd.label(self._into_selection(selection), f'"{text}"')
@validate
def orient(self, selection=None, state=None):
"""
Align the principal components of the selected atoms with the
*xyz* axes.
This method is a thin wrapper around the *PyMOL* ``orient()``
command.
Parameters
----------
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
state : int, optional
The state to apply the command on.
By default, the command is applied on all states of this
*PyMOL* object.
"""
state = 0 if state is None else state
self._cmd.orient(self._into_selection(selection, True), state)
@validate
def origin(self, selection=None, state=None):
"""
Set the center of rotation about the selected atoms.
This method is a thin wrapper around the *PyMOL* ``origin()``
command.
Parameters
----------
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
state : int, optional
The state to apply the command on.
By default, the command is applied on all states of this
*PyMOL* object.
"""
state = 0 if state is None else state
self._cmd.origin(
selection=self._into_selection(selection), state=state
)
@validate
def select(self, name, selection=None):
"""
Create a named selection object from the selected atoms.
This method is a thin wrapper around the *PyMOL* ``select()``
command.
Parameters
----------
name : str
Name of the selection object to create.
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
"""
self._cmd.select(name, self._into_selection(selection))
@validate
def set(self, name, value, selection=None, state=None):
"""
Change per-atom settings.
This method is a thin wrapper around the *PyMOL* ``set()``
command.
Parameters
----------
name : str
The name of the setting to be changed.
One of
- ``'sphere_color'``,
- ``'surface_color'``,
- ``'mesh_color'``,
- ``'label_color'``,
- ``'dot_color'``,
- ``'cartoon_color'``,
- ``'ribbon_color'``,
- ``'transparency'`` (for surfaces) or
- ``'sphere_transparency'``.
value : object
The new value for the given setting name.
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
state : int, optional
The state to apply the command on.
By default, the command is applied on all states of this
*PyMOL* object.
"""
state = 0 if state is None else state
self._cmd.set(name, value, self._into_selection(selection), state)
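# Usage sketch (illustrative names): render the spheres of all heteroatoms
# half-transparent in every state:
#
#   pymol_obj.set("sphere_transparency", 0.5, atom_array.hetero)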
@validate
def set_bond(self, name, value, selection1=None, selection2=None,
state=None):
"""
Change per-bond settings for all bonds which exist
between two atom selections.
This method is a thin wrapper around the *PyMOL* ``set_bond()``
command.
Parameters
----------
name : str
The name of the setting to be changed.
One of
- ``'valence'``,
- ``'line_width'``,
- ``'line_color'``,
- ``'stick_radius'``,
- ``'stick_color'`` or
- ``'stick_transparency'``.
value : object
The new value for the given setting name.
selection1, selection2 : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, `selection1` applies to all atoms of this
*PyMOL* object and `selection2` applies to the same atoms as
`selection1`.
state : int, optional
The state to apply the command on.
By default, the command is applied on all states of this
*PyMOL* object.
"""
state = 0 if state is None else state
selection2 = selection1 if selection2 is None else selection2
self._cmd.set_bond(
name, value,
self._into_selection(selection1), self._into_selection(selection2),
state
)
@validate
def show(self, representation, selection=None):
"""
Turn on an atom representation (e.g. sticks, spheres, etc.).
This method is a thin wrapper around the *PyMOL* ``show()``
command.
Parameters
----------
representation : str
One of
- ``'lines'``,
- ``'spheres'``,
- ``'mesh'``,
- ``'ribbon'``,
- ``'cartoon'``,
- ``'sticks'``,
- ``'dots'``,
- ``'surface'``,
- ``'label'``,
- ``'extent'``,
- ``'nonbonded'``,
- ``'nb_spheres'``,
- ``'slice'`` or
- ``'cell'``.
selection : str or int or slice or ndarray, dtype=bool or ndarray, dtype=int, optional
A *Biotite* compatible atom selection index,
e.g. a boolean mask, or a *PyMOL* selection expression that
selects the atoms of this *PyMOL* object to apply the
command on.
By default, the command is applied on all atoms of this
*PyMOL* object.
"""
self._cmd.show(representation, self._into_selection(selection))
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class Test__LocalStack(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud._helpers import _LocalStack
return _LocalStack
def _make_one(self):
return self._get_target_class()()
def test_it(self):
batch1, batch2 = object(), object()
batches = self._make_one()
self.assertEqual(list(batches), [])
self.assertIsNone(batches.top)
batches.push(batch1)
self.assertIs(batches.top, batch1)
batches.push(batch2)
self.assertIs(batches.top, batch2)
popped = batches.pop()
self.assertIs(popped, batch2)
self.assertIs(batches.top, batch1)
self.assertEqual(list(batches), [batch1])
popped = batches.pop()
self.assertIsNone(batches.top)
self.assertEqual(list(batches), [])
class Test__UTC(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud._helpers import _UTC
return _UTC
def _make_one(self):
return self._get_target_class()()
def test_module_property(self):
from google.cloud import _helpers as MUT
klass = self._get_target_class()
try:
import pytz
except ImportError:
self.assertIsInstance(MUT.UTC, klass)
else:
self.assertIs(MUT.UTC, pytz.UTC) # pragma: NO COVER
def test_dst(self):
import datetime
tz = self._make_one()
self.assertEqual(tz.dst(None), datetime.timedelta(0))
def test_fromutc(self):
import datetime
naive_epoch = datetime.datetime.utcfromtimestamp(0)
self.assertIsNone(naive_epoch.tzinfo)
tz = self._make_one()
epoch = tz.fromutc(naive_epoch)
self.assertEqual(epoch.tzinfo, tz)
def test_tzname(self):
tz = self._make_one()
self.assertEqual(tz.tzname(None), 'UTC')
def test_utcoffset(self):
import datetime
tz = self._make_one()
self.assertEqual(tz.utcoffset(None), datetime.timedelta(0))
def test___repr__(self):
tz = self._make_one()
self.assertEqual(repr(tz), '<UTC>')
def test___str__(self):
tz = self._make_one()
self.assertEqual(str(tz), 'UTC')
class Test__ensure_tuple_or_list(unittest.TestCase):
def _call_fut(self, arg_name, tuple_or_list):
from google.cloud._helpers import _ensure_tuple_or_list
return _ensure_tuple_or_list(arg_name, tuple_or_list)
def test_valid_tuple(self):
valid_tuple_or_list = ('a', 'b', 'c', 'd')
result = self._call_fut('ARGNAME', valid_tuple_or_list)
self.assertEqual(result, ['a', 'b', 'c', 'd'])
def test_valid_list(self):
valid_tuple_or_list = ['a', 'b', 'c', 'd']
result = self._call_fut('ARGNAME', valid_tuple_or_list)
self.assertEqual(result, valid_tuple_or_list)
def test_invalid(self):
invalid_tuple_or_list = object()
with self.assertRaises(TypeError):
self._call_fut('ARGNAME', invalid_tuple_or_list)
def test_invalid_iterable(self):
invalid_tuple_or_list = 'FOO'
with self.assertRaises(TypeError):
self._call_fut('ARGNAME', invalid_tuple_or_list)
class Test__determine_default_project(unittest.TestCase):
def _call_fut(self, project=None):
from google.cloud._helpers import _determine_default_project
return _determine_default_project(project=project)
def test_it(self):
with mock.patch('google.auth.default', autospec=True) as default:
default.return_value = (
mock.sentinel.credentials, mock.sentinel.project)
project = self._call_fut()
self.assertEqual(project, mock.sentinel.project)
default.assert_called_once_with()
def test_explicit(self):
with mock.patch('google.auth.default', autospec=True) as default:
project = self._call_fut(mock.sentinel.project)
self.assertEqual(project, mock.sentinel.project)
self.assertFalse(default.called)
class Test__millis(unittest.TestCase):
def _call_fut(self, value):
from google.cloud._helpers import _millis
return _millis(value)
def test_one_second_from_epoch(self):
import datetime
from google.cloud._helpers import UTC
WHEN = datetime.datetime(1970, 1, 1, 0, 0, 1, tzinfo=UTC)
self.assertEqual(self._call_fut(WHEN), 1000)
class Test__microseconds_from_datetime(unittest.TestCase):
def _call_fut(self, value):
from google.cloud._helpers import _microseconds_from_datetime
return _microseconds_from_datetime(value)
def test_it(self):
import datetime
microseconds = 314159
timestamp = datetime.datetime(1970, 1, 1, hour=0,
minute=0, second=0,
microsecond=microseconds)
result = self._call_fut(timestamp)
self.assertEqual(result, microseconds)
class Test__millis_from_datetime(unittest.TestCase):
def _call_fut(self, value):
from google.cloud._helpers import _millis_from_datetime
return _millis_from_datetime(value)
def test_w_none(self):
self.assertIsNone(self._call_fut(None))
def test_w_utc_datetime(self):
import datetime
import six
from google.cloud._helpers import UTC
from google.cloud._helpers import _microseconds_from_datetime
NOW = datetime.datetime.utcnow().replace(tzinfo=UTC)
NOW_MICROS = _microseconds_from_datetime(NOW)
MILLIS = NOW_MICROS // 1000
result = self._call_fut(NOW)
self.assertIsInstance(result, six.integer_types)
self.assertEqual(result, MILLIS)
def test_w_non_utc_datetime(self):
import datetime
import six
from google.cloud._helpers import _UTC
from google.cloud._helpers import _microseconds_from_datetime
class CET(_UTC):
_tzname = 'CET'
_utcoffset = datetime.timedelta(hours=-1)
zone = CET()
NOW = datetime.datetime(2015, 7, 28, 16, 34, 47, tzinfo=zone)
NOW_MICROS = _microseconds_from_datetime(NOW)
MILLIS = NOW_MICROS // 1000
result = self._call_fut(NOW)
self.assertIsInstance(result, six.integer_types)
self.assertEqual(result, MILLIS)
def test_w_naive_datetime(self):
import datetime
import six
from google.cloud._helpers import UTC
from google.cloud._helpers import _microseconds_from_datetime
NOW = datetime.datetime.utcnow()
UTC_NOW = NOW.replace(tzinfo=UTC)
UTC_NOW_MICROS = _microseconds_from_datetime(UTC_NOW)
MILLIS = UTC_NOW_MICROS // 1000
result = self._call_fut(NOW)
self.assertIsInstance(result, six.integer_types)
self.assertEqual(result, MILLIS)
class Test__datetime_from_microseconds(unittest.TestCase):
def _call_fut(self, value):
from google.cloud._helpers import _datetime_from_microseconds
return _datetime_from_microseconds(value)
def test_it(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _microseconds_from_datetime
NOW = datetime.datetime(2015, 7, 29, 17, 45, 21, 123456,
tzinfo=UTC)
NOW_MICROS = _microseconds_from_datetime(NOW)
self.assertEqual(self._call_fut(NOW_MICROS), NOW)
class Test___date_from_iso8601_date(unittest.TestCase):
def _call_fut(self, value):
from google.cloud._helpers import _date_from_iso8601_date
return _date_from_iso8601_date(value)
def test_todays_date(self):
import datetime
TODAY = datetime.date.today()
self.assertEqual(self._call_fut(TODAY.strftime("%Y-%m-%d")), TODAY)
class Test___time_from_iso8601_time_naive(unittest.TestCase):
def _call_fut(self, value):
from google.cloud._helpers import _time_from_iso8601_time_naive
return _time_from_iso8601_time_naive(value)
def test_it(self):
import datetime
WHEN = datetime.time(12, 9, 42)
self.assertEqual(self._call_fut('12:09:42'), WHEN)
class Test__rfc3339_to_datetime(unittest.TestCase):
def _call_fut(self, dt_str):
from google.cloud._helpers import _rfc3339_to_datetime
return _rfc3339_to_datetime(dt_str)
def test_w_bogus_zone(self):
year = 2009
month = 12
day = 17
hour = 12
minute = 44
seconds = 32
micros = 123456789
dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%06dBOGUS' % (
year, month, day, hour, minute, seconds, micros)
with self.assertRaises(ValueError):
self._call_fut(dt_str)
def test_w_microseconds(self):
import datetime
from google.cloud._helpers import UTC
year = 2009
month = 12
day = 17
hour = 12
minute = 44
seconds = 32
micros = 123456
dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%06dZ' % (
year, month, day, hour, minute, seconds, micros)
result = self._call_fut(dt_str)
expected_result = datetime.datetime(
year, month, day, hour, minute, seconds, micros, UTC)
self.assertEqual(result, expected_result)
def test_w_nanoseconds(self):
year = 2009
month = 12
day = 17
hour = 12
minute = 44
seconds = 32
nanos = 123456789
dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%09dZ' % (
year, month, day, hour, minute, seconds, nanos)
with self.assertRaises(ValueError):
self._call_fut(dt_str)
class Test__rfc3339_nanos_to_datetime(unittest.TestCase):
def _call_fut(self, dt_str):
from google.cloud._helpers import _rfc3339_nanos_to_datetime
return _rfc3339_nanos_to_datetime(dt_str)
def test_w_bogus_zone(self):
year = 2009
month = 12
day = 17
hour = 12
minute = 44
seconds = 32
micros = 123456789
dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%06dBOGUS' % (
year, month, day, hour, minute, seconds, micros)
with self.assertRaises(ValueError):
self._call_fut(dt_str)
def test_w_truncated_nanos(self):
import datetime
from google.cloud._helpers import UTC
year = 2009
month = 12
day = 17
hour = 12
minute = 44
seconds = 32
truncateds_and_micros = [
('12345678', 123456),
('1234567', 123456),
('123456', 123456),
('12345', 123450),
('1234', 123400),
('123', 123000),
('12', 120000),
('1', 100000),
]
for truncated, micros in truncateds_and_micros:
dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%sZ' % (
year, month, day, hour, minute, seconds, truncated)
result = self._call_fut(dt_str)
expected_result = datetime.datetime(
year, month, day, hour, minute, seconds, micros, UTC)
self.assertEqual(result, expected_result)
def test_without_nanos(self):
import datetime
from google.cloud._helpers import UTC
year = 1988
month = 4
day = 29
hour = 12
minute = 12
seconds = 12
dt_str = '%d-%02d-%02dT%02d:%02d:%02dZ' % (
year, month, day, hour, minute, seconds)
result = self._call_fut(dt_str)
expected_result = datetime.datetime(
year, month, day, hour, minute, seconds, 0, UTC)
self.assertEqual(result, expected_result)
def test_w_nanoseconds(self):
import datetime
from google.cloud._helpers import UTC
year = 2009
month = 12
day = 17
hour = 12
minute = 44
seconds = 32
nanos = 123456789
micros = nanos // 1000
dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%09dZ' % (
year, month, day, hour, minute, seconds, nanos)
result = self._call_fut(dt_str)
expected_result = datetime.datetime(
year, month, day, hour, minute, seconds, micros, UTC)
self.assertEqual(result, expected_result)
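# Worked example of the behaviour covered above: parsing
# '2009-12-17T12:44:32.123456789Z' keeps only microsecond precision, so the
# resulting datetime has microsecond == 123456 (the trailing '789' is dropped).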
class Test__datetime_to_rfc3339(unittest.TestCase):
def _call_fut(self, *args, **kwargs):
from google.cloud._helpers import _datetime_to_rfc3339
return _datetime_to_rfc3339(*args, **kwargs)
@staticmethod
def _make_timezone(offset):
from google.cloud._helpers import _UTC
class CET(_UTC):
_tzname = 'CET'
_utcoffset = offset
return CET()
def test_w_utc_datetime(self):
import datetime
from google.cloud._helpers import UTC
TIMESTAMP = datetime.datetime(2016, 4, 5, 13, 30, 0, tzinfo=UTC)
result = self._call_fut(TIMESTAMP, ignore_zone=False)
self.assertEqual(result, '2016-04-05T13:30:00.000000Z')
def test_w_non_utc_datetime(self):
import datetime
zone = self._make_timezone(offset=datetime.timedelta(hours=-1))
TIMESTAMP = datetime.datetime(2016, 4, 5, 13, 30, 0, tzinfo=zone)
result = self._call_fut(TIMESTAMP, ignore_zone=False)
self.assertEqual(result, '2016-04-05T14:30:00.000000Z')
def test_w_non_utc_datetime_and_ignore_zone(self):
import datetime
zone = self._make_timezone(offset=datetime.timedelta(hours=-1))
TIMESTAMP = datetime.datetime(2016, 4, 5, 13, 30, 0, tzinfo=zone)
result = self._call_fut(TIMESTAMP)
self.assertEqual(result, '2016-04-05T13:30:00.000000Z')
def test_w_naive_datetime(self):
import datetime
TIMESTAMP = datetime.datetime(2016, 4, 5, 13, 30, 0)
result = self._call_fut(TIMESTAMP)
self.assertEqual(result, '2016-04-05T13:30:00.000000Z')
class Test__to_bytes(unittest.TestCase):
def _call_fut(self, *args, **kwargs):
from google.cloud._helpers import _to_bytes
return _to_bytes(*args, **kwargs)
def test_with_bytes(self):
value = b'bytes-val'
self.assertEqual(self._call_fut(value), value)
def test_with_unicode(self):
value = u'string-val'
encoded_value = b'string-val'
self.assertEqual(self._call_fut(value), encoded_value)
def test_unicode_non_ascii(self):
value = u'\u2013' # Long hyphen
encoded_value = b'\xe2\x80\x93'
self.assertRaises(UnicodeEncodeError, self._call_fut, value)
self.assertEqual(self._call_fut(value, encoding='utf-8'),
encoded_value)
def test_with_nonstring_type(self):
value = object()
self.assertRaises(TypeError, self._call_fut, value)
class Test__bytes_to_unicode(unittest.TestCase):
def _call_fut(self, *args, **kwargs):
from google.cloud._helpers import _bytes_to_unicode
return _bytes_to_unicode(*args, **kwargs)
def test_with_bytes(self):
value = b'bytes-val'
encoded_value = 'bytes-val'
self.assertEqual(self._call_fut(value), encoded_value)
def test_with_unicode(self):
value = u'string-val'
encoded_value = 'string-val'
self.assertEqual(self._call_fut(value), encoded_value)
def test_with_nonstring_type(self):
value = object()
self.assertRaises(ValueError, self._call_fut, value)
class Test__pb_timestamp_to_datetime(unittest.TestCase):
def _call_fut(self, timestamp):
from google.cloud._helpers import _pb_timestamp_to_datetime
return _pb_timestamp_to_datetime(timestamp)
def test_it(self):
import datetime
from google.protobuf.timestamp_pb2 import Timestamp
from google.cloud._helpers import UTC
# Epoch is midnight on January 1, 1970 ...
dt_stamp = datetime.datetime(1970, month=1, day=1, hour=0,
minute=1, second=1, microsecond=1234,
tzinfo=UTC)
# ... so 1 minute and 1 second after is 61 seconds and 1234
# microseconds is 1234000 nanoseconds.
timestamp = Timestamp(seconds=61, nanos=1234000)
self.assertEqual(self._call_fut(timestamp), dt_stamp)
class Test__from_any_pb(unittest.TestCase):
def _call_fut(self, pb_type, any_pb):
from google.cloud._helpers import _from_any_pb
return _from_any_pb(pb_type, any_pb)
def test_success(self):
from google.protobuf import any_pb2
from google.type import date_pb2
in_message = date_pb2.Date(year=1990)
in_message_any = any_pb2.Any()
input /= retain_prob
# use nonsymbolic shape for dropout mask if possible
return tf.nn.dropout(input, keep_prob=retain_prob)
def get_output_shape_for(self, input_shape):
return input_shape
# TODO: add Conv3DLayer
class FlattenLayer(Layer):
"""
A layer that flattens its input. The leading ``outdim-1`` dimensions of
the output keep the shape of the corresponding input dimensions. All
remaining input dimensions are collapsed into the last output dimension.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape.
outdim : int
The number of dimensions in the output.
See Also
--------
flatten : Shortcut
"""
def __init__(self, incoming, outdim=2, **kwargs):
super(FlattenLayer, self).__init__(incoming, **kwargs)
self.outdim = outdim
if outdim < 1:
raise ValueError('Dim must be >0, was %i' % outdim)
def get_output_shape_for(self, input_shape):
to_flatten = input_shape[self.outdim - 1:]
if any(s is None for s in to_flatten):
flattened = None
else:
flattened = int(np.prod(to_flatten))
return input_shape[:self.outdim - 1] + (flattened, )
def get_output_for(self, input, **kwargs):
with tf.name_scope(self.name, values=[input]):
# total_entries = tf.reduce_prod(tf.shape(input))
pre_shape = tf.shape(input)[:self.outdim - 1]
to_flatten = tf.reduce_prod(tf.shape(input)[self.outdim - 1:])
return tf.reshape(
input,
tf.concat(axis=0, values=[pre_shape,
tf.stack([to_flatten])]))
flatten = FlattenLayer # shortcut
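# Shape example: with outdim=2 an input of shape (batch, 8, 8, 3) is flattened
# to (batch, 192); with outdim=3 the same input becomes (batch, 8, 24).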
class ReshapeLayer(Layer):
def __init__(self, incoming, shape, **kwargs):
super(ReshapeLayer, self).__init__(incoming, **kwargs)
shape = tuple(shape)
for s in shape:
if isinstance(s, int):
if s == 0 or s < -1:
raise ValueError("`shape` integers must be positive or -1")
elif isinstance(s, list):
if len(s) != 1 or not isinstance(s[0], int) or s[0] < 0:
raise ValueError("`shape` input references must be "
"single-element lists of int >= 0")
elif isinstance(s, (tf.Tensor, tf.Variable)):
raise NotImplementedError
else:
raise ValueError("`shape` must be a tuple of int and/or [int]")
if sum(s == -1 for s in shape) > 1:
raise ValueError("`shape` cannot contain multiple -1")
self.shape = shape
# try computing the output shape once as a sanity check
self.get_output_shape_for(self.input_shape)
def get_output_shape_for(self, input_shape, **kwargs):
# Initialize output shape from shape specification
output_shape = list(self.shape)
# First, replace all `[i]` with the corresponding input dimension, and
# mask parts of the shapes thus becoming irrelevant for -1 inference
masked_input_shape = list(input_shape)
masked_output_shape = list(output_shape)
for dim, o in enumerate(output_shape):
if isinstance(o, list):
if o[0] >= len(input_shape):
raise ValueError("specification contains [%d], but input "
"shape has %d dimensions only" %
(o[0], len(input_shape)))
output_shape[dim] = input_shape[o[0]]
masked_output_shape[dim] = input_shape[o[0]]
if (input_shape[o[0]] is None) \
and (masked_input_shape[o[0]] is None):
# first time we copied this unknown input size: mask
# it, we have a 1:1 correspondence between out[dim] and
# in[o[0]] and can ignore it for -1 inference even if
# it is unknown.
masked_input_shape[o[0]] = 1
masked_output_shape[dim] = 1
# Secondly, replace all symbolic shapes with `None`, as we cannot
# infer their size here.
for dim, o in enumerate(output_shape):
if isinstance(o, (tf.Tensor, tf.Variable)):
raise NotImplementedError
# output_shape[dim] = None
# masked_output_shape[dim] = None
# From the shapes, compute the sizes of the input and output tensor
input_size = (None if any(x is None for x in masked_input_shape) else
np.prod(masked_input_shape))
output_size = (None if any(x is None for x in masked_output_shape) else
np.prod(masked_output_shape))
del masked_input_shape, masked_output_shape
# Finally, infer value for -1 if needed
if -1 in output_shape:
dim = output_shape.index(-1)
if (input_size is None) or (output_size is None):
output_shape[dim] = None
output_size = None
else:
output_size *= -1
output_shape[dim] = input_size // output_size
output_size *= output_shape[dim]
# Sanity check
if (input_size is not None) and (output_size is not None) \
and (input_size != output_size):
raise ValueError(
"%s cannot be reshaped to specification %s. "
"The total size mismatches." % (input_shape, self.shape))
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
with tf.name_scope(self.name, values=[input]):
# Replace all `[i]` with the corresponding input dimension
output_shape = list(self.shape)
for dim, o in enumerate(output_shape):
if isinstance(o, list):
output_shape[dim] = tf.shape(input)[o[0]]
# Everything else is handled by TensorFlow
return tf.reshape(input, tf.stack(output_shape))
reshape = ReshapeLayer # shortcut
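# Shape-spec example (l_in is an illustrative incoming layer):
#   ReshapeLayer(l_in, ([0], -1))  keeps input dimension 0 (referenced as [0])
#                                  and collapses the rest into one inferred dim;
#   ReshapeLayer(l_in, (-1, [1]))  keeps input dimension 1 as the last axis and
#                                  infers the leading dimension.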
class SliceLayer(Layer):
def __init__(self, incoming, indices, axis=-1, **kwargs):
super(SliceLayer, self).__init__(incoming, **kwargs)
self.slice = indices
self.axis = axis
def get_output_shape_for(self, input_shape):
output_shape = list(input_shape)
if isinstance(self.slice, int):
del output_shape[self.axis]
elif input_shape[self.axis] is not None:
output_shape[self.axis] = len(
list(range(*self.slice.indices(input_shape[self.axis]))))
else:
output_shape[self.axis] = None
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
with tf.name_scope(self.name, values=[input]):
axis = self.axis
ndims = input.get_shape().ndims
if axis < 0:
axis += ndims
if isinstance(self.slice, int) and self.slice < 0:
return tf.reverse(
input, [self.axis + 1
])[(slice(None), ) * axis + (-1 - self.slice, ) +
(slice(None), ) * (ndims - axis - 1)]
return input[(slice(None), ) * axis + (self.slice, ) +
(slice(None), ) * (ndims - axis - 1)]
class DimshuffleLayer(Layer):
def __init__(self, incoming, pattern, **kwargs):
super(DimshuffleLayer, self).__init__(incoming, **kwargs)
# Sanity check the pattern
used_dims = set()
for p in pattern:
if isinstance(p, int):
# Dimension p
if p in used_dims:
raise ValueError("pattern contains dimension {0} more "
"than once".format(p))
used_dims.add(p)
elif p == 'x':
# Broadcast
pass
else:
raise ValueError("pattern should only contain dimension"
"indices or 'x', not {0}".format(p))
self.pattern = pattern
# try computing the output shape once as a sanity check
self.get_output_shape_for(self.input_shape)
def get_output_shape_for(self, input_shape):
# Build output shape while keeping track of the dimensions that we are
# attempting to collapse, so we can ensure that they are broadcastable
output_shape = []
dims_used = [False] * len(input_shape)
for p in self.pattern:
if isinstance(p, int):
if p < 0 or p >= len(input_shape):
raise ValueError("pattern contains {0}, but input shape "
"has {1} dimensions "
"only".format(p, len(input_shape)))
# Dimension p
o = input_shape[p]
dims_used[p] = True
elif p == 'x':
# Broadcast; will be of size 1
o = 1
output_shape.append(o)
for i, (dim_size, used) in enumerate(zip(input_shape, dims_used)):
if not used and dim_size != 1 and dim_size is not None:
raise ValueError(
"pattern attempted to collapse dimension "
"{0} of size {1}; dimensions with size != 1/None are not"
"broadcastable and cannot be "
"collapsed".format(i, dim_size))
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
with tf.name_scope(self.name, values=[input]):
return tf.transpose(input, self.pattern)
dimshuffle = DimshuffleLayer # shortcut
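# Pattern example (l_in is an illustrative incoming layer):
#   DimshuffleLayer(l_in, (1, 0)) swaps the two axes of a 2-D input, turning an
#   (N, D) output shape into (D, N); the pattern is passed straight to
#   tf.transpose in get_output_for().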
def apply_ln(layer):
with tf.name_scope("apply_ln"):
def _normalize(x, prefix):
eps = 1e-5
dim = x.get_shape()[-1].value
bias_name = prefix + "_ln/bias"
scale_name = prefix + "_ln/scale"
if bias_name not in layer.norm_params:
layer.norm_params[bias_name] = layer.add_param(
tf.zeros_initializer(), (dim, ),
name=bias_name,
regularizable=False)
if scale_name not in layer.norm_params:
layer.norm_params[scale_name] = layer.add_param(
tf.ones_initializer(), (dim, ), name=scale_name)
bias = layer.norm_params[bias_name]
scale = layer.norm_params[scale_name]
mean, var = tf.nn.moments(x, axes=[1], keep_dims=True)
x_normed = (x - mean) / tf.sqrt(var + eps)
return x_normed * scale + bias
return _normalize
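# Usage sketch: apply_ln(layer) returns a closure that layer-normalizes a
# tensor and lazily registers the per-prefix scale/bias parameters on `layer`
# the first time each prefix is seen, e.g.
#
#   ln = apply_ln(self)
#   h_normed = ln(h_pre_activation, "hidden")   # names are illustrative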
class GRULayer(Layer):
"""
A gated recurrent unit implements the following update mechanism:
Reset gate: r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
Update gate: u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
Cell gate: c(t) = f_c(x(t) @ W_xc + r(t) * (h(t-1) @ W_hc) + b_c)
New hidden state: h(t) = (1 - u(t)) * h(t-1) + u(t) * c(t)
Note that the reset, update, and cell vectors must have the same dimension
as the hidden state
"""
def __init__(self,
incoming,
num_units,
hidden_nonlinearity,
gate_nonlinearity=tf.nn.sigmoid,
w_x_init=XavierUniformInitializer(),
w_h_init=OrthogonalInitializer(),
b_init=tf.zeros_initializer(),
hidden_init=tf.zeros_initializer(),
hidden_init_trainable=False,
layer_normalization=False,
**kwargs):
if hidden_nonlinearity is None:
hidden_nonlinearity = tf.identity
if gate_nonlinearity is None:
gate_nonlinearity = tf.identity
super(GRULayer, self).__init__(incoming, **kwargs)
with tf.variable_scope("gru_layer_step"):
input_shape = self.input_shape[2:]
input_dim = np.prod(input_shape)
self.layer_normalization = layer_normalization
# Weights for the initial hidden state
self.h0 = self.add_param(
hidden_init, (num_units, ),
name="h0",
trainable=hidden_init_trainable,
regularizable=False)
# Weights for the reset gate
self.W_xr = self.add_param(
w_x_init, (input_dim, num_units), name="W_xr")
self.W_hr = self.add_param(
w_h_init, (num_units, num_units), name="W_hr")
self.b_r = self.add_param(
b_init, (num_units, ), name="b_r", regularizable=False)
# Weights for the update gate
self.W_xu = self.add_param(
w_x_init, (input_dim, num_units), name="W_xu")
self.W_hu = self.add_param(
w_h_init, (num_units, num_units), name="W_hu")
self.b_u = self.add_param(
b_init, (num_units, ), name="b_u", regularizable=False)
# Weights for the cell gate
self.W_xc = self.add_param(
w_x_init, (input_dim, num_units), name="W_xc")
self.W_hc = self.add_param(
w_h_init, (num_units, num_units), name="W_hc")
self.b_c = self.add_param(
b_init, (num_units, ), name="b_c", regularizable=False)
self.W_x_ruc = tf.concat(
axis=1,
values=[self.W_xr, self.W_xu, self.W_xc],
name="W_x_ruc")
self.W_h_ruc = tf.concat(
axis=1,
values=[self.W_hr, self.W_hu, self.W_hc],
name="W_h_ruc")
self.W_x_ru = tf.concat(
axis=1, values=[self.W_xr, self.W_xu], name="W_x_ru")
self.W_h_ru = tf.concat(
axis=1, values=[self.W_hr, self.W_hu], name="W_h_ru")
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.edges(es)
.plot()
**Example: Shorthand**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.plot(es)
"""
if graph is None:
if self._edges is None:
util.error('Graph/edges must be specified.')
g = self._edges
else:
g = graph
n = self._nodes if nodes is None else nodes
name = name or self._name or ("Untitled " + util.random_string(10))
description = description or self._description or ("")
self._check_mandatory_bindings(n is not None)
api_version = PyGraphistry.api_version()
if api_version == 1:
dataset = self._plot_dispatch(g, n, name, description, 'json')
if skip_upload:
return dataset
info = PyGraphistry._etl1(dataset)
elif api_version == 2:
dataset = self._plot_dispatch(g, n, name, description, 'vgraph')
if skip_upload:
return dataset
info = PyGraphistry._etl2(dataset)
elif api_version == 3:
PyGraphistry.refresh()
dataset = self._plot_dispatch(g, n, name, description, 'arrow')
if skip_upload:
return dataset
#fresh
dataset.token = PyGraphistry.api_token()
dataset.post()
info = {
'name': dataset.dataset_id,
'type': 'arrow',
'viztoken': str(uuid.uuid4())
}
viz_url = PyGraphistry._viz_url(info, self._url_params)
cfg_client_protocol_hostname = PyGraphistry._config['client_protocol_hostname']
full_url = ('%s:%s' % (PyGraphistry._config['protocol'], viz_url)) if cfg_client_protocol_hostname is None else viz_url
if render == False or (render == None and not self._render):
return full_url
elif util.in_ipython():
from IPython.core.display import HTML
return HTML(util.make_iframe(full_url, self._height))
else:
import webbrowser
webbrowser.open(full_url)
return full_url
def pandas2igraph(self, edges, directed=True):
"""Convert a pandas edge dataframe to an IGraph graph.
Uses current bindings. Defaults to treating edges as directed.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst')
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
g.bind(point_color='community').plot(ig)
"""
import igraph
self._check_mandatory_bindings(False)
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
self._node = self._node or Plotter._defaultNodeId
eattribs = edges.columns.values.tolist()
eattribs.remove(self._source)
eattribs.remove(self._destination)
cols = [self._source, self._destination] + eattribs
etuples = [tuple(x) for x in edges[cols].values]
return igraph.Graph.TupleList(etuples, directed=directed, edge_attrs=eattribs,
vertex_name_attr=self._node)
def igraph2pandas(self, ig):
"""Under current bindings, transform an IGraph into a pandas edges dataframe and a nodes dataframe.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst').edges(es)
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
(es2, vs2) = g.igraph2pandas(ig)
g.nodes(vs2).bind(point_color='community').plot()
"""
def get_edgelist(ig):
idmap = dict(enumerate(ig.vs[self._node]))
for e in ig.es:
t = e.tuple
yield dict({self._source: idmap[t[0]], self._destination: idmap[t[1]]},
**e.attributes())
self._check_mandatory_bindings(False)
if self._node is None:
ig.vs[Plotter._defaultNodeId] = [v.index for v in ig.vs]
self._node = Plotter._defaultNodeId
elif self._node not in ig.vs.attributes():
util.error('Vertex attribute "%s" bound to "node" does not exist.' % self._node)
edata = get_edgelist(ig)
ndata = [v.attributes() for v in ig.vs]
nodes = pandas.DataFrame(ndata, columns=ig.vs.attributes())
cols = [self._source, self._destination] + ig.es.attributes()
edges = pandas.DataFrame(edata, columns=cols)
return (edges, nodes)
def networkx_checkoverlap(self, g):
import networkx as nx
[x, y] = [int(v) for v in nx.__version__.split('.')[:2]]
vattribs = None
if x == 1:
vattribs = g.nodes(data=True)[0][1] if g.number_of_nodes() > 0 else []
else:
vattribs = g.nodes(data=True) if g.number_of_nodes() > 0 else []
if not (self._node is None) and self._node in vattribs:
util.error('Vertex attribute "%s" already exists.' % self._node)
def networkx2pandas(self, g):
def get_nodelist(g):
for n in g.nodes(data=True):
yield dict({self._node: n[0]}, **n[1])
def get_edgelist(g):
for e in g.edges(data=True):
yield dict({self._source: e[0], self._destination: e[1]}, **e[2])
self._check_mandatory_bindings(False)
self.networkx_checkoverlap(g)
self._node = self._node or Plotter._defaultNodeId
nodes = pandas.DataFrame(get_nodelist(g))
edges = pandas.DataFrame(get_edgelist(g))
return (edges, nodes)
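# Sketch (illustrative): for a networkx graph `nxg`,
#   edges_df, nodes_df = plotter.networkx2pandas(nxg)
# returns the bound source/destination columns on edges_df, the bound node-id
# column on nodes_df, and any networkx node/edge attributes as extra columns.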
def _check_mandatory_bindings(self, node_required):
if self._source is None or self._destination is None:
util.error('Both "source" and "destination" must be bound before plotting.')
if node_required and self._node is None:
util.error('Node identifier must be bound when using node dataframe.')
def _check_bound_attribs(self, df, attribs, typ):
cols = df.columns.values.tolist()
for a in attribs:
b = getattr(self, '_' + a)
if b not in cols:
util.error('%s attribute "%s" bound to "%s" does not exist.' % (typ, a, b))
def _plot_dispatch(self, graph, nodes, name, description, mode='json'):
if isinstance(graph, pandas.core.frame.DataFrame) \
or isinstance(graph, pa.Table) \
or ( not (maybe_cudf is None) and isinstance(graph, maybe_cudf.DataFrame) ):
return self._make_dataset(graph, nodes, name, description, mode)
try:
import igraph
if isinstance(graph, igraph.Graph):
(e, n) = self.igraph2pandas(graph)
return self._make_dataset(e, n, name, description, mode)
except ImportError:
pass
try:
import networkx
if isinstance(graph, networkx.classes.graph.Graph) or \
isinstance(graph, networkx.classes.digraph.DiGraph) or \
isinstance(graph, networkx.classes.multigraph.MultiGraph) or \
isinstance(graph, networkx.classes.multidigraph.MultiDiGraph):
(e, n) = self.networkx2pandas(graph)
return self._make_dataset(e, n, name, description, mode)
except ImportError:
pass
util.error('Expected Pandas/Arrow/cuDF dataframe(s) or Igraph/NetworkX graph.')
# Sanitize node/edge dataframe by
# - dropping indices
# - dropping edges with NAs in source or destination
# - dropping nodes with NAs in nodeid
# - creating a default node table if none was provided.
# - inferring numeric types of all columns containing numpy objects
def _sanitize_dataset(self, edges, nodes, nodeid):
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
elist = edges.reset_index(drop=True) \
.dropna(subset=[self._source, self._destination])
obj_df = elist.select_dtypes(include=[numpy.object_])
elist[obj_df.columns] = obj_df.apply(pandas.to_numeric, errors='ignore')
if nodes is None:
nodes = pandas.DataFrame()
nodes[nodeid] = pandas.concat([edges[self._source], edges[self._destination]],
ignore_index=True).drop_duplicates()
else:
self._check_bound_attribs(nodes, ['node'], 'Vertex')
nlist = nodes.reset_index(drop=True) \
.dropna(subset=[nodeid]) \
.drop_duplicates(subset=[nodeid])
obj_df = nlist.select_dtypes(include=[numpy.object_])
nlist[obj_df.columns] = obj_df.apply(pandas.to_numeric, errors='ignore')
return (elist, nlist)
def _check_dataset_size(self, elist, nlist):
edge_count = len(elist.index)
node_count = len(nlist.index)
graph_size = edge_count + node_count
if edge_count > 8e6:
util.error('Maximum number of edges (8M) exceeded: %d.' % edge_count)
if node_count > 8e6:
util.error('Maximum number of nodes (8M) exceeded: %d.' % node_count)
if graph_size > 1e6:
util.warn('Large graph: |nodes| + |edges| = %d. Layout/rendering might be slow.' % graph_size)
# Bind attributes for ETL1 by creating a copy of the designated column renamed
# with magic names understood by ETL1 (eg. pointColor, etc)
def _bind_attributes_v1(self, edges, nodes):
def bind(df, pbname, attrib, default=None):
bound = getattr(self, attrib)
if bound:
if bound in df.columns.tolist():
df[pbname] = df[bound]
else:
util.warn('Attribute "%s" bound to %s does not exist.' % (bound, attrib))
elif default:
df[pbname] = df[default]
nodeid = self._node or Plotter._defaultNodeId
(elist, nlist) = self._sanitize_dataset(edges, nodes, nodeid)
self._check_dataset_size(elist, nlist)
bind(elist, 'edgeColor', '_edge_color')
bind(elist, 'edgeSourceColor', '_edge_source_color')
bind(elist, 'edgeDestinationColor', '_edge_destination_color')
bind(elist, 'edgeLabel', '_edge_label')
bind(elist, 'edgeTitle', '_edge_title')
bind(elist, 'edgeSize', '_edge_size')
bind(elist, 'edgeWeight', '_edge_weight')
bind(elist, 'edgeOpacity', '_edge_opacity')
bind(elist, 'edgeIcon', '_edge_icon')
bind(nlist, 'pointColor', '_point_color')
bind(nlist, 'pointLabel', '_point_label')
bind(nlist, 'pointTitle', '_point_title', nodeid)
bind(nlist, 'pointSize', '_point_size')
bind(nlist, 'pointWeight', '_point_weight')
bind(nlist, 'pointOpacity', '_point_opacity')
bind(nlist, 'pointIcon', '_point_icon')
bind(nlist, 'pointX', '_point_x')
bind(nlist, 'pointY', '_point_y')
return (elist, nlist)
# Bind attributes for ETL2 by an encodings map storing the visual semantic of
# each bound column.
def _bind_attributes_v2(self, edges, nodes):
def bind(enc, df, pbname, attrib, default=None):
bound = getattr(self, attrib)
if bound:
if bound in df.columns.tolist():
enc[pbname] = {'attributes' : [bound]}
else:
util.warn('Attribute "%s" bound to %s does not exist.' % (bound, attrib))
elif default:
enc[pbname] = {'attributes': [default]}
nodeid = self._node or Plotter._defaultNodeId
(elist, nlist) = self._sanitize_dataset(edges, nodes, nodeid)
self._check_dataset_size(elist, nlist)
edge_encodings = {
'source': {'attributes' : [self._source]},
'destination': {'attributes': [self._destination]},
}
node_encodings = {
'nodeId': {'attributes': [nodeid]}
}
bind(edge_encodings, elist, 'edgeColor', '_edge_color')
bind(edge_encodings, elist, 'edgeSourceColor', '_edge_source_color')
bind(edge_encodings, elist, 'edgeDestinationColor', '_edge_destination_color')
bind(edge_encodings, elist, 'edgeLabel', '_edge_label')
bind(edge_encodings, elist, 'edgeTitle', '_edge_title')
bind(edge_encodings, elist, 'edgeSize', '_edge_size')
bind(edge_encodings, elist, 'edgeWeight', '_edge_weight')
bind(edge_encodings, elist, 'edgeOpacity', '_edge_opacity')
bind(edge_encodings, elist, 'edgeIcon', '_edge_icon')
bind(node_encodings, nlist, 'pointColor', '_point_color')
bind(node_encodings, nlist, 'pointLabel', '_point_label')
bind(node_encodings, nlist, 'pointTitle', '_point_title', nodeid)
bind(node_encodings, nlist, 'pointSize', '_point_size')
bind(node_encodings, nlist, 'pointWeight', '_point_weight')
bind(node_encodings, nlist, 'pointOpacity', '_point_opacity')
bind(node_encodings, nlist, 'pointIcon', '_point_icon')
bind(node_encodings, nlist, 'pointX', '_point_x')
bind(node_encodings, nlist, 'pointY', '_point_y')
encodings = {
'nodes': node_encodings,
'edges': edge_encodings
}
return (elist, nlist, encodings)
def _table_to_pandas(self, table) -> pandas.DataFrame:
if table is None:
return table
if isinstance(table, pandas.DataFrame):
return table
if isinstance(table, pa.Table):
return table.to_pandas()
if not (maybe_cudf is None) and isinstance(table, maybe_cudf.DataFrame):
return table.to_pandas()
raise Exception('Unknown type %s: Could not convert data to Pandas dataframe' % str(type(table)))
def _table_to_arrow(self, table) -> pa.Table:
if table is None:
return table
if isinstance(table, pa.Table):
return table
if isinstance(table, pandas.DataFrame):
return pa.Table.from_pandas(table, preserve_index=False).replace_schema_metadata({})
if not (maybe_cudf is None) and isinstance(table, maybe_cudf.DataFrame):
return table.to_arrow()
raise Exception('Unknown type %s: Could not convert data to Arrow' % str(type(table)))
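# Conversion sketch: both helpers accept pandas, Arrow, or (when available)
# cuDF inputs and normalize them to the requested representation, e.g.
#   tbl = self._table_to_arrow(pandas.DataFrame({'s': [0], 'd': [1]}))
# yields a pyarrow.Table with its schema metadata cleared.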
def _make_dataset(self, edges, nodes, name, description, mode):
try:
if len(edges) == 0:
util.warn('Graph has no edges, may have rendering issues')
except Exception:
pass  # `edges` may not support len(); skip the empty-graph warning
if mode == 'json':
edges_df = self._table_to_pandas(edges)
nodes_df = self._table_to_pandas(nodes)
return self._make_json_dataset(edges_df, nodes_df, name)
elif mode == 'vgraph':
edges_df = self._table_to_pandas(edges)
nodes_df = self._table_to_pandas(nodes)
return self._make_vgraph_dataset(edges_df, nodes_df, name)
elif mode == 'arrow':
edges_arr = self._table_to_arrow(edges)
nodes_arr = self._table_to_arrow(nodes)
return
INTEGER '
'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
'NULL,state INTEGER NOT NULL,end_time INTEGER NOT NULL,opened '
'INTEGER NOT NULL)'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'presentation': (
'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
'INTEGER NOT NULL)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed '
'BOOLEAN,visit_duration INTEGER DEFAULT 0 NOT NULL)')}
SCHEMAS = [_SCHEMA_8, _SCHEMA_16, _SCHEMA_19, _SCHEMA_20]
def ParseFileDownloadedRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a file downloaded row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = ChromeHistoryFileDownloadedEventData()
event_data.full_path = self._GetRowValue(query_hash, row, 'full_path')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.received_bytes = self._GetRowValue(
query_hash, row, 'received_bytes')
event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'start_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
parser_mediator.ProduceEventWithEventData(event, event_data)
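# Note: in this older downloads schema, start_time is treated as a POSIX
# timestamp (seconds since 1970-01-01), hence the dfdatetime PosixTime
# wrapper above rather than a WebKit/Chrome timestamp.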
class GoogleChrome27HistoryPlugin(BaseGoogleChromeHistoryPlugin):
"""SQLite parser plugin for Google Chrome 27+ history database files."""
NAME = 'chrome_27_history'
DATA_FORMAT = 'Google Chrome 27 and later history SQLite database file'
REQUIRED_STRUCTURE = {
'downloads': frozenset([
'id', 'target_path', 'received_bytes', 'total_bytes', 'start_time']),
'downloads_url_chains': frozenset([
'id', 'url']),
'urls': frozenset([
'id', 'url', 'title', 'visit_count', 'typed_count',
'last_visit_time', 'hidden']),
'visits': frozenset([
'visit_time', 'from_visit', 'transition', 'id'])}
QUERIES = [
(('SELECT urls.id, urls.url, urls.title, urls.visit_count, '
'urls.typed_count, urls.last_visit_time, urls.hidden, visits.'
'visit_time, visits.from_visit, visits.transition, visits.id '
'AS visit_id FROM urls, visits WHERE urls.id = visits.url ORDER '
'BY visits.visit_time'), 'ParseLastVisitedRow'),
(('SELECT downloads.id AS id, downloads.start_time,'
'downloads.target_path, downloads_url_chains.url, '
'downloads.received_bytes, downloads.total_bytes FROM downloads,'
' downloads_url_chains WHERE downloads.id = '
'downloads_url_chains.id'), 'ParseFileDownloadedRow')]
_SCHEMA_27 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed '
'BOOLEAN,visit_duration INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_31 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_37 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL,interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_51 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT '
'NULL,tab_url VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT '
'NULL,http_method VARCHAR NOT NULL,by_ext_id VARCHAR NOT '
'NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER | |
np.sum(normdata,axis=2)[:,:,np.newaxis]
# Divide by mean spectrum to remove wavelength dependence
normdata /= np.mean(normdata,axis=(0,1))[np.newaxis,np.newaxis,:]
# Average frames to get flat-field correction
flat_norm = np.mean(normdata,axis=0)
flat_norm[np.where(np.mean(normmask,axis=0)<1)] = 1
'''
normdata /= np.mean(normdata,axis=(1,2))[:,np.newaxis,np.newaxis]
flat_window = np.median(normdata,axis=0)
medflat = np.median(flat_window, axis=0)
flat_window /= medflat
flat_window /= np.median(flat_window,axis=1)[:,np.newaxis]
flat_norm = flat_window/np.mean(flat_window)
'''
plt.figure(3)
plt.clf()
plt.imshow(np.copy(subdata[10,-1]),origin='lower',aspect='auto',
vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
ff = np.load('ff.npy')
subff = ff[ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
#subdata[:,:,69:91,15:147] /= flat_norm
subdata /= subff
plt.figure(4)
plt.clf()
plt.imshow(subdata[10,-1],origin='lower',aspect='auto',vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
plt.figure(1)
plt.clf()
plt.imshow(flat_norm,origin='lower',aspect='auto')
plt.colorbar()
plt.tight_layout()
plt.pause(0.1)
ev.flat_norm = flat_norm
return ev
"""
"""
if isplots:
# Plot normalized flat fields
plt.figure(1000, figsize=(12,8))
plt.clf()
plt.suptitle('Master Flat Frames')
for i in range(ev.n_spec):
for j in range(ev.n_img):
#plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
plt.title(str(j) +','+ str(i))
plt.imshow(subflat[j][i], origin='lower')
plt.tight_layout()
plt.savefig(ev.eventdir + '/figs/fig1000-Flats.png')
# Plot masks
plt.figure(1001, figsize=(12,8))
plt.clf()
plt.suptitle('Mask Frames')
for i in range(ev.n_spec):
for j in range(ev.n_img):
#plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
plt.title(str(j) +','+ str(i))
plt.imshow(flatmask[j][i], origin='lower')
plt.tight_layout()
plt.savefig(ev.eventdir + '/figs/fig1001-Masks.png')
if ev.detector == 'IR':
# Plot Slit shift
plt.figure(1004, figsize=(12,8))
plt.clf()
plt.suptitle('Model Slit Tilts/Shifts')
plt.plot(ev.shift_values, ev.yfit, '.')
plt.plot(ev.slitshift, range(ev.ywindow[0][0],ev.ywindow[0][1]), 'r-', lw=2)
plt.xlim(-1,1)
plt.savefig(ev.eventdir + '/figs/fig1004-SlitTilt.png')
plt.pause(0.1)
"""
ev.ra = data_mhdr[0]['RA_TARG']*np.pi/180
ev.dec = data_mhdr[0]['DEC_TARG']*np.pi/180
if ev.horizonsfile is not None:
# Apply light-time correction, convert to BJD_TDB
# Horizons file created for HST around time of observations
print("Converting times to BJD_TDB...")
ev.bjd_corr = suntimecorr.suntimecorr(ev.ra, ev.dec, ev.jd, ev.horizonsfile)
bjdutc = ev.jd + ev.bjd_corr/86400.
ev.bjdtdb = utc_tt.utc_tt(bjdutc,ev.leapdir)
print('BJD_corr range: ' + str(ev.bjd_corr[0]) + ', ' + str(ev.bjd_corr[-1]))
else:
print("No Horizons file found.")
ev.bjdtdb = ev.jd
if n_reads > 1:
ev.n_reads = n_reads
# Subtract pairs of subframes
diffdata = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
differr = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
for m in range(ev.n_files):
for n in range(n_reads-1):
#diffmask[m,n] = np.copy(flatmask[j][0])
#diffmask[m,n][np.where(suberr[m,n ] > diffthresh*np.std(suberr[m,n ]))] = 0
#diffmask[m,n][np.where(suberr[m,n+1] > diffthresh*np.std(suberr[m,n+1]))] = 0
diffdata[m,n] = subdata[m,n+1]-subdata[m,n]
differr [m,n] = np.sqrt(suberr[m,n+1]**2+suberr[m,n]**2)
else:
# FLT data has already been differenced
# FLT files subtract first from last, 2 reads
ev.n_reads = 2
diffdata = subdata
differr = suberr
diffmask = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
guess = np.zeros((ev.n_files,ev.n_reads-1),dtype=int)
for m in range(ev.n_files):
#Select appropriate mask
#if ev.n_img == (np.max(ev.orbitnum)+1):
# j = int(ev.orbitnum[m])
#else:
# j = 0
for n in range(n_reads-1):
diffmask[m,n] = np.copy(flatmask[m][0])
try:
diffmask[m,n][ np.where(differr[m,n] > ev.diffthresh*
np.median(differr[m,n],axis=1)[:,np.newaxis])] = 0
#diffdata[m,n] *= diffmask[m,n]
except:
# May fail for FLT files
print("Diffthresh failed.")
foo = diffdata[m,n]*diffmask[m,n]
guess[m,n] = np.median(np.where(foo > np.mean(foo))[0]).astype(int)
# Guess may be skewed if first file is zeros
if guess[m,0] < 0 or guess[m,0] > subny:
guess[m,0] = guess[m,1]
# Compute full scan length
ev.scanHeight = np.zeros(ev.n_files)
for m in range(ev.n_files):
scannedData = np.sum(subdata[m,-1], axis=1)
xmin = np.min(guess[m])
xmax = np.max(guess[m])
scannedData/= np.median(scannedData[xmin:xmax+1])
scannedData-= 0.5
#leftEdge = np.where(scannedData > 0)/2)[0][0]
#rightEdge = np.where(scannedData > 0)/2)[0][-1]
#yrng = range(leftEdge-5, leftEdge+5, 1)
yrng = range(subny)
spline = spi.UnivariateSpline(yrng, scannedData[yrng], k=3, s=0)
roots = spline.roots()
try:
ev.scanHeight[m] = roots[1]-roots[0]
except:
pass
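# Editor's sketch (standalone toy, hedged): the scan height above is essentially a
# full-width-at-half-maximum measurement -- normalise the summed cross-scan profile,
# subtract 0.5, and take the separation of the interpolating spline's roots. On a
# synthetic Gaussian profile the recovered width is ~2.355 * sigma:
#
# import numpy as np
# import scipy.interpolate as spi
#
# y = np.arange(200, dtype=float)
# profile = np.exp(-0.5 * ((y - 100.0) / 15.0) ** 2)   # toy cross-scan profile
# norm = profile / profile.max() - 0.5                 # the pipeline normalises by a median instead
# spline = spi.UnivariateSpline(y, norm, k=3, s=0)
# roots = spline.roots()
# fwhm = roots[-1] - roots[0]                          # ~35.3 rows for sigma = 15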
#Outlier rejection of sky background along time axis
print("Performing background outlier rejection...")
import sigrej, optspex
for p in range(2):
iscan = np.where(ev.scandir == p)[0]
if len(iscan) > 0:
for n in range(ev.n_reads-1):
# Set limits on the sky background
x1 = (guess[iscan,n].min()-ev.fitbghw).astype(int)
x2 = (guess[iscan,n].max()+ev.fitbghw).astype(int)
bgdata1 = diffdata[iscan,n,:x1 ]
bgmask1 = diffmask[iscan,n,:x1 ]
bgdata2 = diffdata[iscan,n, x2:]
bgmask2 = diffmask[iscan,n, x2:]
bgerr1 = np.median(suberr[iscan,n,:x1 ])
bgerr2 = np.median(suberr[iscan,n, x2:])
estsig1 = [bgerr1 for j in range(len(ev.sigthresh))]
estsig2 = [bgerr2 for j in range(len(ev.sigthresh))]
diffmask[iscan,n,:x1 ] = sigrej.sigrej(bgdata1, ev.sigthresh, bgmask1, estsig1)
diffmask[iscan,n, x2:] = sigrej.sigrej(bgdata2, ev.sigthresh, bgmask2, estsig2)
# Write background
#global bg, diffmask
def writeBG(arg):
background, mask, m, n = arg
bg[m,n] = background
diffmask[m,n] = mask
return
# STEP 3: Fit sky background with out-of-spectra data
# FINDME: parallelize bg subtraction
print("Performing background subtraction...")
x1 = np.zeros((ev.n_files,ev.n_reads-1), dtype=int)
x2 = np.zeros((ev.n_files,ev.n_reads-1), dtype=int)
bg = np.zeros((diffdata.shape))
if ev.ncpu == 1:
# Only 1 CPU
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
x1[m,n] = (guess[m,n]-ev.fitbghw).astype(int)
x2[m,n] = (guess[m,n]+ev.fitbghw).astype(int)
writeBG(hst.fitbg(diffdata[m,n], diffmask[m,n], x1[m,n], x2[m,n],
ev.bgdeg, ev.p3thresh, isplots, m, n, ev.n_files))
else:
# Multiple CPUs
pool = mp.Pool(ev.ncpu)
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
x1[m,n] = (guess[m,n]-ev.fitbghw).astype(int)
x2[m,n] = (guess[m,n]+ev.fitbghw).astype(int)
res = pool.apply_async(hst.fitbg, args=(diffdata[m,n], diffmask[m,n], x1[m,n], x2[m,n],
ev.bgdeg, ev.p3thresh, isplots, m, n, ev.n_files), callback=writeBG)
pool.close()
pool.join()
res.wait()
print(" Done.")
# STEP 2: Calculate variance
bgerr = np.std(bg, axis=2)/np.sqrt(np.sum(diffmask, axis=2))
bgerr[np.where(np.isnan(bgerr))] = 0.
ev.v0 += np.mean(bgerr**2)
variance = abs(diffdata) / ev.gain + ev.v0
#variance = abs(subdata*submask) / gain + v0
# Perform background subtraction
diffdata -= bg
#
'''
foo = np.sum(diffdata*diffmask, axis=2)
guess = []
for i in range(nreads-1):
guess.append(np.median(np.where(foo[i] > np.mean(foo[i]))[0]).astype(int))
guess = np.array(guess)
# Guess may be skewed if first file is zeros
if guess[0] < 0 or guess[0] > subnx:
guess[0] = guess[1]
'''
# Write drift2D
def writeDrift2D(arg):
drift2D, m, n = arg
# Assign to array of spectra and uncertainties
ev.drift2D[m,n] = drift2D
return
'''
# Calulate drift2D
def calcDrift2D():#im1, im2, m, n):
print("test")
drift2D = imr.chi2_shift(im1, im2, boundary='constant', nthreads=4,
zeromean=False, return_error=False)
return (drift2D, m, n)
'''
print("Calculating 2D drift...")
#FINDME: instead of calculating scanHeight, consider fitting stretch factor
ev.drift2D = np.zeros((ev.n_files, ev.n_reads-1, 2))
if ev.ncpu == 1:
# Only 1 CPU
for m in range(ev.n_files):
p = int(ev.scandir[m])
for n in range(ev.n_reads-1):
writeDrift2D(hst.calcDrift2D(diffdata[ev.iref[p],n]*diffmask[ev.iref[p],n],
diffdata[m,n]*diffmask[m,n], m, n, ev.n_files))
else:
# Multiple CPUs
pool = mp.Pool(ev.ncpu)
for m in range(ev.n_files):
p = int(ev.scandir[m])
for n in range(ev.n_reads-1):
#res = pool.apply_async(hst.calcDrift2D)
res = pool.apply_async(hst.calcDrift2D, args=(diffdata[ev.iref[p],n]*diffmask[ev.iref[p],n],
diffdata[m,n]*diffmask[m,n], m, n, ev.n_files), callback=writeDrift2D)
pool.close()
pool.join()
res.wait()
print(" Done.")
#np.save("drift2D.npy",ev.drift2D)
#global shiftdata, shiftmask
print("Performing rough, pixel-scale drift correction...")
import scipy.ndimage.interpolation as spni
ev.drift2D_int = np.round(ev.drift2D,0)
shiftdata = np.zeros(diffdata.shape)
shiftmask = np.zeros(diffmask.shape)
shiftvar = np.zeros(diffdata.shape)
shiftbg = np.zeros(diffdata.shape)
# Correct for drift by integer pixel numbers, no interpolation
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
shiftdata[m,n] = spni.shift(diffdata[m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
shiftmask[m,n] = spni.shift(diffmask[m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
shiftvar [m,n] = spni.shift(variance[m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
shiftbg [m,n] = spni.shift(bg [m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
"""
# spni.shift does not handle constant boundaries correctly
if ev.drift2D_int[m,n,0] > 0:
shiftdata[m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
shiftmask[m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
shiftvar [m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
shiftbg [m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
elif ev.drift2D_int[m,n,0] < 0:
#print(m,n,-1*ev.drift2D_int[m,n,0])
shiftdata[m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
shiftmask[m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
shiftvar [m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
shiftbg [m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
"""
# Outlier rejection of full frame along time axis
print("Performing full-frame outlier rejection...")
for p in range(2):
iscan = np.where(ev.scandir == p)[0]
if len(iscan) > 0:
for n in range(ev.n_reads-1):
#y1 = guess[ev.iref,n] - ev.spec_width
#y2 = guess[ev.iref,n] + ev.spec_width
#estsig = [differr[ev.iref,n,y1:y2] for j in range(len(ev.sigthresh))]
shiftmask[iscan,n] = sigrej.sigrej(shiftdata[iscan,n], ev.sigthresh, shiftmask[iscan,n])#, estsig)
"""
# Replace bad pixels using 2D Gaussian kernel along x and time axes
def writeReplacePixels(arg):
shift, m, n, i, j = arg
shiftdata[m,n,i,j] = shift
return
#import smoothing
#reload(smoothing)
ny, nx, sy, sx = (2,2,1,1)
wherebad = np.array(np.where(shiftmask==0)).T
#smdata = np.copy(shiftdata)
print("Replacing " + str(len(wherebad)) + " bad pixels...")
k = 0
ktot = len(wherebad)
#FINDME: multiple CPUs is inefficient
if ev.ncpu >= 1:
# Only 1 CPU
for m,n,i,j in wherebad:
#sys.stdout.write('\r'+str(k+1)+'/'+str(len(wherebad)))
#sys.stdout.flush()
writeReplacePixels(hst.replacePixels(shiftdata[:,n,:,j], shiftmask[:,n,:,j], m, n, i, j, k, ktot, ny, nx, sy, sx))
#Pad image initially with zeros
#newim = np.zeros(np.array(shiftdata[:,n,:,j].shape) + 2*np.array((ny, nx)))
#newim[ny:-ny, nx:-nx] = shiftdata[:,n,:,j]
#Calculate kernel
#gk = smoothing.gauss_kernel_mask2((ny,nx), (sy,sx), (m,i), shiftmask[:,n,:,j])
#shiftdata[m,n,i,j] = np.sum(gk * newim[m:m+2*ny+1, i:i+2*nx+1])
k += 1
else:
# Multiple CPUs
pool = mp.Pool(ev.ncpu)
for m,n,i,j in wherebad:
res = pool.apply_async(hst.replacePixels, args=(shiftdata[:,n,:,j], shiftmask[:,n,:,j], m, n, i, j, k, ktot, ny, nx, sy, sx), callback=writeReplacePixels)
k += 1
pool.close()
pool.join()
res.wait()
print(" Done.")
"""
if isplots >= 3:
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
plt.figure(1010)
plt.clf()
plt.suptitle(str(m) + "," + str(n))
plt.subplot(211)
plt.imshow(shiftdata[m,n]*shiftmask[m,n], origin='lower', aspect='auto', vmin=0, vmax=500)
plt.subplot(212)
#plt.imshow(submask[i], origin='lower', aspect='auto', vmax=1)
mean = np.median(shiftbg[m,n])
std = np.std(shiftbg[m,n])
plt.imshow(shiftbg[m,n], origin='lower', aspect='auto',vmin=mean-3*std,vmax=mean+3*std)
plt.savefig(ev.eventdir+'/figs/fig1010-'+str(m)+'-'+str(n)+'-Image+Background.png')
#plt.pause(0.1)
"""
apdata = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
apmask = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
apvar = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
apbg = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
for n in range(ev.n_reads-1):
y1 = guess[ev.iref,n] - ev.spec_width
y2 = guess[ev.iref,n] + ev.spec_width
apdata[:,n] = shiftdata[:,n,y1:y2]
apmask[:,n] = shiftmask[:,n,y1:y2]
apvar [:,n] = shiftvar [:,n,y1:y2]
apbg [:,n] = shiftbg [:,n,y1:y2]
"""
print("Performing | |
"""Probability distributions."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Union
import gym
import torch as th
from gym import spaces
from torch import nn
from torch.distributions import Bernoulli, Categorical, Normal
from stable_baselines3.common.preprocessing import get_action_dim
class Distribution(ABC):
"""Abstract base class for distributions."""
def __init__(self):
super(Distribution, self).__init__()
self.distribution = None
@abstractmethod
def proba_distribution_net(
self, *args, **kwargs
) -> Union[nn.Module, Tuple[nn.Module, nn.Parameter]]:
"""Create the layers and parameters that represent the distribution.
Subclasses must define this, but the arguments and return type vary between
concrete classes."""
@abstractmethod
def proba_distribution(self, *args, **kwargs) -> "Distribution":
"""Set parameters of the distribution.
:return: self
"""
@abstractmethod
def log_prob(self, x: th.Tensor) -> th.Tensor:
"""
Returns the log likelihood
:param x: the taken action
:return: The log likelihood of the distribution
"""
@abstractmethod
def entropy(self) -> Optional[th.Tensor]:
"""
Returns Shannon's entropy of the probability
:return: the entropy, or None if no analytical form is known
"""
@abstractmethod
def sample(self) -> th.Tensor:
"""
Returns a sample from the probability distribution
:return: the stochastic action
"""
@abstractmethod
def mode(self) -> th.Tensor:
"""
Returns the most likely action (deterministic output)
from the probability distribution
:return: the deterministic action
"""
def get_actions(self, deterministic: bool = False) -> th.Tensor:
"""
Return actions according to the probability distribution.
:param deterministic:
:return:
"""
if deterministic:
return self.mode()
return self.sample()
@abstractmethod
def actions_from_params(self, *args, **kwargs) -> th.Tensor:
"""
Returns samples from the probability distribution
given its parameters.
:return: actions
"""
@abstractmethod
def log_prob_from_params(self, *args, **kwargs) -> Tuple[th.Tensor, th.Tensor]:
"""
Returns samples and the associated log probabilities
from the probability distribution given its parameters.
:return: actions and log prob
"""
def sum_independent_dims(tensor: th.Tensor) -> th.Tensor:
"""
Continuous actions are usually considered to be independent,
so we can sum components of the ``log_prob`` or the entropy.
:param tensor: shape: (n_batch, n_actions) or (n_batch,)
:return: shape: (n_batch,)
"""
if len(tensor.shape) > 1:
tensor = tensor.sum(dim=1)
else:
tensor = tensor.sum()
return tensor
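# Editor's sketch (illustrative only): sum_independent_dims collapses per-dimension
# log-probabilities of independent action components into one value per batch element,
# e.g. a (batch, n_actions) tensor becomes (batch,). The helper below is a hypothetical
# demo, not part of the library.
def _demo_sum_independent_dims() -> None:
    log_prob = th.zeros(4, 3) - 1.0  # pretend per-dimension log-probs
    assert sum_independent_dims(log_prob).shape == (4,)  # summed over the action dimension
    assert sum_independent_dims(th.tensor([-1.0, -2.0])).shape == ()  # 1-D input is fully summed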
class DiagGaussianDistribution(Distribution):
"""
Gaussian distribution with diagonal covariance matrix, for continuous actions.
:param action_dim: Dimension of the action space.
"""
def __init__(self, action_dim: int):
super(DiagGaussianDistribution, self).__init__()
self.action_dim = action_dim
self.mean_actions = None
self.log_std = None
def proba_distribution_net(
self, latent_dim: int, log_std_init: float = 0.0
) -> Tuple[nn.Module, nn.Parameter]:
"""
Create the layers and parameter that represent the distribution:
one output will be the mean of the Gaussian, the other parameter will be the
standard deviation (log std in fact to allow negative values)
:param latent_dim: Dimension of the last layer of the policy (before the action layer)
:param log_std_init: Initial value for the log standard deviation
:return:
"""
mean_actions = nn.Linear(latent_dim, self.action_dim)
# TODO: allow action dependent std
log_std = nn.Parameter(
th.ones(self.action_dim) * log_std_init, requires_grad=True
)
return mean_actions, log_std
def proba_distribution(
self, mean_actions: th.Tensor, log_std: th.Tensor
) -> "DiagGaussianDistribution":
"""
Create the distribution given its parameters (mean, std)
:param mean_actions:
:param log_std:
:return:
"""
action_std = th.ones_like(mean_actions) * log_std.exp()
self.distribution = Normal(mean_actions, action_std)
return self
def log_prob(self, actions: th.Tensor) -> th.Tensor:
"""
Get the log probabilities of actions according to the distribution.
Note that you must first call the ``proba_distribution()`` method.
:param actions:
:return:
"""
log_prob = self.distribution.log_prob(actions)
return sum_independent_dims(log_prob)
def entropy(self) -> th.Tensor:
return sum_independent_dims(self.distribution.entropy())
def sample(self) -> th.Tensor:
# Reparametrization trick to pass gradients
return self.distribution.rsample()
def mode(self) -> th.Tensor:
return self.distribution.mean
def actions_from_params(
self, mean_actions: th.Tensor, log_std: th.Tensor, deterministic: bool = False
) -> th.Tensor:
# Update the proba distribution
self.proba_distribution(mean_actions, log_std)
return self.get_actions(deterministic=deterministic)
def log_prob_from_params(
self, mean_actions: th.Tensor, log_std: th.Tensor
) -> Tuple[th.Tensor, th.Tensor]:
"""
Compute the log probability of taking an action
given the distribution parameters.
:param mean_actions:
:param log_std:
:return:
"""
actions = self.actions_from_params(mean_actions, log_std)
log_prob = self.log_prob(actions)
return actions, log_prob
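# Editor's sketch (hedged usage example, not part of the library): the typical flow for
# DiagGaussianDistribution is to build the mean/log-std layers once, feed a latent vector
# through the mean layer, then set and query the distribution. Shapes here are invented.
def _demo_diag_gaussian(latent_dim: int = 16, action_dim: int = 2) -> None:
    dist = DiagGaussianDistribution(action_dim)
    mean_net, log_std = dist.proba_distribution_net(latent_dim, log_std_init=0.0)
    latent = th.randn(8, latent_dim)  # fake policy features
    mean_actions = mean_net(latent)  # (8, action_dim)
    actions, log_prob = dist.log_prob_from_params(mean_actions, log_std)
    assert actions.shape == (8, action_dim) and log_prob.shape == (8,)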
class SquashedDiagGaussianDistribution(DiagGaussianDistribution):
"""
Gaussian distribution with diagonal covariance matrix, followed by a squashing function (tanh) to ensure bounds.
:param action_dim: Dimension of the action space.
:param epsilon: small value to avoid NaN due to numerical imprecision.
"""
def __init__(self, action_dim: int, epsilon: float = 1e-6):
super(SquashedDiagGaussianDistribution, self).__init__(action_dim)
# Avoid NaN (prevents division by zero or log of zero)
self.epsilon = epsilon
self.gaussian_actions = None
def proba_distribution(
self, mean_actions: th.Tensor, log_std: th.Tensor
) -> "SquashedDiagGaussianDistribution":
super(SquashedDiagGaussianDistribution, self).proba_distribution(
mean_actions, log_std
)
return self
def log_prob(
self, actions: th.Tensor, gaussian_actions: Optional[th.Tensor] = None
) -> th.Tensor:
# Inverse tanh
# Naive implementation (not stable): 0.5 * torch.log((1 + x) / (1 - x))
# TanhBijector.inverse clips its input to avoid numerical instability
if gaussian_actions is None:
# It will be clipped to avoid NaN when inversing tanh
gaussian_actions = TanhBijector.inverse(actions)
# Log likelihood for a Gaussian distribution
log_prob = super(SquashedDiagGaussianDistribution, self).log_prob(
gaussian_actions
)
# Squash correction (from original SAC implementation)
# this comes from the fact that tanh is bijective and differentiable
log_prob -= th.sum(th.log(1 - actions ** 2 + self.epsilon), dim=1)
return log_prob
def entropy(self) -> Optional[th.Tensor]:
# No analytical form,
# entropy needs to be estimated using -log_prob.mean()
return None
def sample(self) -> th.Tensor:
# Reparametrization trick to pass gradients
self.gaussian_actions = super().sample()
return th.tanh(self.gaussian_actions)
def mode(self) -> th.Tensor:
self.gaussian_actions = super().mode()
# Squash the output
return th.tanh(self.gaussian_actions)
def log_prob_from_params(
self, mean_actions: th.Tensor, log_std: th.Tensor
) -> Tuple[th.Tensor, th.Tensor]:
action = self.actions_from_params(mean_actions, log_std)
log_prob = self.log_prob(action, self.gaussian_actions)
return action, log_prob
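# Editor's sketch (hedged): the squashed Gaussian keeps actions in (-1, 1) via tanh and
# corrects log_prob with the log-derivative of tanh. A quick hypothetical check that
# samples are bounded and that one corrected log-probability is returned per sample.
def _demo_squashed_gaussian(action_dim: int = 3) -> None:
    dist = SquashedDiagGaussianDistribution(action_dim)
    mean_actions = th.zeros(5, action_dim)
    log_std = th.zeros(action_dim)
    actions, log_prob = dist.log_prob_from_params(mean_actions, log_std)
    assert bool((actions.abs() <= 1.0).all())  # tanh keeps actions bounded
    assert log_prob.shape == (5,)  # one squash-corrected value per sample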
class CategoricalDistribution(Distribution):
"""
Categorical distribution for discrete actions.
:param action_dim: Number of discrete actions
"""
def __init__(self, action_dim: int):
super(CategoricalDistribution, self).__init__()
self.action_dim = action_dim
def proba_distribution_net(self, latent_dim: int) -> nn.Module:
"""
Create the layer that represents the distribution:
it will be the logits of the Categorical distribution.
You can then get probabilities using a softmax.
:param latent_dim: Dimension of the last layer
of the policy network (before the action layer)
:return:
"""
action_logits = nn.Linear(latent_dim, self.action_dim)
return action_logits
def proba_distribution(self, action_logits: th.Tensor) -> "CategoricalDistribution":
self.distribution = Categorical(logits=action_logits)
return self
def log_prob(self, actions: th.Tensor) -> th.Tensor:
return self.distribution.log_prob(actions)
def entropy(self) -> th.Tensor:
return self.distribution.entropy()
def sample(self) -> th.Tensor:
return self.distribution.sample()
def mode(self) -> th.Tensor:
return th.argmax(self.distribution.probs, dim=1)
def actions_from_params(
self, action_logits: th.Tensor, deterministic: bool = False
) -> th.Tensor:
# Update the proba distribution
self.proba_distribution(action_logits)
return self.get_actions(deterministic=deterministic)
def log_prob_from_params(
self, action_logits: th.Tensor
) -> Tuple[th.Tensor, th.Tensor]:
actions = self.actions_from_params(action_logits)
log_prob = self.log_prob(actions)
return actions, log_prob
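# Editor's sketch (hedged): for discrete actions the distribution is driven by raw logits;
# mode() is the argmax while sample() draws from the softmax. Hypothetical demo only.
def _demo_categorical(n_actions: int = 4) -> None:
    dist = CategoricalDistribution(n_actions)
    logits = th.tensor([[0.0, 0.0, 5.0, 0.0], [5.0, 0.0, 0.0, 0.0]])
    dist.proba_distribution(logits)
    assert dist.mode().tolist() == [2, 0]  # argmax of each row
    assert dist.sample().shape == (2,)  # one action index per row
    assert dist.log_prob(dist.mode()).shape == (2,)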
class MultiCategoricalDistribution(Distribution):
"""
MultiCategorical distribution for multi discrete actions.
:param action_dims: List of sizes of discrete action spaces
"""
def __init__(self, action_dims: List[int]):
super(MultiCategoricalDistribution, self).__init__()
self.action_dims = action_dims
def proba_distribution_net(self, latent_dim: int) -> nn.Module:
"""
Create the layer that represents the distribution:
it will be the logits (flattened) of the MultiCategorical distribution.
You can then get probabilities using a softmax on each sub-space.
:param latent_dim: Dimension of the last layer
of the policy network (before the action layer)
:return:
"""
action_logits = nn.Linear(latent_dim, sum(self.action_dims))
return action_logits
def proba_distribution(
self, action_logits: th.Tensor
) -> "MultiCategoricalDistribution":
self.distribution = [
Categorical(logits=split)
for split in th.split(action_logits, tuple(self.action_dims), dim=1)
]
return self
def log_prob(self, actions: th.Tensor) -> th.Tensor:
# Extract each discrete action and compute log prob for their respective distributions
return th.stack(
[
dist.log_prob(action)
for dist, action in zip(self.distribution, th.unbind(actions, dim=1))
],
dim=1,
).sum(dim=1)
def entropy(self) -> th.Tensor:
return th.stack([dist.entropy() for dist in self.distribution], dim=1).sum(
dim=1
)
def sample(self) -> th.Tensor:
return th.stack([dist.sample() for dist in self.distribution], dim=1)
def mode(self) -> th.Tensor:
return th.stack(
[th.argmax(dist.probs, dim=1) for dist in self.distribution], dim=1
)
def actions_from_params(
self, action_logits: th.Tensor, deterministic: bool = False
) -> th.Tensor:
# Update the proba distribution
self.proba_distribution(action_logits)
return self.get_actions(deterministic=deterministic)
def log_prob_from_params(
self, action_logits: th.Tensor
) -> Tuple[th.Tensor, th.Tensor]:
actions = self.actions_from_params(action_logits)
log_prob = self.log_prob(actions)
return actions, log_prob
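# Editor's sketch (hedged): MultiCategorical expects the logits of all sub-spaces
# concatenated along dim=1 and splits them back per sub-space. Hypothetical demo only.
def _demo_multi_categorical() -> None:
    dist = MultiCategoricalDistribution([3, 2])  # two discrete sub-spaces
    logits = th.randn(6, 5)  # 3 + 2 logits per sample
    actions = dist.actions_from_params(logits)
    assert actions.shape == (6, 2)  # one index per sub-space
    assert dist.log_prob(actions).shape == (6,)  # joint log-prob summed over sub-spaces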
class BernoulliDistribution(Distribution):
"""
Bernoulli distribution for MultiBinary action spaces.
:param action_dim: Number of binary actions
"""
def __init__(self, action_dims: int):
super(BernoulliDistribution, self).__init__()
self.action_dims = action_dims
def proba_distribution_net(self, latent_dim: int) -> nn.Module:
"""
Create the layer that represents the distribution:
it will be the logits of the Bernoulli distribution.
:param latent_dim: Dimension of the last layer
of the policy network (before the action layer)
:return:
"""
action_logits = nn.Linear(latent_dim, self.action_dims)
return action_logits
def proba_distribution(self, action_logits: | |
import typing
import warnings
from typing import List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor
from tsts.collators import Collator
from tsts.dataloaders.dataloader import DataLoader
from tsts.datasets import Dataset
from tsts.loggers import Logger
from tsts.losses.loss import Loss
from tsts.metrics import Metric
from tsts.models.module import Module
from tsts.optimizers import Optimizer
from tsts.scalers import Scaler
from tsts.scalers.builder import build_X_scaler, build_y_scaler
from tsts.schedulers import Scheduler
from tsts.trainers import Trainer
from tsts.types import MaybeRawDataset, RawDataset
from .solver import Solver
__all__ = ["TimeSeriesForecaster"]
_TestData = Tuple[Tensor, Tensor, Tensor, Optional[Tensor]]
class TimeSeriesForecaster(Solver):
"""Tool to solve time series forecasting."""
@property
def get_valid_data(self) -> bool:
if self._X_valid is None:
return False
return True
@property
def num_in_feats(self) -> int:
"""Get the number of input features.
This value is inferred by a given dataset.
Returns
-------
int
Number of input features
"""
if "num_in_feats" not in self.context_manager:
num_in_feats = self.infer_num_in_feats(self.X)
self.context_manager["num_in_feats"] = num_in_feats
num_in_feats = self.context_manager["num_in_feats"]
return num_in_feats
@property
def num_out_feats(self) -> int:
"""Get the number of output features.
This value is inferred by a given dataset.
Returns
-------
int
Number of output features
"""
if "num_out_feats" not in self.context_manager:
if self.y is not None:
num_out_feats = self.infer_num_out_feats(self.y)
else:
num_out_feats = self.infer_num_out_feats(self.X)
self.context_manager["num_out_feats"] = num_out_feats
num_out_feats = self.context_manager["num_out_feats"]
return num_out_feats
@property
def model(self) -> Module:
"""Get a target model.
Returns
-------
Module
Target model
"""
if "model" not in self.context_manager:
model = self.build_model(self.num_in_feats, self.num_out_feats)
self.context_manager["model"] = model
model = self.context_manager["model"]
return model
@property
def local_scaler(self) -> Module:
"""Get a target local scaler.
Returns
-------
Module
Target local scaler
"""
if "local_scaler" not in self.context_manager:
local_scaler = self.build_local_scaler(
self.num_in_feats,
self.num_out_feats,
)
self.context_manager["local_scaler"] = local_scaler
local_scaler = self.context_manager["local_scaler"]
return local_scaler
@property
def losses(self) -> List[Loss]:
"""Get a list of loss functions.
Returns
-------
Loss
List of loss functions
"""
if "losses" not in self.context_manager:
losses = self.build_losses()
self.context_manager["losses"] = losses
losses = self.context_manager["losses"]
return losses
@property
def metrics(self) -> List[Metric]:
"""Get a list of metrics.
Returns
-------
List[Metric]
List of metrics
"""
if "metrics" not in self.context_manager:
metrics = self.build_metrics()
self.context_manager["metrics"] = metrics
metrics = self.context_manager["metrics"]
return metrics
@property
def optimizer(self) -> Optimizer:
"""Get an optimizer.
Returns
-------
Optimizer
Optimizer
"""
if "optimizer" not in self.context_manager:
optimizer = self.build_optimizer(self.model, self.local_scaler)
self.context_manager["optimizer"] = optimizer
optimizer = self.context_manager["optimizer"]
return optimizer # type: ignore
@property
def scheduler(self) -> Scheduler:
"""Get an scheduler.
Returns
-------
Scheduler
Learning rate scheduler
"""
if "scheduler" not in self.context_manager:
scheduler = self.build_scheduler(
self.optimizer,
len(self.train_dataloader),
)
self.context_manager["scheduler"] = scheduler
scheduler = self.context_manager["scheduler"]
return scheduler
@property
def num_train_samples(self) -> List[int]:
"""Get a list of training samples per dataset.
There are two split modes. If TRAIN_DATA_SPLIT = "col", it returns a list of training
samples per dataset. If TRAIN_DATA_SPLIT = "row", it returns a list containing a
single value. In that case, the first num_train_samples[0] rows are used as training samples
and the last n - num_train_samples[0] rows are used as validation samples, where n is the
total number of samples.
Returns
-------
List[int]
List of training samples per dataset
"""
if "num_train_samples" not in self.context_manager:
train_data_ratio = self.cfg.TRAINING.TRAIN_DATA_RATIO
train_data_split = self.cfg.TRAINING.TRAIN_DATA_SPLIT
num_datasets = len(self.X)
num_train_samples = []
# Split like [[train, valid], [train, valid], ...]
if train_data_split == "col":
for i in range(num_datasets):
num_samples = len(self.X[i])
num_train_samples.append(int(train_data_ratio * num_samples))
# Split like [train, train, valid, valid, ...]
else:
random_split = self.cfg.TRAINING.RANDOM_SPLIT
if random_split is True:
mask = np.random.uniform(0.0, 1.0, (num_datasets,))
mask = mask > (1.0 - train_data_ratio)
else:
mask = np.full((num_datasets,), True)
mask[int(train_data_ratio * num_datasets) :] = False
num_train_samples.append(mask.tolist())
self.context_manager["num_train_samples"] = num_train_samples
num_train_samples = self.context_manager["num_train_samples"]
return num_train_samples
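# Editor's sketch (hedged, standalone): a toy version of the two split modes described
# above, outside the solver. The numbers and names here are invented for illustration.
#
# import numpy as np
# train_data_ratio = 0.75
# num_datasets, rows_per_dataset = 4, 10
# # "col" split: per-dataset row counts used for training
# col_split = [int(train_data_ratio * rows_per_dataset) for _ in range(num_datasets)]
# # -> [7, 7, 7, 7]
# # "row" split (non-random): a boolean mask over whole datasets
# row_mask = np.full((num_datasets,), True)
# row_mask[int(train_data_ratio * num_datasets):] = False
# # -> [True, True, True, False]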
@property
def X_train(self) -> RawDataset:
"""Get a training raw input dataset.
Returns
-------
RawDataset
Training raw input dataset
"""
if "X_train" not in self.context_manager:
if self.get_valid_data is False:
train_data_split = self.cfg.TRAINING.TRAIN_DATA_SPLIT
if train_data_split == "col":
X_train = []
num_datasets = len(self.X)
for i in range(num_datasets):
X_train.append(self.X[i][: self.num_train_samples[i]])
else:
num_train_samples = self.num_train_samples[0]
X_train = []
for (i, flag) in enumerate(num_train_samples): # type: ignore
if flag is True:
X_train.append(self.X[i])
self.context_manager["X_train"] = X_train
else:
self.context_manager["X_train"] = self.X
X_train = self.context_manager["X_train"]
return X_train
@property
def X_valid(self) -> RawDataset:
"""Get a validation raw input dataset.
Returns
-------
RawDataset
Validation raw input dataset
"""
if "X_valid" not in self.context_manager:
if self.get_valid_data is False:
train_data_split = self.cfg.TRAINING.TRAIN_DATA_SPLIT
lookback = self.cfg.IO.LOOKBACK
if train_data_split == "col":
X_valid = []
num_datasets = len(self.X)
for i in range(num_datasets):
X_valid.append(
self.X[i][self.num_train_samples[i] + lookback :]
)
else:
num_train_samples = self.num_train_samples[0]
X_valid = []
for (i, flag) in enumerate(num_train_samples): # type: ignore
if flag is False:
X_valid.append(self.X[i])
self.context_manager["X_valid"] = X_valid
else:
self.context_manager["X_valid"] = self._X_valid
X_valid = self.context_manager["X_valid"]
return X_valid
@property
def y_train(self) -> RawDataset:
"""Get a training raw target dataset.
Returns
-------
RawDataset
Training raw target dataset
"""
if "y_train" not in self.context_manager:
if self.get_valid_data is False:
train_data_split = self.cfg.TRAINING.TRAIN_DATA_SPLIT
if train_data_split == "col":
y_train = []
num_datasets = len(self.y)
for i in range(num_datasets):
y_train.append(self.y[i][: self.num_train_samples[i]])
else:
num_train_samples = self.num_train_samples[0]
y_train = []
for (i, flag) in enumerate(num_train_samples): # type: ignore
if flag is True:
y_train.append(self.y[i])
self.context_manager["y_train"] = y_train
else:
self.context_manager["y_train"] = self.y
y_train = self.context_manager["y_train"]
return y_train
@property
def y_valid(self) -> RawDataset:
"""Get a validation raw target dataset.
Returns
-------
RawDataset
Validation raw target dataset
"""
if "y_valid" not in self.context_manager:
if self.get_valid_data is False:
train_data_split = self.cfg.TRAINING.TRAIN_DATA_SPLIT
lookback = self.cfg.IO.LOOKBACK
if train_data_split == "col":
y_valid = []
num_datasets = len(self.y)
for i in range(num_datasets):
y_valid.append(
self.y[i][self.num_train_samples[i] + lookback :]
)
else:
num_train_samples = self.num_train_samples[0]
y_valid = []
for (i, flag) in enumerate(num_train_samples): # type: ignore
if flag is False:
y_valid.append(self.y[i])
self.context_manager["y_valid"] = y_valid
else:
self.context_manager["y_valid"] = self._y_valid
y_valid = self.context_manager["y_valid"]
return y_valid
@property
def time_stamps_train(self) -> MaybeRawDataset:
"""Get time stamps for training samples
Returns
-------
MaybeRawDataset
time stamps for training samples
"""
if "time_stamps_train" not in self.context_manager:
if self.get_valid_data is False:
# time_stamps is given as input
if self.time_stamps is not None:
train_data_split = self.cfg.TRAINING.TRAIN_DATA_SPLIT
time_stamps_train = []
if train_data_split == "col":
num_datasets = len(self.y)
for i in range(num_datasets):
if self.time_stamps is not None:
ts = self.time_stamps[i][: self.num_train_samples[i]]
time_stamps_train.append(ts)
else:
warnings.warn(
"time_stamps is not supported when TRAIN_DATA_SPLIT = 'col'"
)
self.context_manager["time_stamps_train"] = time_stamps_train
else:
self.context_manager["time_stamps_train"] = None
else:
if self.time_stamps is not None:
raise NotImplementedError
self.context_manager["time_stamps_train"] = None
time_stamps_train = self.context_manager["time_stamps_train"]
return typing.cast(MaybeRawDataset, time_stamps_train)
@property
def time_stamps_valid(self) -> MaybeRawDataset:
"""Get time stamps for validation samples
Returns
-------
MaybeRawDataset
time stamps for validation samples
"""
if "time_stamps_valid" not in self.context_manager:
if self.get_valid_data is False:
# time_stamps is given as input
if self.time_stamps is not None:
train_data_split = self.cfg.TRAINING.TRAIN_DATA_SPLIT
lookback = self.cfg.IO.LOOKBACK
time_stamps_valid = []
if train_data_split == "col":
num_datasets = len(self.y)
for i in range(num_datasets):
if self.time_stamps is not None:
offset = self.num_train_samples[i] + lookback
ts = self.time_stamps[i][offset:]
time_stamps_valid.append(ts)
else:
warnings.warn(
"time_stamps is not supported when TRAIN_DATA_SPLIT = 'col'"
)
self.context_manager["time_stamps_valid"] = time_stamps_valid
else:
self.context_manager["time_stamps_valid"] = None
else:
if self.time_stamps is not None:
raise NotImplementedError
self.context_manager["time_stamps_valid"] = None
time_stamps_valid = self.context_manager["time_stamps_valid"]
return typing.cast(MaybeRawDataset, time_stamps_valid)
@property
def train_dataset(self) -> Dataset:
"""Get a training dataset.
RawDataset is a list of datasets. They are concatenated inside build_train_dataset method.
Returns
-------
Dataset
Training dataset
"""
if "train_dataset" not in self.context_manager:
train_dataset = self.build_train_dataset(
self.X_train,
self.y_train,
self.time_stamps_train,
self.X_scaler,
self.y_scaler,
)
self.context_manager["train_dataset"] = train_dataset
train_dataset = self.context_manager["train_dataset"]
return train_dataset
@property
def valid_dataset(self) -> Dataset:
"""Get a validation dataset.
RawDataset is a list of datasets. They are concatenated inside build_train_dataset method.
Returns
-------
Dataset
Validation dataset
"""
if "valid_dataset" not in self.context_manager:
valid_dataset = self.build_valid_dataset(
self.X_valid,
self.y_valid,
self.time_stamps_valid,
self.X_scaler,
self.y_scaler,
)
self.context_manager["valid_dataset"] = valid_dataset
valid_dataset = self.context_manager["valid_dataset"]
return valid_dataset
@property
def X_scaler(self) -> Scaler:
"""Get a scaler for input.
Returns
-------
Scaler
Scale for input
"""
if "X_scaler" not in self.context_manager:
X_scaler = build_X_scaler(self.cfg)
X_scaler.fit_batch(self.X_train)
self.context_manager["X_scaler"] = X_scaler
X_scaler = self.context_manager["X_scaler"]
return X_scaler
@property
def y_scaler(self) -> Scaler:
"""Get a scaler for target.
Returns
-------
Scaler
Scale for target
"""
if "y_scaler" not in self.context_manager:
y_scaler = build_y_scaler(self.cfg)
y_scaler.fit_batch(self.y_train)
self.context_manager["y_scaler"] = y_scaler
y_scaler = self.context_manager["y_scaler"]
return y_scaler
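# Editor's aside (generic pattern, hedged): each property above memoises its result in
# self.context_manager so the expensive builders run only once per solver instance.
# The bare pattern, stripped of the forecasting specifics and with invented names:
#
# class _Cached:
#     def __init__(self):
#         self.context_manager = {}  # acts as a per-instance cache
#     @property
#     def thing(self):
#         if "thing" not in self.context_manager:
#             self.context_manager["thing"] = self._build_thing()  # built once, reused after
#         return self.context_manager["thing"]
#     def _build_thing(self):
#         return object()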
@property
def collator(self) -> Collator:
"""Get a collator.
Returns
-------
Collator
Collator
"""
if "collator" not | |
<reponame>wlongo/django-rest-framework-braces<filename>drf_braces/serializers/form_serializer.py<gh_stars>10-100
from __future__ import absolute_import, print_function, unicode_literals
from collections import OrderedDict
import six
from django import forms
from rest_framework import serializers
from .. import fields
from ..utils import (
find_matching_class_kwargs,
get_attr_from_base_classes,
get_class_name_with_new_suffix,
reduce_attr_dict_from_instance,
)
class FormSerializerFailure(object):
"""
Enum for the possible form validation failure modes.
'fail': validation failures should be added to self.errors
and `is_valid()` should return False.
'drop': validation failures for a given attribute will result in
that attribute being dropped from `cleaned_data`;
`is_valid()` will return True.
'ignore': validation failures will be ignored, and the (invalid)
data provided will be preserved in `cleaned_data`.
"""
fail = 'fail'
drop = 'drop'
ignore = 'ignore'
class FormSerializerFieldMixin(object):
def run_validation(self, data):
try:
return super(FormSerializerFieldMixin, self).run_validation(data)
except (serializers.ValidationError, forms.ValidationError) as e:
# Only handle a ValidationError if the full validation is
# requested or if field is in minimum required in the case
# of partial validation.
if any([not self.parent.partial,
self.parent.Meta.failure_mode == FormSerializerFailure.fail,
self.field_name in self.parent.Meta.minimum_required]):
raise
self.capture_failed_field(self.field_name, data, e.detail)
raise serializers.SkipField
def capture_failed_field(self, field_name, field_data, error_msg):
"""
Hook for capturing invalid fields. This is used to track which fields have been skipped.
Args:
field_name (str): the name of the field whose data failed to validate
field_data (object): the data of the field that failed validation
error_msg (str): validation error message
Returns:
Not meant to return anything.
"""
def make_form_serializer_field(field_class, validation_form_serializer_field_mixin_class=FormSerializerFieldMixin):
return type(
get_class_name_with_new_suffix(field_class, 'Field', 'FormSerializerField'),
(validation_form_serializer_field_mixin_class, field_class,),
{}
)
FORM_SERIALIZER_FIELD_MAPPING = {
forms.CharField: make_form_serializer_field(fields.CharField),
forms.MultipleChoiceField: make_form_serializer_field(fields.ChoiceField),
forms.ChoiceField: make_form_serializer_field(fields.ChoiceField),
forms.BooleanField: make_form_serializer_field(fields.BooleanField),
forms.IntegerField: make_form_serializer_field(fields.IntegerField),
forms.EmailField: make_form_serializer_field(fields.EmailField),
forms.DateTimeField: make_form_serializer_field(fields.DateTimeField),
forms.DateField: make_form_serializer_field(fields.DateField),
forms.TimeField: make_form_serializer_field(fields.TimeField),
}
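# Editor's sketch (hedged illustration): additional Django form fields can be mapped the
# same way the defaults above are, by wrapping a serializer field class with
# make_form_serializer_field. forms.SlugField and the CharField wrapper are used purely
# as an example; such mappings are typically supplied per serializer via
# Meta.field_mapping rather than by mutating the module-level default.
EXAMPLE_FIELD_MAPPING = {
    forms.SlugField: make_form_serializer_field(fields.CharField),
}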
class FormSerializerOptions(object):
"""
Defines what options FormSerializer can have in Meta.
:param form: The ``django.form.Form`` class to use as the base
for the serializer.
:param failure_mode: `FormSerializerFailure`
:param minimum_required: the minimum required fields that
must validate in order for validation to succeed.
"""
def __init__(self, meta, class_name):
self.form = getattr(meta, 'form', None)
self.failure_mode = getattr(meta, 'failure_mode', FormSerializerFailure.fail)
self.minimum_required = getattr(meta, 'minimum_required', [])
self.field_mapping = getattr(meta, 'field_mapping', {})
self.exclude = getattr(meta, 'exclude', [])
assert self.form, (
'Class {serializer_class} missing "Meta.form" attribute'.format(
serializer_class=class_name
)
)
assert self.failure_mode in vars(FormSerializerFailure).values(), (
'Failure mode "{}" is not supported'.format(self.failure_mode)
)
if self.failure_mode == FormSerializerFailure.ignore:
raise NotImplementedError(
'Failure mode "{}" is not supported since it is not clear '
'what is an expected behavior'.format(self.failure_mode)
)
# copy all other custom keys
for k, v in vars(meta).items():
if hasattr(self, k):
continue
setattr(self, k, v)
class FormSerializerMeta(serializers.SerializerMetaclass):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, FormSerializer)]
except NameError:
# We are defining FormSerializer itself
parents = None
if not parents or attrs.pop('_is_base', False):
return super(FormSerializerMeta, cls).__new__(cls, name, bases, attrs)
assert 'Meta' in attrs, (
'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=name
)
)
options_class = get_attr_from_base_classes(
bases, attrs, '_options_class', default=FormSerializerOptions
)
attrs['Meta'] = options_class(attrs['Meta'], name)
return super(FormSerializerMeta, cls).__new__(cls, name, bases, attrs)
class FormSerializerBase(serializers.Serializer):
"""
The base Form serializer class.
When a subclassing serializer is validated or saved, this will
pass-through those operations to the mapped Form.
"""
_is_base = True
_options_class = FormSerializerOptions
def __init__(self, *args, **kwargs):
# We override partial validation handling, since for
# it to be properly implemented for a Form the caller
# must also choose whether or not to include the data
# that failed validation in the result cleaned_data.
# Unfortunately there is no way to prevent a caller from
# sending this param themselves, because of the way DRFv2
# serializers work internally.
if self.Meta.failure_mode != FormSerializerFailure.fail:
kwargs['partial'] = True
self.form_instance = None
super(FormSerializerBase, self).__init__(*args, **kwargs)
def get_form(self, data=None, **kwargs):
"""
Create an instance of configured form class.
:param data: optional initial data
:param kwargs: key args to pass to form instance
:return: instance of `self.opts.form`, bound if data was provided,
otherwise unbound.
"""
form_cls = self.Meta.form
instance = form_cls(data=data, **kwargs)
# Handle partial validation on the form side
if self.partial:
set_form_partial_validation(
instance, self.Meta.minimum_required
)
return instance
def get_fields(self):
"""
Return all the fields that should be serialized for the form.
This is a hook provided by parent class.
:return: dict of {'field_name': serializer_field_instance}
"""
ret = super(FormSerializerBase, self).get_fields()
field_mapping = reduce_attr_dict_from_instance(
self,
lambda i: getattr(getattr(i, 'Meta', None), 'field_mapping', {}),
FORM_SERIALIZER_FIELD_MAPPING
)
# Iterate over the form fields, creating an
# instance of serializer field for each.
form = self.Meta.form
for field_name, form_field in getattr(form, 'all_base_fields', form.base_fields).items():
# if field is specified as excluded field
if field_name in getattr(self.Meta, 'exclude', []):
continue
# if field is already defined via declared fields
# skip mapping it from forms which then honors
# the custom validation defined on the DRF declared field
if field_name in ret:
continue
try:
serializer_field_class = field_mapping[form_field.__class__]
except KeyError:
raise TypeError(
"{field} is not mapped to a serializer field. "
"Please add {field} to {serializer}.Meta.field_mapping. "
"Currently mapped fields: {mapped}".format(
field=form_field.__class__.__name__,
serializer=self.__class__.__name__,
mapped=', '.join(sorted([i.__name__ for i in field_mapping.keys()]))
)
)
else:
ret[field_name] = self._get_field(form_field, serializer_field_class)
return ret
def _get_field(self, form_field, serializer_field_class):
kwargs = self._get_field_kwargs(form_field, serializer_field_class)
field = serializer_field_class(**kwargs)
for kwarg, value in kwargs.items():
# set corresponding DRF attributes which don't have alternative
# in Django form fields
if kwarg == 'required':
field.allow_blank = not value
field.allow_null = not value
# ChoiceField natively uses choice_strings_to_values
# in the to_internal_value flow
elif kwarg == 'choices':
field.choice_strings_to_values = {
six.text_type(key): key for key in OrderedDict(value).keys()
}
return field
def _get_field_kwargs(self, form_field, serializer_field_class):
"""
For a given Form field, determine what validation attributes
have been set. Includes things like max_length, required, etc.
These will be used to create an instance of ``rest_framework.fields.Field``.
:param form_field: a ``django.forms.field.Field`` instance
:return: dictionary of attributes to set
"""
attrs = find_matching_class_kwargs(form_field, serializer_field_class)
if 'choices' in attrs:
choices = OrderedDict(attrs['choices']).keys()
attrs['choices'] = OrderedDict(zip(choices, choices))
if getattr(form_field, 'initial', None):
attrs['default'] = form_field.initial
# avoid "May not set both `required` and `default`"
if attrs.get('required') and 'default' in attrs:
del attrs['required']
return attrs
def validate(self, data):
"""
Validate a form instance using the data that has been run through
the serializer field validation.
:param data: deserialized data to validate
:return: validated, cleaned form data
:raise: ``django.core.exceptions.ValidationError`` on failed
validation.
"""
self.form_instance = form = self.get_form(data=data)
if not form.is_valid():
_cleaned_data = getattr(form, 'cleaned_data', None) or {}
if self.Meta.failure_mode == FormSerializerFailure.fail:
raise serializers.ValidationError(form.errors)
else:
self.capture_failed_fields(data, form.errors)
cleaned_data = {k: v for k, v in data.items() if k not in form.errors}
# use any cleaned data the form might have validated up to
# this point, even if overall validation failed
cleaned_data.update(_cleaned_data)
else:
cleaned_data = form.cleaned_data
return cleaned_data
def to_representation(self, instance):
"""
It doesn't make much sense to serialize a Form instance to JSON.
"""
raise NotImplementedError(
'{} does not currently serialize Form --> JSON'
''.format(self.__class__.__name__)
)
def capture_failed_fields(self, raw_data, form_errors):
"""
Hook for capturing all failed form data when the failure mode is not FormSerializerFailure.fail
Args:
raw_data (dict): raw form data
form_errors (dict): all form errors
Returns:
Not meant to return anything.
"""
class FormSerializer(six.with_metaclass(FormSerializerMeta, FormSerializerBase)):
pass
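# Editor's sketch (hedged usage example): a typical FormSerializer subclass just points
# Meta.form at an existing Django form; field mapping, failure_mode handling, and
# validation pass-through are supplied by the base classes above. The form and serializer
# names here are invented for illustration.
class _ExampleContactForm(forms.Form):
    email = forms.EmailField()
    message = forms.CharField(required=False)

class _ExampleContactSerializer(FormSerializer):
    class Meta:
        form = _ExampleContactForm

# _ExampleContactSerializer(data={"email": "a@b.com"}).is_valid() is expected to delegate
# validation to _ExampleContactForm and, with the default failure_mode of
# FormSerializerFailure.fail, surface any form errors as serializer errors.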
class LazyLoadingValidationsMixin(object):
"""
Provides a method for re-evaluating the validations for
a form using an instance of it (whereas the FormSerializer
only uses the form class).
If your form class loads validations in `__init__()`, you
need this.
"""
def repopulate_form_fields(self):
"""
Repopulate the form fields, update choices.
The repopulation is required because some DT forms use a lazy-load approach
to populating choices of a ChoiceField, by putting the load
in the form's constructor. Also, the DT fields may require context_data,
which is unavailable when the fields are first constructed
(which happens during evaluation of the serializer classes).
:return: None
"""
instance = self.get_form()
for form_field_name, form_field in getattr(instance, 'all_fields', instance.fields).items():
if hasattr(form_field, 'choices'):
# let drf normalize choices down to key: key
# key:value is unsupported unlike in django form fields
self.fields[form_field_name].choices = OrderedDict(form_field.choices).keys()
self.fields[form_field_name].choice_strings_to_values = {
six.text_type(key): key for key in OrderedDict(form_field.choices).keys()
}
def to_internal_value(self, data):
"""
We have tons of "choices" loading in form `__init__()`,
(so that DB query is evaluated at last possible moment) so require the
use of ``common.common_json.serializers.LazyLoadingValidationsMixin``.
"""
self.repopulate_form_fields()
return super(LazyLoadingValidationsMixin, self).to_internal_value(data)
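# Editor's sketch (hedged): the mixin is meant to sit in front of FormSerializer in the
# MRO so that choices are refreshed before deserialization. The form and serializer below
# are invented, and the static choices stand in for choices normally loaded in __init__.
class _ExampleLazyForm(forms.Form):
    status = forms.ChoiceField(choices=[("new", "New"), ("done", "Done")])

class _ExampleLazySerializer(LazyLoadingValidationsMixin, FormSerializer):
    class Meta:
        form = _ExampleLazyForm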
def set_form_partial_validation(form, minimum_required):
"""
| |
proceed to calculating the (y) coordinate
if x1 == x2:
x0 = x1
# if the vertical direction is positive from
# vertex 1 to vertex 2 on the euclidean plane
if y1 < y2:
y0 = y1 + distance
# if the vertical direction is negative from
# vertex 1 to vertex 2 on the euclidean plane
# -- this shouldn't happen due to vertex sorting in
# -- self._extractnetwork() and self.extractgraph()
elif y1 > y2:
y0 = y2 + distance
# otherwise the link is zero-length
# -- this should never happen
else:
y0 = y1
return x0, y0
# calculate the slope of the arc, `m`
m = (y2 - y1) / (x2 - x1)
# if the horizontal direction is negative from
# vertex 1 to vertex 2 on the euclidean plane
if x1 > x2:
x0 = x1 - distance / numpy.sqrt(1 + m ** 2)
# if the horizontal direction is positive from
# vertex 1 to vertex 2 on the euclidean plane
elif x1 < x2:
x0 = x1 + distance / numpy.sqrt(1 + m ** 2)
# calculate the (y) coordinate
y0 = m * (x0 - x1) + y1
# the new (x,y) coordinates for the snapped observation
return x0, y0
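    # A quick numeric check of the slope-based placement above (illustrative
    # only, not part of spaghetti): travelling 5 units from (0, 0) toward
    # (3, 4), the segment slope is m = 4/3, so
    #   x0 = 0 + 5 / sqrt(1 + (4/3)**2) = 3.0
    #   y0 = m * (x0 - 0) + 0           = 4.0
    # i.e. the snapped point lands exactly on the far endpoint of that
    # length-5 arc, as expected.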
def simulate_observations(self, count, distribution="uniform"):
"""Generate a simulated point pattern on the network.
Parameters
----------
count : int
The number of points to create.
distribution : str
A distribution of random points. Currently, the only
supported distribution is uniform.
Returns
-------
        random_pts : spaghetti.SimulatedPointPattern
            Simulated point pattern with snapped coordinates and distances to the bounding vertices.
See also
--------
numpy.random.Generator.uniform
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Isolate the number of points in the dataset.
>>> npts = ntw.pointpatterns["crimes"].npoints
>>> npts
287
Simulate ``npts`` number of points along the network
in a `uniform` distribution.
>>> sim = ntw.simulate_observations(npts)
>>> isinstance(sim, spaghetti.network.SimulatedPointPattern)
True
>>> sim.npoints
287
"""
# instantiate an empty `SimulatedPointPattern()`
simpts = SimulatedPointPattern()
# record throw-away arcs enumerator
arcs_ = []
# create array and fill each entry as length of network arc
lengths = numpy.zeros(len(self.arc_lengths))
for i, key in enumerate(self.arc_lengths.keys()):
arcs_.append(key)
lengths[i] = self.arc_lengths[key]
# cumulative network length
stops = numpy.cumsum(lengths)
cumlen = stops[-1]
# create lengths with a uniform distribution
if distribution.lower() == "uniform":
nrandompts = numpy.random.uniform(0, cumlen, size=(count,))
else:
msg = "%s distribution not currently supported." % distribution
raise RuntimeError(msg)
# iterate over random distances created above
for i, r in enumerate(nrandompts):
# take the first element of the index array (arc ID) where the
# random distance is greater than that of its value in `stops`
idx = numpy.where(r < stops)[0][0]
# assign the simulated point to the arc
assignment_arc = arcs_[idx]
# calculate and set the distance from the arc start
distance_from_start = stops[idx] - r
# populate the coordinates dict
x0, y0 = self._newpoint_coords(assignment_arc, distance_from_start)
# record the snapped coordinates and associated vertices
simpts.snapped_coordinates[i] = (x0, y0)
simpts.obs_to_vertex[assignment_arc[0]].append(i)
simpts.obs_to_vertex[assignment_arc[1]].append(i)
# calculate and set the distance from the arc end
distance_from_end = self.arc_lengths[arcs_[idx]] - distance_from_start
# populate the distances to vertices
simpts.dist_to_vertex[i] = {
assignment_arc[0]: distance_from_start,
assignment_arc[1]: distance_from_end,
}
# set snapped coordinates and point count attributes
simpts.points = simpts.snapped_coordinates
simpts.npoints = len(simpts.points)
return simpts
def enum_links_vertex(self, v0):
"""Returns the arcs (links) adjacent to vertices.
Parameters
-----------
v0 : int
The vertex ID.
Returns
-------
links : list
List of tuple arcs adjacent to the vertex.
Examples
--------
Create an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Enumerate the links/arcs that are adjacent to vertex ``24``.
>>> ntw.enum_links_vertex(24)
[(24, 48), (24, 25), (24, 26)]
"""
# instantiate links list
links = []
neighbor_vertices = self.adjacencylist[v0]
# enumerate links associated with the current vertex
for n in neighbor_vertices:
links.append(tuple(sorted([n, v0])))
return links
def full_distance_matrix(self, n_processes, gen_tree=False):
"""All vertex-to-vertex distances on a network. This method
is called from within ``allneighbordistances()``,
``nearestneighbordistances()``, and ``distancebandweights()``.
Parameters
-----------
n_processes : int
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
Notes
-----
Based on :cite:`Dijkstra1959a` and :cite:`doi:10.1002/9781119967101.ch3`.
"""
# create an empty matrix which will store shortest path distance
nvtx = len(self.vertex_list)
self.distance_matrix = numpy.empty((nvtx, nvtx))
# create `network_trees` attribute that stores
# all network path trees (if desired)
self.network_trees = {}
# single-core processing
if n_processes == 1:
# iterate over each network vertex
for vtx in self.vertex_list:
# calculate the shortest path and preceding
# vertices for traversal route
distance, pred = util.dijkstra(self, vtx)
pred = numpy.array(pred)
# generate the shortest path tree
if gen_tree:
tree = util.generatetree(pred)
else:
tree = None
# populate distances and paths
self.distance_matrix[vtx] = distance
self.network_trees[vtx] = tree
# multiprocessing
else:
# set up multiprocessing schema
import multiprocessing as mp
from itertools import repeat
if n_processes == "all":
cores = mp.cpu_count()
else:
cores = n_processes
p = mp.Pool(processes=cores)
# calculate the shortest path and preceding
# vertices for traversal route by mapping each process
distance_pred = p.map(util.dijkstra_mp, zip(repeat(self), self.vertex_list))
# set range of iterations
iterations = range(len(distance_pred))
# fill shortest paths
distance = [distance_pred[itr][0] for itr in iterations]
# fill preceding vertices
pred = numpy.array([distance_pred[itr][1] for itr in iterations])
# iterate of network vertices and generate
# the shortest path tree for each
for vtx in self.vertex_list:
if gen_tree:
tree = util.generatetree(pred[vtx])
else:
tree = None
# populate distances and paths
self.distance_matrix[vtx] = distance[vtx]
self.network_trees[vtx] = tree
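    # A brief usage sketch (illustrative only; attribute names follow the code
    # above): build the matrix once, then read pairwise network distances from
    # ``distance_matrix`` and, if requested, predecessor trees from
    # ``network_trees``.
    #
    #   ntw = spaghetti.Network(examples.get_path("streets.shp"))
    #   ntw.full_distance_matrix(1, gen_tree=True)
    #   d01 = ntw.distance_matrix[0, 1]   # shortest network distance 0 -> 1
    #   tree0 = ntw.network_trees[0]      # shortest path tree rooted at vertex 0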
def allneighbordistances(
self,
sourcepattern,
destpattern=None,
fill_diagonal=None,
n_processes=1,
gen_tree=False,
snap_dist=False,
):
"""Compute either all distances between :math:`i` and :math:`j` in a
single point pattern or all distances between each :math:`i` from a
source pattern and all :math:`j` from a destination pattern.
Parameters
----------
sourcepattern : {str, spaghetti.PointPattern}
The key of a point pattern snapped to the network or
the full ``spaghetti.PointPattern`` object.
destpattern : str
(Optional) The key of a point pattern snapped to the network
or the full ``spaghetti.PointPattern`` object.
fill_diagonal : {float, int}
(Optional) Fill the diagonal of the cost matrix. Default is
``None`` and will populate the diagonal with ``numpy.nan``.
Do not declare a ``destpattern`` for a custom
``fill_diagonal``.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
Returns
-------
nearest : numpy.ndarray
An array of shape (n,m) storing distances between all
source and destination points.
tree_nearest : dict
Nearest network node to point pattern vertex shortest
path lookup. The values of the dictionary are a tuple
of the nearest source vertex and the nearest destination
vertex to query the lookup tree. If two observations are
snapped to the same network arc a flag of -.1 is set for
both the source and destination network vertex
indicating the same arc is used while also raising an
``IndexError`` when rebuilding the path.
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> import numpy
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Calculate all distances between observations in the ``crimes`` dataset.
>>> s2s_dist = ntw.allneighbordistances("crimes")
If calculating a ``type-a`` to ``type-a`` distance matrix
the distance between an observation and itself is ``nan`` and
        the distance between one observation and
bound = (1 + percentile/200)*latest_D
predictions = [0 for i in range(int(14)+start)]
predictions = predictions + [0 for i in range(int(extrapolate-14))]
forecast = list(np.concatenate((deaths, predictions)))
death_cdf.append(forecast)
for percentile in [80, 90]:
if percentile == 80:
bound = np.mean(deaths)
else:
bound = np.mean([d for d in deaths if d>0])
predictions = [bound for i in range(int(14)+start)]
predictions = predictions + [0 for i in range(int(extrapolate-14))]
forecast = list(np.concatenate((deaths, predictions)))
death_cdf.append(forecast)
else:
for percentile in [10, 20, 30, 40, 50, 60, 70, 80, 90]: #make this a separate function
predictions = [0 for i in range(int(extrapolate))]
forecast = list(np.concatenate((deaths, predictions)))
death_cdf.append(forecast)
death_cdf = np.transpose(death_cdf)
counties_dates.append(dates)
counties_death_errors.append(death_cdf)
return (counties_dates, counties_death_errors, counties_fips)
def fit(data, bias=None, bias_value=0.4, weight=False, plot=False, extrapolate=14, guesses=None, error_start=-1, quick=False, tail=False, fitQ=False, getbounds=False, death_metric="deaths"):
param_ranges = [(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1)]
initial_ranges = [(0,1), (0,1), (0,0.01), (0,0.01), (0,0.01), (0,0.01), (0,0.01)]
ranges = param_ranges+initial_ranges
if guesses is None:
params = [9e-02, 1e-01, 7e-02, 3.e-01, 4.e-01, 1e-01, 1e-01, 3e-01, 4e-01, 7e-02, 2e-04, 8e-02, 7e-03, 2e-02, 2e-04, 2e-06, 4e-03]
initial_conditions = [7e-01, 2e-01, 4e-08, 7e-03, 1e-08, 3e-20, 7e-06]
guesses = params+initial_conditions
else:
initial_ranges = [(0.1*guesses[17],10*guesses[17]), (0.1*guesses[18],10*guesses[18]), (0.1*guesses[19],10*guesses[19]), (0.1*guesses[20],10*guesses[20]), (0.1*guesses[21],10*guesses[21]), \
(0.1*guesses[22],10*guesses[22]), (0.1*guesses[23],10*guesses[23])]
ranges = param_ranges+initial_ranges
if bias is not None and bias < 0:
bias = None
for boundary in [len(data)]:
res = least_squares(leastsq_qd, guesses, args=(data[:boundary],bias, bias_value, weight, fitQ, death_metric), bounds=np.transpose(np.array(ranges)))
predictions = get_deaths(res, data, extrapolate=extrapolate)
convergent_status = test_convergence(len(data), data['Population'].values[0], predictions)
if convergent_status == False:
return (None,None,None)
death_pdf = []
if plot:
plot_model(res, data, extrapolate=extrapolate, boundary=boundary, plot_infectious=True, death_metric=death_metric)
death_pdf = plot_with_errors_sample(res, guesses[:17], data, extrapolate=extrapolate, boundary=boundary, plot_infectious=False, error_start=error_start, quick=quick, tail=tail, death_metric=death_metric)
else:
if getbounds:
death_pdf = get_fit_errors(res, guesses[:17], data, extrapolate=extrapolate, error_start=error_start, quick=quick, tail=tail, death_metric=death_metric)
else:
prediction_fit = [point[1] for point in predictions]
death_error = quickie(prediction_fit, data, None, error_start=None)
death_error = np.concatenate((data["daily_deaths"].values[0:1], death_error))
death_pdf.append(death_error)
return (predictions, death_pdf, res)
###########################################################
def test(end, bias=False, policy_regime=False, tail_regime=False, weight=True, plot=False, guesses=None, error_start=-1, quick=False, tail=False, fitQ=False, getbounds=True, adaptive=False, death_metric="deaths"):
counties_dates = []
counties_death_errors = []
counties_fips = []
nonconvergent = []
parameters = {}
# us = process_data("/data/us/covid/nyt_us_counties.csv", "/data/us/demographics/county_populations.csv")
us = loader.load_data("/models/epidemiological/production/us_training_data.csv")
us_daily = loader.load_data("/data/us/covid/nyt_us_counties_daily.csv")
policies = loader.load_data("/data/us/other/policies.csv")
fips_key = loader.load_data("/data/us/processing_data/fips_key.csv", encoding="latin-1")
# fips_list = fips_key["FIPS"]
fips_list = [36061] #34017, 17031, 25013, 34023, 36059, 33011 56013,1017, 44007, 42101, 6037 27053
total = len(fips_list)
for index, county in enumerate(fips_list):
print(f"{index+1} / {total}")
county_data = loader.query(us, "fips", county)
county_data['daily_deaths'] = loader.query(us_daily, "fips", county)["deaths"]
county_data['avg_deaths'] = county_data.iloc[:,6].rolling(window=3).mean()
county_data = county_data[2:]
firstnonzero = next((i for i,value in enumerate(county_data[death_metric].values) if value != 0), None)
final_death = (county_data["deaths"].values)[-1]
initial_death = (county_data["deaths"].values)[firstnonzero]
if firstnonzero is not None:
if firstnonzero > len(county_data)-7 or final_death-initial_death == 0:
# add to nonconvergent counties
nonconvergent.append(county)
continue
death_observations = (county_data['daily_deaths'].values)[firstnonzero:]
if list(death_observations).count(0) > len(death_observations)/2:
nonconvergent.append(county)
continue # for fit_single_county use return [county]
begin = firstnonzero-death_time
if begin >= 0:
county_data = county_data[begin:]
firstnonzero = death_time
county_data.reset_index(drop=True, inplace=True)
else:
                continue # don't add to nonconvergent counties, just leave blank and submission script will fill it in with all zeros
if adaptive and death_metric=="deaths":
actual_deaths = (county_data['deaths'].values)[firstnonzero:]
moving_deaths = (county_data['avg_deaths'].values)[firstnonzero:]
residuals = []
for index in range(1, len(actual_deaths)):
moving_change = moving_deaths[index] - moving_deaths[index-1]
if moving_change > 0:
residue = actual_deaths[index] - moving_deaths[index]
residue = residue/moving_change
residuals.append(residue)
if np.std(residuals) > 0.25:
print("gottem")
death_metric = "avg_deaths"
dates = pd.to_datetime(county_data["date"].values)
county_policy_regime = policy_regime
policy_regime_change = -2*death_time
if bias or policy_regime:
policy_date = loader.query(policies, "FIPS", county)["stay at home"]
if len(policy_date) == 0 or math.isnan(policy_date.values[0]):
county_policy_regime = False
else:
policy_date = int(policy_date.values[0])
policy_regime_change = int((datetime.datetime.fromordinal(policy_date)-dates[0])/np.timedelta64(1, 'D'))
if policy_regime_change < (death_time-5) or policy_regime_change > len(county_data) - (death_time+5) or policy_regime_change <= firstnonzero:
county_policy_regime = False
policy_regime_change = -2*death_time
if tail_regime and type(tail_regime)==int:
tail = tail_regime
extrapolate = (end-dates[-1])/np.timedelta64(1, 'D')
county_tail_regime = len(county_data) + tail_regime
county_tail_regime = max(firstnonzero, county_tail_regime)
predictions, death_pdf, res = fit(county_data, bias=policy_regime_change+death_time, weight=weight, plot=False, extrapolate=extrapolate, guesses=guesses, fitQ=fitQ, getbounds=getbounds, death_metric=death_metric)
if res is not None:
predictions2, death_pdf2, res2 = fit(county_data, bias=county_tail_regime, bias_value=0.01, weight=weight, plot=plot, extrapolate=extrapolate, guesses=res.x, error_start=error_start, quick=quick, tail=tail, fitQ=fitQ, getbounds=getbounds, death_metric=death_metric)
if res2 is not None:
predictions, death_pdf, res = predictions2, death_pdf2, res2
else:
extrapolate = (end-dates[-1])/np.timedelta64(1, 'D')
predictions, death_pdf, res = fit(county_data, bias=policy_regime_change+death_time, weight=weight, plot=plot, extrapolate=extrapolate, guesses=guesses, error_start=error_start, quick=quick, tail=tail, fitQ=fitQ, getbounds=getbounds, death_metric=death_metric)
if res is None:
# add to nonconvergent counties
nonconvergent.append(county)
continue
print(list(res.x))
death_cdf = get_death_cdf(death_pdf, extrapolate, switch=quick)
if death_cdf is None:
if policy_regime:
death_pdf = get_fit_errors2(res, guesses[:17], county_data1, county_data2, extrapolate=extrapolate, error_start=error_start, quick=True, tail=tail, death_metric=death_metric)
else:
death_pdf = get_fit_errors(res, guesses[:17], county_data, extrapolate=extrapolate, error_start=error_start, quick=True, tail=tail, death_metric=death_metric)
death_cdf = get_death_cdf(death_pdf, extrapolate, switch=True)
death_cdf = np.transpose(death_cdf)
counties_dates.append(dates)
counties_death_errors.append(death_cdf)
counties_fips.append(county)
if county in parameters.keys():
parameters[county].append(res.x)
else:
parameters[county] = [res.x]
if len(nonconvergent) > 0:
print(f"nonconvergent: {nonconvergent}")
counties_dates_non, counties_death_errors_non, counties_fips_non = fill_nonconvergent(nonconvergent, us_daily, end, error_start=error_start)
counties_dates = counties_dates + counties_dates_non
for death_cdf in counties_death_errors_non:
counties_death_errors.append(death_cdf)
# county_death_errors = counties_death_errors + counties_death_errors2
counties_fips = counties_fips + counties_fips_non
output_dict = {"counties_dates": np.array(counties_dates), "counties_death_errors": np.array(counties_death_errors), "counties_fips": np.array(counties_fips), \
"nonconvergent": nonconvergent, "parameters": parameters}
return output_dict
class Empty:
pass
def generate_single_confidence(input_dict):
us = input_dict["us"]
us_daily = input_dict["us_daily"]
policies = input_dict["policies"]
county = input_dict["county"]
end = input_dict["end"]
bias = input_dict["bias"]
policy_regime = input_dict["policy_regime"]
tail_regime = input_dict["tail_regime"]
weight = input_dict["weight"]
params = input_dict["params"]
quick = input_dict["quick"]
error_start = input_dict["error_start"]
tail = input_dict["tail"]
adaptive = input_dict["adaptive"]
death_metric = input_dict["death_metric"]
nonconvergent = None
if params is None:
# add to nonconvergent counties
return [county]
res = Empty()
res.x = params
county_data = loader.query(us, "fips", county)
county_data['daily_deaths'] = loader.query(us_daily, "fips", county)["deaths"]
county_data['avg_deaths'] = county_data.iloc[:,6].rolling(window=3).mean()
county_data = county_data[2:-8]
firstnonzero = next((index for index,value in enumerate(county_data[death_metric].values) if value != 0), None)
if firstnonzero is not None:
if firstnonzero > len(county_data)-7 or (county_data["deaths"].values)[-1]-(county_data["deaths"].values)[firstnonzero] == 0:
return [county] # add to nonconvergent counties
death_observations = (county_data['daily_deaths'].values)[firstnonzero:]
if list(death_observations).count(0) > len(death_observations)/2:
return [county]
begin = firstnonzero-death_time
if begin >= 0:
county_data = county_data[begin:]
firstnonzero = death_time
county_data.reset_index(drop=True, inplace=True)
else:
return None # dont add to nonconvergent counties, just leave blank and submission script will fill it in with all zeros
if adaptive and death_metric == "deaths":
actual_deaths = (county_data['deaths'].values)[firstnonzero:]
moving_deaths = (county_data['avg_deaths'].values)[firstnonzero:]
residuals = []
for index in range(1, len(actual_deaths)):
moving_change = moving_deaths[index] - moving_deaths[index-1]
if moving_change > 0:
residue = actual_deaths[index] - moving_deaths[index]
residue = residue/moving_change
residuals.append(residue)
if np.std(residuals) >= 0.25:
death_metric = "avg_deaths"
dates = pd.to_datetime(county_data["date"].values)
extrapolate = (end-dates[-1])/np.timedelta64(1, 'D')
policy_regime_change = -2*death_time
if bias or policy_regime:
policy_date = loader.query(policies, "FIPS", county)["stay at home"]
if len(policy_date) == 0:
bias = False
policy_regime = False
else:
policy_date = int(policy_date.values[0])
policy_regime_change = int((datetime.datetime.fromordinal(policy_date)-dates[0])/np.timedelta64(1, 'D'))
if policy_regime_change < (death_time-5) or policy_regime_change > len(county_data) - (death_time+5) or policy_regime_change <= firstnonzero:
bias = False
policy_regime = False
policy_regime_change = -2*death_time
if tail_regime and type(tail_regime)==int:
tail = tail_regime
plot_model(res, county_data, extrapolate=extrapolate, plot_infectious=True, death_metric=death_metric)
death_pdf = plot_with_errors_sample(res, params, county_data, extrapolate=extrapolate, error_start=error_start, quick=True, tail=tail, death_metric=death_metric)
# death_pdf = get_fit_errors(res, params, county_data, extrapolate=extrapolate, error_start=error_start, quick=True, tail=tail, death_metric=death_metric)
death_cdf = get_death_cdf(death_pdf, extrapolate, switch=True)
death_cdf = np.transpose(death_cdf)
return (dates, death_cdf, county)
def multi_generate_confidence(combined_parameters, end, quick=True, error_start=-14, tail=False, fix_nonconvergent=False):
#Get date range of April1 to June30 inclusive. Figure out how much to extrapolate
counties_dates = []
counties_death_errors = []
counties_fips = []
nonconvergent = []
# us = process_data("/data/us/covid/nyt_us_counties.csv", "/data/us/demographics/county_populations.csv")
us = loader.load_data("/models/epidemiological/production/us_training_data.csv")
us_daily = loader.load_data("/data/us/covid/nyt_us_counties_daily.csv")
policies = loader.load_data("/data/us/other/policies.csv")
policies = policies.dropna(subset=['stay at home'])
fips_key = loader.load_data("/data/us/processing_data/fips_key.csv", encoding="latin-1")
fips_list = fips_key["FIPS"]
data = []
for county in fips_list:
input_dict = {}
input_dict["us"] = us
input_dict["us_daily"] = us_daily
input_dict["policies"] = policies
input_dict["county"] = county
input_dict["end"] = end
input_dict["quick"] = quick
input_dict["error_start"] = error_start
input_dict['tail'] = tail
# input_dict["params"] = combined_parameters["params"]
# input_dict["policy_regime"] = combined_parameters["policy_regime"]
# input_dict["tail_regime"] = combined_parameters["tail_regime"]
# input_dict["adaptive"] = combined_parameters["adaptive"]
# input_dict["death_metric"] = combined_parameters["death_metric"]
county_key = str(county)
        if county_key in list(combined_parameters.keys()): #combined_parameters has an entry for every county from the optimize script submission files. If the file they came from does not have parameters for this county, feed in None
if combined_parameters[county_key] is None:
continue
for key in list(combined_parameters[county_key].keys()):
input_dict[key] = (combined_parameters[county_key])[key]
else:
continue
data.append(input_dict)
if len(data) == 1:
result = generate_single_confidence(data[0])
if len(result) == 1:
nonconvergent.append(result[0])
else:
(dates, death_cdf, county) = result
counties_dates.append(dates)
counties_death_errors.append(death_cdf)
counties_fips.append(county)
else:
pool = Pool(os.cpu_count()) ## According to TA this will saturate more cores in the hpc?
results = pool.map(generate_single_confidence, data)
for result in results:
if result is not None:
if len(result) == 1:
nonconvergent.append(result[0])
else:
(dates, death_cdf, county) = result
counties_dates.append(dates)
counties_death_errors.append(death_cdf)
counties_fips.append(county)
if len(nonconvergent) > 0:
counties_dates_non, counties_death_errors_non, counties_fips_non = fill_nonconvergent(nonconvergent, us_daily, end, fix_nonconvergent=fix_nonconvergent)
counties_dates = counties_dates + counties_dates_non
for death_cdf in counties_death_errors_non:
counties_death_errors.append(death_cdf)
counties_fips = counties_fips + counties_fips_non
output_dict = {"counties_dates": np.array(counties_dates), "counties_death_errors": np.array(counties_death_errors), "counties_fips": np.array(counties_fips), \
"nonconvergent": nonconvergent}
return output_dict
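# A sketch of the ``combined_parameters`` layout this function expects (keys
# inferred from ``generate_single_confidence`` above; the numbers are
# placeholders, not real fitted values):
#
#   combined_parameters = {
#       "36061": {
#           "params": [0.34, 0.34, ...],     # optimizer output for this county
#           "bias": False,
#           "policy_regime": False,
#           "tail_regime": False,
#           "weight": True,
#           "adaptive": True,
#           "death_metric": "deaths",
#       },
#       "17031": None,                       # county the optimizer skipped
#   }
#   output = multi_generate_confidence(combined_parameters,
#                                       datetime.datetime(2020, 6, 30))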
###########################################################
if __name__ == '__main__':
end = datetime.datetime(2020, 6, 30)
# guesses = [1.41578513e-01, 1.61248129e-01, 2.48362028e-01, 3.42978127e-01, 5.79023652e-01, 4.64392758e-02, \
# 9.86745420e-06, 4.83700388e-02, 4.85290835e-01, 3.72688900e-02, 4.92398129e-04, 5.20319673e-02, \
# 4.16822944e-02, 2.93718207e-02, 2.37765976e-01, 6.38313283e-04, 1.00539865e-04, 7.86113867e-01, \
# 3.26287443e-01, 8.18317732e-06, 5.43511913e-10, 1.30387168e-04, 3.58953133e-03, 1.57388153e-05]
guesses = None
# guesses = [0.347788756754361, 0.34491677887291705, 0.003154722699912536, 0.4985060201450094, 0.550563984674294, 0.03823471453254752, 0.0015897569370657341, 0.37585163777022107, 0.4154389095590687, 0.08168387387250878, 9.304462310605794e-06, 0.06250864864173467, 0.021263444109332643, 0.05731367025219088, 0.16611190329700892, 0.0013937980057879332, 0.00042326122491193784, 0.7664703213628267, 0.6574011838097715, 2.183979980914606e-07, 1.1781675019848431e-09, 6.883361598806804e-05, 0.009839663454234501, 2.03354867231974e-05]
# guesses = [0.09450366904673224, 0.0004710760723271189, 0.1400163735156153, 0.22484294992189927, 0.45928280320621817, 0.10881967853088996, 0.04383382741493148, 0.3198398363291732, 0.31772919729568444, 0.18029197284930204, 4.5633833774879513e-10, 0.0001555994932997694, 0.03131462536531793, 0.06482950780667499, 0.09014858524860507, 4.0645019531833214e-09, 0.03245638906145152, 0.739559707217159, 0.24539613824473772, 0.0039457747991040164, 0.0066152618110283355, 0.00023626676203165555, 7.944968195828775e-38, 7.057330849865465e-06]
test(end, bias=False, policy_regime=False, tail_regime=False, weight=True, plot=True, guesses=guesses, error_start=None, quick=True, tail=False, fitQ=False, adaptive=True, death_metric="deaths")
# test(end, bias=True, policy_regime=False, tail_regime=-14, weight=True, plot=True, guesses=guesses, error_start=None, quick=True, tail=False, fitQ=True, adaptive=True, death_metric="deaths")
# params1 = [0.347788756754361, 0.34491677887291705, 0.003154722699912536, 0.4985060201450094, 0.550563984674294, 0.03823471453254752, 0.0015897569370657341, 0.37585163777022107, 0.4154389095590687, 0.08168387387250878, 9.304462310605794e-06, 0.06250864864173467, 0.021263444109332643, 0.05731367025219088, 0.16611190329700892, 0.0013937980057879332, 0.00042326122491193784, 0.7664703213628267, 0.6574011838097715, 2.183979980914606e-07, 1.1781675019848431e-09, 6.883361598806804e-05, 0.009839663454234501, 2.03354867231974e-05]
# # # params1 = list(0.8*np.array(params1))
# a_1 = params1[0] #decrease to delay
# a_2 = params1[1] #decrease to delay
# a_3 = params1[2] #increase to raise peak
# b_1 = params1[3] #decrease to delay
# b_2 = 0.95*params1[4] #decrease to delay
# b_3 = 2*params1[5] #Increase to delay
# b_4 = params1[6] #Increase to delay
# g_a = 0.8*params1[7] #Increase to lower peak
# g_i = 1.4*params1[8] #Decrease to widen
# th = 0.1*params1[9] #Decrease to make tail heavier
# del_a = params1[10]
# del_i = params1[11] #Increase to flatten
# r_a = params1[12]
    # r_i =
defines = {}
if __preprocessedFiles is None:
__preprocessedFiles = []
log.info("preprocess(infile=%r, outfile=%r, defines=%r, force=%r, "
"keepLines=%r, includePath=%r, contentType=%r, "
"__preprocessedFiles=%r)", infile, outfile, defines, force,
keepLines, includePath, contentType, __preprocessedFiles)
absInfile = os.path.normpath(os.path.abspath(infile))
if absInfile in __preprocessedFiles:
raise PreprocessError("detected recursive #include of '%s'"
% infile)
__preprocessedFiles.append(os.path.abspath(infile))
# Determine the content type and comment info for the input file.
if contentType is None:
registry = contentTypesRegistry or getDefaultContentTypesRegistry()
contentType = registry.getContentType(infile)
if contentType is None:
contentType = "Text"
log.warn("defaulting content type for '%s' to '%s'",
infile, contentType)
try:
cgs = _commentGroups[contentType]
except KeyError:
raise PreprocessError("don't know comment delimiters for content "
"type '%s' (file '%s')"
% (contentType, infile))
# Generate statement parsing regexes. Basic format:
# <comment-prefix> <preprocessor-stmt> <comment-suffix>
# Examples:
# <!-- #if foo -->
# ...
# <!-- #endif -->
#
# # #if BAR
# ...
# # #else
# ...
# # #endif
    stmts = [r'#\s*(?P<op>if|elif|ifdef|ifndef)\s+(?P<expr>.*?)',
r'#\s*(?P<op>else|endif)',
r'#\s*(?P<op>error)\s+(?P<error>.*?)',
r'#\s*(?P<op>define)\s+(?P<var>[^\s]*?)(\s+(?P<val>.+?))?',
r'#\s*(?P<op>undef)\s+(?P<var>[^\s]*?)',
r'#\s*(?P<op>include)\s+"(?P<fname>.*?)"',
r'#\s*(?P<op>include)\s+(?P<var>[^\s]+?)',
]
patterns = []
for stmt in stmts:
# The comment group prefix and suffix can either be just a
# string or a compiled regex.
for cprefix, csuffix in cgs:
if hasattr(cprefix, "pattern"):
pattern = cprefix.pattern
else:
pattern = r"^\s*%s\s*" % re.escape(cprefix)
pattern += stmt
if hasattr(csuffix, "pattern"):
pattern += csuffix.pattern
else:
pattern += r"\s*%s\s*$" % re.escape(csuffix)
patterns.append(pattern)
stmtRes = [re.compile(p) for p in patterns]
# Process the input file.
# (Would be helpful if I knew anything about lexing and parsing
# simple grammars.)
fin = open(infile)
lines = fin.readlines()
fin.close()
    if isinstance(outfile, str):
if force and os.path.exists(outfile):
os.chmod(outfile, 0o777)
os.remove(outfile)
fout = open(outfile, 'w')
else:
fout = outfile
defines['__FILE__'] = infile
SKIP, EMIT = list(range(2)) # states
states = [(EMIT, # a state is (<emit-or-skip-lines-in-this-section>,
0, # <have-emitted-in-this-if-block>,
0)] # <have-seen-'else'-in-this-if-block>)
lineNum = 0
for line in lines:
lineNum += 1
log.debug("line %d: %r", lineNum, line)
defines['__LINE__'] = lineNum
# Is this line a preprocessor stmt line?
# XXX Could probably speed this up by optimizing common case of
# line NOT being a preprocessor stmt line.
for stmtRe in stmtRes:
match = stmtRe.match(line)
if match:
break
else:
match = None
if match:
op = match.group("op")
log.debug("%r stmt (states: %r)", op, states)
if op == "define":
if not (states and states[-1][0] == SKIP):
var, val = match.group("var", "val")
if val is None:
val = None
else:
try:
val = eval(val, {}, {})
except BaseException:
pass
defines[var] = val
elif op == "undef":
if not (states and states[-1][0] == SKIP):
var = match.group("var")
try:
del defines[var]
except KeyError:
pass
elif op == "include":
if not (states and states[-1][0] == SKIP):
if "var" in match.groupdict():
# This is the second include form: #include VAR
var = match.group("var")
f = defines[var]
else:
# This is the first include form: #include "path"
f = match.group("fname")
for d in [os.path.dirname(infile)] + includePath:
fname = os.path.normpath(os.path.join(d, f))
if os.path.exists(fname):
break
else:
raise PreprocessError("could not find #include'd file "
"\"%s\" on include path: %r"
% (f, includePath))
defines = preprocess(fname, fout, defines, force,
keepLines, includePath, substitute,
contentTypesRegistry=contentTypesRegistry,
__preprocessedFiles=__preprocessedFiles)
elif op in ("if", "ifdef", "ifndef"):
if op == "if":
expr = match.group("expr")
elif op == "ifdef":
expr = "defined('%s')" % match.group("expr")
elif op == "ifndef":
expr = "not defined('%s')" % match.group("expr")
try:
if states and states[-1][0] == SKIP:
                        # We are nested in a SKIP-portion of an if-block.
states.append((SKIP, 0, 0))
elif _evaluate(expr, defines):
states.append((EMIT, 1, 0))
else:
states.append((SKIP, 0, 0))
except KeyError:
raise PreprocessError("use of undefined variable in "
"#%s stmt" % op, defines['__FILE__'],
defines['__LINE__'], line)
elif op == "elif":
expr = match.group("expr")
try:
if states[-1][2]: # already had #else in this if-block
raise PreprocessError("illegal #elif after #else in "
"same #if block", defines['__FILE__'],
defines['__LINE__'], line)
elif states[-1][1]: # if have emitted in this if-block
states[-1] = (SKIP, 1, 0)
elif states[:-1] and states[-2][0] == SKIP:
                        # We are nested in a SKIP-portion of an if-block.
states[-1] = (SKIP, 0, 0)
elif _evaluate(expr, defines):
states[-1] = (EMIT, 1, 0)
else:
states[-1] = (SKIP, 0, 0)
except IndexError:
raise PreprocessError("#elif stmt without leading #if "
"stmt", defines['__FILE__'],
defines['__LINE__'], line)
elif op == "else":
try:
if states[-1][2]: # already had #else in this if-block
raise PreprocessError("illegal #else after #else in "
"same #if block", defines['__FILE__'],
defines['__LINE__'], line)
elif states[-1][1]: # if have emitted in this if-block
states[-1] = (SKIP, 1, 1)
elif states[:-1] and states[-2][0] == SKIP:
                        # We are nested in a SKIP-portion of an if-block.
states[-1] = (SKIP, 0, 1)
else:
states[-1] = (EMIT, 1, 1)
except IndexError:
raise PreprocessError("#else stmt without leading #if "
"stmt", defines['__FILE__'],
defines['__LINE__'], line)
elif op == "endif":
try:
states.pop()
except IndexError:
raise PreprocessError("#endif stmt without leading #if"
"stmt", defines['__FILE__'],
defines['__LINE__'], line)
elif op == "error":
if not (states and states[-1][0] == SKIP):
error = match.group("error")
raise PreprocessError("#error: " + error, defines['__FILE__'],
defines['__LINE__'], line)
log.debug("states: %r", states)
if keepLines:
fout.write("\n")
else:
try:
if states[-1][0] == EMIT:
log.debug("emit line (%s)" % states[-1][1])
# Substitute all defines into line.
# XXX Should avoid recursive substitutions. But that
# would be a pain right now.
sline = line
if substitute:
for name in reversed(sorted(defines, key=len)):
value = defines[name]
sline = sline.replace(name, str(value))
fout.write(sline)
elif keepLines:
log.debug("keep blank line (%s)" % states[-1][1])
fout.write("\n")
else:
log.debug("skip line (%s)" % states[-1][1])
except IndexError:
raise PreprocessError("superfluous #endif before this line",
defines['__FILE__'],
defines['__LINE__'])
if len(states) > 1:
raise PreprocessError("unterminated #if block", defines['__FILE__'],
defines['__LINE__'])
elif len(states) < 1:
raise PreprocessError("superfluous #endif on or before this line",
defines['__FILE__'], defines['__LINE__'])
if fout != outfile:
fout.close()
return defines
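# A minimal usage sketch (illustrative): run one file through the preprocessor
# with a couple of defines. Keyword names follow the recursive call above;
# "app.h.in" is a placeholder input path.
#
#   import sys
#   defines = preprocess("app.h.in", sys.stdout,
#                        defines={"DEBUG": 1, "PLATFORM": "linux"},
#                        keepLines=True, substitute=True)
#   # the returned dict also carries the __FILE__/__LINE__ entries set above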
# ---- content-type handling
_gDefaultContentTypes = """
# Default file types understood by "preprocess.py".
#
# Format is an extension of 'mime.types' file syntax.
# - '#' indicates a comment to the end of the line.
# - a line is:
# <filetype> [<pattern>...]
# where,
# <filetype>'s are equivalent in spirit to the names used in the Windows
# registry in HKCR, but some of those names suck or are inconsistent;
# and
# <pattern> is a suffix (pattern starts with a '.'), a regular expression
# (pattern is enclosed in '/' characters), a full filename (anything
# else).
#
# Notes on case-sensitivity:
#
# A suffix pattern is case-insensitive on Windows and case-sensitive
# elsewhere. A filename pattern is case-sensitive everywhere. A regex
# pattern's case-sensitivity is defined by the regex. This means it is by
# default case-sensitive, but this can be changed using Python's inline
# regex option syntax. E.g.:
# Makefile /^(?i)makefile.*$/ # case-INsensitive regex
Python .py
Python .pyw
Perl .pl
Ruby .rb
Tcl .tcl
XML .xml
XML .kpf
XML .xul
XML .rdf
XML .xslt
XML .xsl
XML .wxs
XML .wxi
HTML .htm
HTML .html
XML .xhtml
Makefile /^[Mm]akefile.*$/
PHP .php
JavaScript .js
CSS .css
C++ .c # C++ because then we can use //-style comments
C++ .cpp
C++ .cxx
C++ .cc
C++ .h
C++ .hpp
C++ .hxx
C++ .hh
IDL .idl
Text .txt
Fortran .f
Fortran .f90
Shell .sh
Shell .csh
Shell .ksh
Shell .zsh
Java .java
C# .cs
TeX .tex
# Some Komodo-specific file extensions
Python .ksf # Fonts & Colors scheme files
Text .kkf # Keybinding schemes files
"""
class ContentTypesRegistry:
"""A class that handles determining the filetype of a given path.
Usage:
>>> registry = ContentTypesRegistry()
>>> registry.getContentType("foo.py")
"Python"
"""
def __init__(self, contentTypesPaths=None):
self.contentTypesPaths = contentTypesPaths
self._load()
def _load(self):
from os.path import dirname, join, exists
self.suffixMap = {}
self.regexMap = {}
self.filenameMap = {}
self._loadContentType(_gDefaultContentTypes)
localContentTypesPath = join(dirname(__file__), "content.types")
if exists(localContentTypesPath):
log.debug("load content types file: `%r'" % localContentTypesPath)
self._loadContentType(open(localContentTypesPath).read())
for path in (self.contentTypesPaths or []):
log.debug("load content types file: `%r'" % path)
self._loadContentType(open(path).read())
def _loadContentType(self, content, path=None):
"""Return the registry for the given content.types file.
The registry is three mappings:
<suffix> -> <content type>
<regex> -> <content type>
<filename> -> <content type>
"""
for line in content.splitlines(0):
words = line.strip().split()
            for i in
# encoding=utf8
from __future__ import unicode_literals
from datetime import datetime
from sw.allotmentclub import User, Member, Organization
from sw.allotmentclub.model import ENGINE_NAME
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import random
import psycopg2
import csv
import json
import os.path
import sqlalchemy
import sqlalchemy.orm.session
import pkg_resources
import pytest
import risclog.sqlalchemy.testing
import transaction
class JSONFixture(object):
basepath = ('sw.allotmentclub',
'../../../spec/javascripts/fixtures/json/')
_url = None
def __init__(self, fixture, key):
package, path = self.basepath
self.fixture_path = pkg_resources.resource_filename(
package, os.path.join(path, fixture))
self.key = key
self.fixture = self.load().get(self.key, {})
def url(self, default=None):
self._url = self.fixture.get('url', default)
return self._url
def load(self):
with open(self.fixture_path, 'r') as fixture:
return json.load(fixture)
def write(self, content):
with open(self.fixture_path, 'w') as fixture:
json.dump(content, fixture, sort_keys=True, indent=4,
separators=(',', ': '))
def assertEqual(self, data, data_key, save=False):
if save:
fixture = self.load()
if self.key not in fixture:
fixture[self.key] = {}
fixture[self.key]['url'] = self._url
fixture[self.key][data_key] = data.get(data_key, None)
self.write(fixture)
self.fixture = self.load()[self.key]
expected = self.fixture[data_key] or []
got = data.get(data_key, [])
assert len(expected) == len(got)
for item in expected:
assert item in got
def ajax(self, browser):
"""Simulate ajax call with data from fixture."""
data = self.fixture['data']
url = data['url'].replace('/api/', '')
if data.get('type', data.get('method', '')).lower() == 'post':
browser.post(
'http://localhost{}'.format(url),
data=data['data'],
type=data['contentType'],
xhr=True)
else:
browser.get('http://localhost{}'.format(url), xhr=True)
def assertFileEqual(generated_data, master_filename):
from pdf_diff.command_line import compute_changes
import pkg_resources
import tempfile
import os
import pytest
master_file = pkg_resources.resource_filename(
'sw.allotmentclub.browser.tests', master_filename)
handle, generated_file = tempfile.mkstemp(suffix='pdf')
os.fdopen(handle, 'wb').write(generated_data)
changes = compute_changes(master_file, generated_file)
if changes:
changes_text = ""
for change in changes:
if change == '*':
changes_text += '\n\r'
continue
changes_text += '{}: {}\n\r'.format(
change['pdf']['file'], change['text'])
pytest.fail(
'Generated pdf does not equal master: \n\r\n\r{}'.format(
changes_text))
else:
os.remove(generated_file)
def pytest_configure(config):
import sys
sys._called_from_test = True
def pytest_unconfigure(config):
import sys # This was missing from the manual
del sys._called_from_test
@pytest.fixture(scope='function')
def json_fixture(request):
"""Helper that retrieves JSON fixtures from files on disk."""
return JSONFixture(
request.module.__name__.split('.')[-1].replace('test_', '') + '.json',
request.function.__name__.replace('test_', ''))
class PostgreSQLTestDB(object):
user = 'allotmentclubtest'
passwd = '<PASSWORD>'
host = 'localhost'
port = 5432
name = None
def __init__(self, prefix, schema_path=None):
self.prefix = prefix
db_name = '%012x' % random.getrandbits(48)
self.name = f'{self.prefix}{db_name}'
self.dsn = self.get_dsn()
self.schema_path = schema_path
def get_dsn(self):
login = ''
if self.user:
login += self.user
if self.passwd:
login += ':' + self.passwd
login += '@'
return f'postgresql://{login}{self.host}:{self.port}/{self.name}'
def create(self):
with psycopg2.connect(
database='postgres',
user=self.user,
            password=self.passwd,
host=self.host,
port=self.port,
) as conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
sql = f'CREATE DATABASE {self.name} WITH OWNER {self.user}'
cursor.execute(sql)
self.mark_testing()
if self.schema_path:
with psycopg2.connect(
database=self.name,
user=self.user,
                password=self.passwd,
host=self.host,
port=self.port,
) as conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
cursor.execute(open(self.schema_path, 'r').read())
def drop(self):
with psycopg2.connect(
database='postgres',
user=self.user,
            password=self.passwd,
host=self.host,
port=self.port,
) as conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
for sql in [
f'UPDATE pg_database SET datallowconn = false '
f"WHERE datname = '{self.name}'",
f'ALTER DATABASE "{self.name}" CONNECTION LIMIT 1',
f'SELECT pg_terminate_backend(pid) FROM pg_stat_activity '
f"WHERE datname = '{self.name}'",
f'drop database {self.name}',
]:
cursor.execute(sql)
def mark_testing(self):
engine = sqlalchemy.create_engine(self.dsn)
meta = sqlalchemy.MetaData()
meta.bind = engine
table = sqlalchemy.Table(
'tmp_functest',
meta,
sqlalchemy.Column('schema_mtime', sqlalchemy.Integer),
)
table.create()
engine.dispose()
def database_fixture_factory(
request,
prefix,
name,
schema_path=None,
create_all=True,
alembic_location=None,
expire_on_commit=False,
):
db = PostgreSQLTestDB(prefix=prefix, schema_path=schema_path)
db.create()
db_util = risclog.sqlalchemy.db.get_database(
testing=True, keep_session=True, expire_on_commit=expire_on_commit
)
db_util.register_engine(
db.dsn, name=name, alembic_location=alembic_location
)
if create_all:
db_util.create_all(name)
transaction.commit()
def dropdb():
transaction.abort()
db_util.drop_engine(name)
for conn in sqlalchemy.pool._refs:
conn.close()
db.drop()
if db_util and not db_util.get_all_engines():
db_util._teardown_utility()
request.addfinalizer(dropdb)
return db_util
@pytest.fixture(scope='session')
def database_session(request):
"""Set up and tear down the import test database.
Returns the database utility object.
"""
yield database_fixture_factory(
request, 'sw_allotmentclub_', ENGINE_NAME
)
@pytest.fixture(scope='function')
def database(request, database_session):
"""Perform database setup and tear down for test function.
Will empty all tables beforehand and close the session afterwards.
Since this fixture is effectively used in every unit test, we also run the
cleanup here.
Returns the database utility object.
"""
for engine in database_session.get_all_engines():
database_session.empty(engine)
yield database_session
database_session.session.close_all()
@pytest.fixture(scope='function')
def organization(request, database):
"""Fixture that creates an organization."""
org = Organization.find_or_create(id=1, title='Leuna-Siedlung Roter See')
database.session.flush()
transaction.commit()
return org
@pytest.fixture(scope='function')
def verwalter(request, organization, database):
"""Fixture creating a user with role Administrator."""
user = User.find_or_create(
username='admin', password='<PASSWORD>',
vorname='Admin', nachname='istrator',
unrestricted_access=True,
organization_id=1)
database.session.flush()
transaction.commit()
return user
@pytest.fixture(scope='function')
def user(request, organization, database):
"""Fixture creating a user with no role."""
user = User.find_or_create(
username='user', password='<PASSWORD>', nachname='<PASSWORD>',
position='Vorsitzender', ort='Leuna', organization_id=1)
database.session.flush()
return user
@pytest.fixture(scope='function')
def member(request, database):
"""Fixture creating a member."""
def delete_member():
Member.query().filter(Member.lastname == 'Mittag').one().delete()
database.session.flush()
member = Member.create(firstname='Gerd', lastname='Mittag')
database.session.flush()
request.addfinalizer(delete_member)
return member
class Amount(object):
amount = None
currency = None
def __init__(self, amount, currency):
self.amount = amount
self.currency = currency
def import_bookings():
from .account import import_transactions, BankingAccount
account = BankingAccount.find_or_create(
organization_id=1, number='3440000167')
statements = []
with open(pkg_resources.resource_filename(
'sw.allotmentclub.tests', 'test_account_import.csv')) as f:
reader = csv.reader(f, delimiter=';')
for line in reader:
(_, _, _, account_number, _, _, _, _, _, _, _, bic, _, iban, _, _,
recipient, _, _, _, _, _, booking_date, _, value, currency, _, _,
_, _, _, _, _, _, booking_text, _, _, purpose, _, _, _, _, _, _,
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _,
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _,
_) = line
statements.append({
'date': datetime.strptime(booking_date, '%Y/%m/%d'),
'currency': currency,
'purpose': purpose,
'amount': Amount(
float(value.replace('/1', ''))/10000,
currency),
'applicant_name': recipient,
'posting_text': booking_text,
'applicant_iban': iban,
'applicant_bin': bic,
'customer_reference': None
})
import_transactions(statements, account)
def import_energy_meters():
from sw.allotmentclub import Allotment, ElectricMeter, EnergyValue
from sw.allotmentclub import EnergyPrice
EnergyPrice.find_or_create(year=2014, value=1342354, bill=42312300,
price=3020, normal_fee=81700, power_fee=243300)
EnergyPrice.find_or_create(year=2015, value=1354334, bill=42134200)
for allot, number, v2013, v2014, power, disc, comment in [
['102', '318992603', '6893', '6893', '', '', ''],
['104', '136426011', '10019', '10634', 'X', '', ''],
['106', '21785640', '23154', '24207', '', '', ''],
['108', '81112116', '8165', '8411', '', '', ''],
['110', '31850195', '65811', '66345', '', '', ''],
['112', '20232757', '56221', '56371', '', '', ''],
['114', '364754', '7361', '7407', 'X', '', ''],
['118', '21292097', '935', '960', '', '', ''],
['122', '0063487695', '7028', '7988', 'X', '', ''],
['124', '4270447', '7671', '8033', '', '', ''],
['203', '21181284', '5933', '5997', '', '', ''],
['203', '3295328', '1307', '1349', '', '', 'Satanlage'],
['249', '20868068', '12115', '12115', '', '', ''],
['251', '20236014', '17339', '17352', '', '', 'Wasserpumpe'],
['328', '409120', '5075', '5075', '', '', ''],
['405', '8056675', '66018', '66098', '', '', '']]:
allotment = Allotment.query().filter(
Allotment.number == allot).one()
meter = ElectricMeter.create(
allotment=allotment,
number=number,
electric_power=bool(power),
disconnected=bool(disc),
comment=comment)
for year, value in [(2013, v2013), (2014, v2014)]:
value = EnergyValue.create(
electric_meter=meter, year=year, value=int(value))
value.update_member()
value.update_usage()
value.update_data()
meter.energy_values.append(value)
transaction.savepoint()
def import_members(max=99999):
from sw.allotmentclub import Member, Allotment, Parcel
for line in [
['60', '102', '', 'Frau', 'Ines', 'Groß', 'Mittelweg 7',
'01458', 'Ottendorf-Okrilla', '', '', ''],
['62', '104', '', 'Herr', 'Reiner', 'Pfeil', 'Schillerstr. 42',
'06247', '<NAME>', '', '034635 32731', ''],
['64', '106', '', 'Frau', 'Astrid', 'Ritter',
'Brandenburgische Str. 27', '15366', 'Hönow', '', '', ''],
['67', '108', '', 'Herr', 'Sebastian', 'Wehrmann',
'Geseniusstr. 34', '06110', 'Halle', '', '', '0172 1096832'],
['70', '110', '', 'Herr', 'Johannes', 'Hennig', 'A.-Einstein-'
'Str. 15', '06237', 'Leuna', '', '03461 502682', ''],
['74', '112', '', 'Frau', 'Regina', 'Esser', 'Ringstr. 42',
'06886', 'Wittenberg', '', '03491 662813', ''],
['76', '114', 'Dr.', 'Frau', 'Brigitte', 'Helbig', 'Wolfgang-'
'Heinze-Str. 20', '04277', 'Leipzig', '', '0341 3520609', ''],
['83', '118', '', 'Frau', 'Margit ', 'Götze', 'Heinrich-Heine'
'-Str. 19', '06237', 'Leuna', '', '03461 811244', ''],
['92/93', '122/ 225', '', 'Herr', 'Claus', 'Masthoff', 'Paul-'
'Thiersch-Str. 16', '06124', 'Halle', '', '0345 6876407', ''],
['94/50', '124', '', 'Frau', 'Britta', 'Grimmling', 'Fors 190',
'87391', 'Bollstabruk', 'Schweden', '', '0157 84943178'],
['150', '249/251', '', 'Herr', 'Lutz', 'Rösler',
'Cloppenburger Str. 12', '06126', 'Halle', '', '', ''],
['100/137', '405/406', '', 'Frau', 'Marlies', 'Leutloff', 'Wei'
'ßenfelser Str. 11c', '06231', '<NAME>', '', '', ''],
['', '', '', 'Herr', 'Günter', 'Tillack', 'Harry-S.-Truman-'
'Allee 4', '14167', 'Berlin', '', '', '0162 9541236'],
['153', '328', '', '', '', 'Leuna Bungalowgemeinschaft Roter '
'See e.V.', 'Postfach 1106', '06773', 'Gräfenhainichen', '', '', ''],
['58', '203', '', '', '', 'Werkstatt', '', '', '', '', '', '']
]:
if '/' in line[0] or '/' in line[1]:
lines = [line[:], line[:]]
else:
lines = [line]
if '/' in line[0]:
p1, p2 = line[0].split('/')
lines[0][0] = p1.strip()
lines[1][0] = p2.strip()
if '/' in line[1]:
p1, p2 = line[1].split('/')
lines[0][1] = p1.strip()
lines[1][1] = p2.strip()
for line in lines:
member = Member.find_or_create(
firstname=line[4], lastname=line[5])
member.title = line[2]
member.appellation = line[3]
, eol_))
if self.cUF is not None:
namespaceprefix_ = self.cUF_nsprefix_ + ':' if (UseCapturedNS_ and self.cUF_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%scUF>%s</%scUF>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.cUF), input_name='cUF')), namespaceprefix_ , eol_))
if self.xServ is not None:
namespaceprefix_ = self.xServ_nsprefix_ + ':' if (UseCapturedNS_ and self.xServ_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sxServ>%s</%sxServ>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.xServ), input_name='xServ')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('versao', node)
if value is not None and 'versao' not in already_processed:
already_processed.add('versao')
self.versao = value
self.versao = ' '.join(self.versao.split())
self.validate_TVerConsStatServ(self.versao) # validate type TVerConsStatServ
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'tpAmb':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'tpAmb')
value_ = self.gds_validate_string(value_, node, 'tpAmb')
self.tpAmb = value_
self.tpAmb_nsprefix_ = child_.prefix
# validate type TAmb
self.validate_TAmb(self.tpAmb)
elif nodeName_ == 'cUF':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'cUF')
value_ = self.gds_validate_string(value_, node, 'cUF')
self.cUF = value_
self.cUF_nsprefix_ = child_.prefix
# validate type TCodUfIBGE
self.validate_TCodUfIBGE(self.cUF)
elif nodeName_ == 'xServ':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'xServ')
value_ = self.gds_validate_string(value_, node, 'xServ')
self.xServ = value_
self.xServ_nsprefix_ = child_.prefix
# validate type xServType
self.validate_xServType(self.xServ)
# end class TConsStatServ
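# A short illustrative sketch (not generated code): building a status-service
# request object and serializing it with the generated ``export`` helper. The
# literal values are assumptions about the NFe schema ("STATUS" as the fixed
# service name, tpAmb "2" for homologation, cUF "35" as an IBGE UF code).
#
#   import sys
#   cons = TConsStatServ(versao="4.00", tpAmb="2", cUF="35", xServ="STATUS")
#   cons.export(sys.stdout, 0, name_="consStatServ")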
class TRetConsStatServ(GeneratedsSuper):
"""Tipo Resultado da Consulta do Status do Serviço"""
__hash__ = GeneratedsSuper.__hash__
member_data_items_ = [
MemberSpec_('versao', 'TVerConsStatServ', 0, 0, {'use': 'required'}),
MemberSpec_('tpAmb', ['TAmb', 'xs:string'], 0, 0, {'name': 'tpAmb', 'type': 'xs:string'}, None),
MemberSpec_('verAplic', ['TVerAplic', 'nfe:TString'], 0, 0, {'name': 'verAplic', 'type': 'xs:string'}, None),
MemberSpec_('cStat', ['TStat', 'xs:string'], 0, 0, {'name': 'cStat', 'type': 'xs:string'}, None),
MemberSpec_('xMotivo', ['TMotivo', 'nfe:TString'], 0, 0, {'name': 'xMotivo', 'type': 'xs:string'}, None),
MemberSpec_('cUF', ['TCodUfIBGE', 'xs:string'], 0, 0, {'name': 'cUF', 'type': 'xs:string'}, None),
MemberSpec_('dhRecbto', ['TDateTimeUTC', 'xs:string'], 0, 0, {'name': 'dhRecbto', 'type': 'xs:string'}, None),
MemberSpec_('tMed', ['TMed', 'xs:string'], 0, 1, {'minOccurs': '0', 'name': 'tMed', 'type': 'xs:string'}, None),
MemberSpec_('dhRetorno', ['TDateTimeUTC', 'xs:string'], 0, 1, {'minOccurs': '0', 'name': 'dhRetorno', 'type': 'xs:string'}, None),
MemberSpec_('xObs', ['TMotivo', 'nfe:TString'], 0, 1, {'minOccurs': '0', 'name': 'xObs', 'type': 'xs:string'}, None),
]
subclass = None
superclass = None
def __init__(self, versao=None, tpAmb=None, verAplic=None, cStat=None, xMotivo=None, cUF=None, dhRecbto=None, tMed=None, dhRetorno=None, xObs=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.versao = _cast(None, versao)
self.versao_nsprefix_ = None
self.tpAmb = tpAmb
self.validate_TAmb(self.tpAmb)
self.tpAmb_nsprefix_ = None
self.verAplic = verAplic
self.validate_TVerAplic(self.verAplic)
self.verAplic_nsprefix_ = None
self.cStat = cStat
self.validate_TStat(self.cStat)
self.cStat_nsprefix_ = None
self.xMotivo = xMotivo
self.validate_TMotivo(self.xMotivo)
self.xMotivo_nsprefix_ = None
self.cUF = cUF
self.validate_TCodUfIBGE(self.cUF)
self.cUF_nsprefix_ = None
self.dhRecbto = dhRecbto
self.validate_TDateTimeUTC(self.dhRecbto)
self.dhRecbto_nsprefix_ = None
self.tMed = tMed
self.validate_TMed(self.tMed)
self.tMed_nsprefix_ = None
self.dhRetorno = dhRetorno
self.validate_TDateTimeUTC(self.dhRetorno)
self.dhRetorno_nsprefix_ = None
self.xObs = xObs
self.validate_TMotivo(self.xObs)
self.xObs_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TRetConsStatServ)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TRetConsStatServ.subclass:
return TRetConsStatServ.subclass(*args_, **kwargs_)
else:
return TRetConsStatServ(*args_, **kwargs_)
factory = staticmethod(factory)
def validate_TAmb(self, value):
result = True
# Validate type TAmb, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['1', '2']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on TAmb' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_TVerAplic(self, value):
result = True
# Validate type TVerAplic, a restriction on nfe:TString.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 20:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on TVerAplic' % {"value": value, "lineno": lineno} )
result = False
if len(value) < 1:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd minLength restriction on TVerAplic' % {"value" : value, "lineno": lineno} )
result = False
if not self.gds_validate_simple_patterns(
self.validate_TVerAplic_patterns_, value):
self.gds_collector_.add_message('Value "%s" does not match xsd pattern restrictions: %s' % (encode_str_2_3(value), self.validate_TVerAplic_patterns_, ))
result = False
return result
validate_TVerAplic_patterns_ = [['^([!-ÿ]{1}[ -ÿ]{0,}[!-ÿ]{1}|[!-ÿ]{1})$']]
def validate_TStat(self, value):
result = True
# Validate type TStat, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 3:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on TStat' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
if not self.gds_validate_simple_patterns(
self.validate_TStat_patterns_, value):
self.gds_collector_.add_message('Value "%s" does not match xsd pattern restrictions: %s' % (encode_str_2_3(value), self.validate_TStat_patterns_, ))
result = False
return result
validate_TStat_patterns_ = [['^([0-9]{3})$']]
def validate_TMotivo(self, value):
result = True
# Validate type TMotivo, a restriction on nfe:TString.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 255:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on TMotivo' % {"value": value, "lineno": lineno} )
result = False
if len(value) < 1:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd minLength restriction on TMotivo' % {"value" : value, "lineno": lineno} )
result = False
if not self.gds_validate_simple_patterns(
self.validate_TMotivo_patterns_, value):
self.gds_collector_.add_message('Value "%s" does not match xsd pattern restrictions: %s' % (encode_str_2_3(value), self.validate_TMotivo_patterns_, ))
result = False
return result
validate_TMotivo_patterns_ = [['^([!-ÿ]{1}[ -ÿ]{0,}[!-ÿ]{1}|[!-ÿ]{1})$']]
def validate_TCodUfIBGE(self, value):
result = True
# Validate type TCodUfIBGE, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['11', '12', '13', '14', '15', '16', '17', '21', '22', '23', '24', '25', '26', '27', '28', '29', '31', '32', '33', '35', '41', '42', '43', '50', '51', '52', '53']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on TCodUfIBGE' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_TDateTimeUTC(self, value):
result = True
# Validate type TDateTimeUTC, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if not self.gds_validate_simple_patterns(
self.validate_TDateTimeUTC_patterns_, value):
self.gds_collector_.add_message('Value "%s" does not match xsd pattern restrictions: %s' % (encode_str_2_3(value), self.validate_TDateTimeUTC_patterns_, ))
result = False
return result
validate_TDateTimeUTC_patterns_ = [['^((((20(([02468][048])|([13579][26]))-02-29))|(20[0-9][0-9])-((((0[1-9])|(1[0-2]))-((0[1-9])|(1\\d)|(2[0-8])))|((((0[13578])|(1[02]))-31)|(((0[1,3-9])|(1[0-2]))-(29|30)))))T(20|21|22|23|[0-1]\\d):[0-5]\\d:[0-5]\\d([\\-,\\+](0[0-9]|10|11):00|([\\+](12):00)))$']]
def validate_TMed(self, value):
result = True
# Validate type TMed, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if not self.gds_validate_simple_patterns(
self.validate_TMed_patterns_, value):
self.gds_collector_.add_message('Value "%s" does not match xsd pattern restrictions: %s' % (encode_str_2_3(value), self.validate_TMed_patterns_, ))
result = False
return result
validate_TMed_patterns_ = [['^([0-9]{1,4})$']]
def validate_TVerConsStatServ(self, value):
# Validate type TVerConsStatServ, a restriction on xs:token.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if not self.gds_validate_simple_patterns(
self.validate_TVerConsStatServ_patterns_, value):
self.gds_collector_.add_message('Value "%s" does not match xsd pattern restrictions: %s' % (encode_str_2_3(value), self.validate_TVerConsStatServ_patterns_, ))
validate_TVerConsStatServ_patterns_ = [['^(4\\.00)$']]
def hasContent_(self):
if (
            self.tpAmb is not None or
            self.verAplic is not None or
            self.cStat is not None or
            self.xMotivo is not None or
            self.cUF is not None or
            self.dhRecbto is not None or
            self.tMed is not None or
            self.dhRetorno is not None or
            self.xObs is not None
        ):
            return True
        else:
            return False
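# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): every validate_*
# method above follows the same pattern -- check the base simple type, then
# the length / pattern / enumeration facets, and report failures through a
# collector object.  The stand-alone helper below mirrors validate_TStat for a
# three-digit status code.  "MessageCollector" is a hypothetical stand-in for
# the generateDS collector; only its add_message() method is assumed.
# ---------------------------------------------------------------------------
import re


class MessageCollector:
    def __init__(self):
        self.messages = []

    def add_message(self, msg):
        self.messages.append(msg)


def validate_tstat_standalone(value, collector):
    ok = True
    if not isinstance(value, str):
        collector.add_message('Value %r is not of the correct base simple type (str)' % (value,))
        return False
    if len(value) > 3:
        collector.add_message('Value %r does not match xsd maxLength restriction on TStat' % (value,))
        ok = False
    if re.match(r'^([0-9]{3})$', value) is None:
        collector.add_message('Value %r does not match xsd pattern restrictions on TStat' % (value,))
        ok = False
    return ok


if __name__ == '__main__':
    collector = MessageCollector()
    assert validate_tstat_standalone('107', collector)
    assert not validate_tstat_standalone('10a', collector)
    assert collector.messages  # the pattern failure above was recorded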
required: If C{True}, indicates that the attribute must appear
in the DOM node used to create an instance of the corresponding
L{pyxb.binding.basis.complexTypeDefinition}. The default value is
        C{False}. No more than one of L{required} and L{prohibited} should be
assigned C{True}.
@type required: C{bool}
@keyword prohibited: If C{True}, indicates that the attribute must
B{not} appear in the DOM node used to create an instance of the
corresponding L{pyxb.binding.basis.complexTypeDefinition}. The
        default value is C{False}. No more than one of L{required} and
L{prohibited} should be assigned C{True}.
@type prohibited: C{bool}
@raise pyxb.BadTypeValueError: the L{unicode_default} cannot be used
to initialize an instance of L{data_type}
"""
self.__name = name
self.__id = id
self.__key = key
self.__dataType = data_type
self.__unicodeDefault = unicode_default
if self.__unicodeDefault is not None:
self.__defaultValue = self.__dataType.Factory(self.__unicodeDefault)
self.__fixed = fixed
self.__required = required
self.__prohibited = prohibited
def name (self):
"""The expanded name of the element.
@rtype: L{pyxb.namespace.ExpandedName}
"""
return self.__name
def defaultValue (self):
"""The default value of the attribute."""
return self.__defaultValue
def fixed (self):
"""C{True} iff the value of the attribute cannot be changed."""
return self.__fixed
def required (self):
"""Return True iff the attribute must be assigned a value."""
return self.__required
def prohibited (self):
"""Return True iff the attribute must not be assigned a value."""
return self.__prohibited
def provided (self, ctd_instance):
"""Return True iff the given instance has been explicitly given a
value for the attribute.
This is used for things like only generating an XML attribute
assignment when a value was originally given (even if that value
happens to be the default).
"""
return self.__getProvided(ctd_instance)
def id (self):
"""Tag used within Python code for the attribute.
This is not used directly in the default code generation template."""
return self.__id
def key (self):
"""String used as key within object dictionary when storing attribute value."""
return self.__key
def dataType (self):
"""The subclass of L{pyxb.binding.basis.simpleTypeDefinition} of which any attribute value must be an instance."""
return self.__dataType
def __getValue (self, ctd_instance):
"""Retrieve the value information for this attribute in a binding instance.
@param ctd_instance: The instance object from which the attribute is to be retrieved.
@type ctd_instance: subclass of L{pyxb.binding.basis.complexTypeDefinition}
@return: C{(provided, value)} where C{provided} is a C{bool} and
C{value} is C{None} or an instance of the attribute's datatype.
"""
return getattr(ctd_instance, self.__key, (False, None))
def __getProvided (self, ctd_instance):
return self.__getValue(ctd_instance)[0]
def value (self, ctd_instance):
"""Get the value of the attribute from the instance."""
return self.__getValue(ctd_instance)[1]
def __setValue (self, ctd_instance, new_value, provided):
return setattr(ctd_instance, self.__key, (provided, new_value))
def reset (self, ctd_instance):
"""Set the value of the attribute in the given instance to be its
default value, and mark that it has not been provided."""
self.__setValue(ctd_instance, self.__defaultValue, False)
def addDOMAttribute (self, dom_support, ctd_instance, element):
"""If this attribute as been set, add the corresponding attribute to the DOM element."""
( provided, value ) = self.__getValue(ctd_instance)
if provided:
assert value is not None
dom_support.addAttribute(element, self.__name, value.xsdLiteral())
return self
def validate (self, ctd_instance):
(provided, value) = self.__getValue(ctd_instance)
if value is not None:
if self.__prohibited:
raise pyxb.ProhibitedAttributeError('Value given for prohibited attribute %s' % (self.__name,))
if self.__required and not provided:
assert self.__fixed
raise pyxb.MissingAttributeError('Fixed required attribute %s was never set' % (self.__name,))
if not self.__dataType._IsValidValue(value):
raise pyxb.BindingValidationError('Attribute %s value type %s not %s' % (self.__name, type(value), self.__dataType))
self.__dataType.XsdConstraintsOK(value)
else:
if self.__required:
raise pyxb.MissingAttributeError('Required attribute %s does not have a value' % (self.__name,))
return True
def set (self, ctd_instance, new_value):
"""Set the value of the attribute.
This validates the value against the data type, creating a new instance if necessary.
@param ctd_instance: The binding instance for which the attribute
value is to be set
@type ctd_instance: subclass of L{pyxb.binding.basis.complexTypeDefinition}
@param new_value: The value for the attribute
@type new_value: An C{xml.dom.Node} instance, or any value that is
permitted as the input parameter to the C{Factory} method of the
attribute's datatype.
"""
provided = True
if isinstance(new_value, xml.dom.Node):
unicode_value = self.__name.getAttribute(new_value)
if unicode_value is None:
if self.__required:
raise pyxb.MissingAttributeError('Required attribute %s from %s not found' % (self.__name, ctd_instance._ExpandedName or type(ctd_instance)))
provided = False
unicode_value = self.__unicodeDefault
if unicode_value is None:
# Must be optional and absent
provided = False
new_value = None
else:
new_value = unicode_value
else:
assert new_value is not None
if self.__prohibited:
raise pyxb.ProhibitedAttributeError('Value given for prohibited attribute %s' % (self.__name,))
if (new_value is not None) and (not isinstance(new_value, self.__dataType)):
new_value = self.__dataType.Factory(new_value)
if self.__fixed and (new_value != self.__defaultValue):
raise pyxb.AttributeChangeError('Attempt to change value of fixed attribute %s' % (self.__name,))
self.__setValue(ctd_instance, new_value, provided)
return new_value
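    # Example (added comment): given an AttributeUse whose dataType is an
    # integer binding, au.set(instance, "42") runs the string through the
    # datatype's Factory, enforces the prohibited/fixed constraints above, and
    # stores the value with provided=True so addDOMAttribute will emit it.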
def _description (self, name_only=False, user_documentation=True):
if name_only:
return str(self.__name)
assert issubclass(self.__dataType, basis._TypeBinding_mixin)
desc = [ str(self.__id), ': ', str(self.__name), ' (', self.__dataType._description(name_only=True, user_documentation=False), '), ' ]
if self.__required:
desc.append('required')
elif self.__prohibited:
desc.append('prohibited')
else:
desc.append('optional')
if self.__defaultValue is not None:
desc.append(', ')
if self.__fixed:
desc.append('fixed')
else:
desc.append('default')
desc.extend(['=', self.__unicodeDefault ])
return ''.join(desc)
class ElementUse (ContentState_mixin, ContentModel_mixin):
"""Aggregate the information relevant to an element of a complex type.
This includes the L{original tag name<name>}, the spelling of L{the
corresponding object in Python <id>}, an L{indicator<isPlural>} of whether
multiple instances might be associated with the field, and other relevant
    information.
"""
def name (self):
"""The expanded name of the element.
@rtype: L{pyxb.namespace.ExpandedName}
"""
return self.__name
__name = None
def id (self):
"""The string name of the binding class field used to hold the element
values.
This is the user-visible name, and excepting disambiguation will be
equal to the local name of the element."""
return self.__id
__id = None
# The dictionary key used to identify the value of the element. The value
# is the same as that used for private member variables in the binding
# class within which the element declaration occurred.
__key = None
def elementBinding (self):
"""The L{basis.element} instance identifying the information
associated with the element declaration.
"""
return self.__elementBinding
def _setElementBinding (self, element_binding):
# Set the element binding for this use. Only visible at all because
# we have to define the uses before the element instances have been
# created.
self.__elementBinding = element_binding
return self
__elementBinding = None
def isPlural (self):
"""True iff the content model indicates that more than one element
can legitimately belong to this use.
This includes elements in particles with maxOccurs greater than one,
and when multiple elements with the same NCName are declared in the
same type.
"""
return self.__isPlural
__isPlural = False
def __init__ (self, name, id, key, is_plural, element_binding=None):
"""Create an ElementUse instance.
@param name: The name by which the element is referenced in the XML
@type name: L{pyxb.namespace.ExpandedName}
@param id: The Python name for the element within the containing
L{pyxb.basis.binding.complexTypeDefinition}. This is a public
identifier, albeit modified to be unique, and is usually used as the
name of the element's inspector method or property.
@type id: C{str}
@param key: The string used to store the element
value in the dictionary of the containing
L{pyxb.basis.binding.complexTypeDefinition}. This is mangled so
that it is unique among and is treated as a Python private member.
@type key: C{str}
@param is_plural: If C{True}, documents for the corresponding type may
have multiple instances of this element. As a consequence, the value
of the element will be a list. If C{False}, the value will be C{None}
if the element is absent, and a reference to an instance of the type
identified by L{pyxb.binding.basis.element.typeDefinition} if present.
@type is_plural: C{bool}
@param element_binding: Reference to the class that serves as the
binding for the element.
"""
self.__name = name
self.__id = id
self.__key = key
self.__isPlural = is_plural
self.__elementBinding = element_binding
def defaultValue (self):
"""Return the default value for this element.
@todo: Right now, this returns C{None} for non-plural and an empty
list for plural elements. Need to support schema-specified default
values for simple-type content.
"""
if self.isPlural():
return []
return None
def value (self, ctd_instance):
"""Return the value for this use within the given instance."""
return getattr(ctd_instance, self.__key, self.defaultValue())
def reset (self, ctd_instance):
"""Set the value | |
# Repository: cowboygneox/boto3_type_annotations
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def associate_role_to_group(self, GroupId: str, RoleArn: str = None) -> Dict:
"""
Associates a role with a group. Your Greengrass core will use the role to access AWS cloud services. The role's permissions should allow Greengrass core Lambda functions to perform actions against the cloud.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/AssociateRoleToGroup>`_
**Request Syntax**
::
response = client.associate_role_to_group(
GroupId='string',
RoleArn='string'
)
**Response Syntax**
::
{
'AssociatedAt': 'string'
}
**Response Structure**
- *(dict) --* success
- **AssociatedAt** *(string) --* The time, in milliseconds since the epoch, when the role ARN was associated with the group.
:type GroupId: string
:param GroupId: **[REQUIRED]** The ID of the Greengrass group.
:type RoleArn: string
:param RoleArn: The ARN of the role you wish to associate with this group.
:rtype: dict
:returns:
"""
pass
def associate_service_role_to_account(self, RoleArn: str = None) -> Dict:
"""
Associates a role with your account. AWS IoT Greengrass will use the role to access your Lambda functions and AWS IoT resources. This is necessary for deployments to succeed. The role must have at least minimum permissions in the policy ''AWSGreengrassResourceAccessRolePolicy''.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/AssociateServiceRoleToAccount>`_
**Request Syntax**
::
response = client.associate_service_role_to_account(
RoleArn='string'
)
**Response Syntax**
::
{
'AssociatedAt': 'string'
}
**Response Structure**
- *(dict) --* success
- **AssociatedAt** *(string) --* The time when the service role was associated with the account.
:type RoleArn: string
:param RoleArn: The ARN of the service role you wish to associate with your account.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
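    # Example (added comment): the operation name is the client method name.
    # The guard below keeps this correct even if no paginator is registered
    # for the chosen operation ('list_groups' is used only as an illustration):
    #   if client.can_paginate('list_groups'):
    #       for page in client.get_paginator('list_groups').paginate():
    #           ...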
def create_connector_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a connector definition. You may provide the initial version of the connector definition now or use ''CreateConnectorDefinitionVersion'' at a later time.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateConnectorDefinition>`_
**Request Syntax**
::
response = client.create_connector_definition(
AmznClientToken='string',
InitialVersion={
'Connectors': [
{
'ConnectorArn': 'string',
'Id': 'string',
'Parameters': {
'string': 'string'
}
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the connector definition.
- **Connectors** *(list) --* A list of references to connectors in this version, with their corresponding configuration settings.
- *(dict) --* Information about a connector. Connectors run on the Greengrass core and contain built-in integration with local infrastructure, device protocols, AWS, and other cloud services.
- **ConnectorArn** *(string) --* The ARN of the connector.
- **Id** *(string) --* A descriptive or arbitrary ID for the connector. This value must be unique within the connector definition version. Max length is 128 characters with pattern [a-zA-Z0-9:_-]+.
- **Parameters** *(dict) --* The parameters or configuration that the connector uses.
- *(string) --*
- *(string) --*
:type Name: string
:param Name: The name of the connector definition.
:type tags: dict
:param tags: Tag(s) to add to the new resource
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_connector_definition_version(self, ConnectorDefinitionId: str, AmznClientToken: str = None, Connectors: List = None) -> Dict:
"""
Creates a version of a connector definition which has already been defined.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateConnectorDefinitionVersion>`_
**Request Syntax**
::
response = client.create_connector_definition_version(
AmznClientToken='string',
ConnectorDefinitionId='string',
Connectors=[
{
'ConnectorArn': 'string',
'Id': 'string',
'Parameters': {
'string': 'string'
}
},
]
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type ConnectorDefinitionId: string
:param ConnectorDefinitionId: **[REQUIRED]** The ID of the connector definition.
:type Connectors: list
:param Connectors: A list of references to connectors in this version, with their corresponding configuration settings.
- *(dict) --* Information about a connector. Connectors run on the Greengrass core and contain built-in integration with local infrastructure, device protocols, AWS, and other cloud services.
- **ConnectorArn** *(string) --* The ARN of the connector.
- **Id** *(string) --* A descriptive or arbitrary ID for the connector. This value must be unique within the connector definition version. Max length is 128 characters with pattern [a-zA-Z0-9:_-]+.
- **Parameters** *(dict) --* The parameters or configuration that the connector uses.
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def create_core_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict:
"""
Creates a core definition. You may provide the initial version of the core definition now or use ''CreateCoreDefinitionVersion'' at a later time. Greengrass groups must each contain exactly one Greengrass core.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateCoreDefinition>`_
**Request Syntax**
::
response = client.create_core_definition(
AmznClientToken='string',
InitialVersion={
'Cores': [
{
'CertificateArn': 'string',
'Id': 'string',
'SyncShadow': True|False,
'ThingArn': 'string'
},
]
},
Name='string',
tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --* The ARN of the definition.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created.
- **Id** *(string) --* The ID of the definition.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated.
- **LatestVersion** *(string) --* The latest version of the definition.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the definition.
- **Name** *(string) --* The name of the definition.
:type AmznClientToken: string
:param AmznClientToken: A client token used to correlate requests and responses.
:type InitialVersion: dict
:param InitialVersion: Information about the initial version of the core definition.
- **Cores** *(list) --* A list of cores in the core definition version.
- *(dict) --* Information about a core.
- **CertificateArn** *(string) --* The ARN of the certificate associated with the core.
- **Id** *(string) --* A descriptive or arbitrary ID for the core. This value must be unique within the core definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'.
- **SyncShadow** *(boolean) --* If true, the core\'s local shadow is automatically synced with the cloud.
- **ThingArn** *(string) --* The ARN of the thing which is the core.
:type Name: string
        :param Name: The name of the core definition.
        :type tags: dict
        :param tags: Tag(s) to add to the new resource
        - *(string) --*
        - *(string) --*
        :rtype: dict
        :returns:
        """
        pass
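# ---------------------------------------------------------------------------
# Usage sketch (added for illustration): these stubs only annotate the boto3
# "greengrass" client; real calls go through boto3 itself.  The region below
# is a placeholder and valid AWS credentials are assumed to be configured.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import boto3

    greengrass = boto3.client('greengrass', region_name='us-east-1')

    # Create a core definition with no initial version, then print its ARN
    # and ID (fields mirror the Response Syntax documented above).
    definition = greengrass.create_core_definition(Name='example-core-definition')
    print(definition['Arn'], definition['Id'])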
    # convert units from mm to cm
numslices = int(gdata['nslices'])
numangles = int(gdata['nangles'])
angularrange = float(gdata['arange'])
numrays = int(gdata['nrays'])
npad = int(np.ceil(numrays * np.sqrt(2)) - numrays)//2 if npad is None else npad
projused = (0,numangles-1,1) if projused is None else projused
# ndark = int(gdata['num_dark_fields'])
# ind_dark = list(range(0, ndark))
# group_dark = [numangles - 1]
inter_bright = int(gdata['i0cycle'])
nflat = int(gdata['num_bright_field'])
ind_flat = list(range(0, nflat))
if inter_bright > 0:
group_flat = list(range(0, numangles, inter_bright))
if group_flat[-1] != numangles - 1:
group_flat.append(numangles - 1)
elif inter_bright == 0:
group_flat = [0, numangles - 1]
else:
group_flat = None
ind_tomo = list(range(0, numangles))
floc_independent = dxchange.reader._map_loc(ind_tomo, group_flat)
#figure out the angle list (a list of angles, one per projection image)
dtemp = datafile[list(datafile.keys())[0]]
fltemp = list(dtemp.keys())
firstangle = float(dtemp[fltemp[0]].attrs.get('rot_angle',0))
if anglelist is None:
        # the offset angle is measured relative to the angle of the first image, which is usually 0, but for timbir data may not be.
        # we add 270 to match the orientation used by previous software at bl832
angle_offset = 270 + angle_offset - firstangle
anglelist = tomopy.angles(numangles, angle_offset, angle_offset-angularrange)
elif anglelist==-1:
anglelist = np.zeros(shape=numangles)
for icount in range(0,numangles):
anglelist[icount] = np.pi/180*(270 + angle_offset - float(dtemp[fltemp[icount]].attrs['rot_angle']))
    # if projused differs from the default, numangles and angularrange need to change accordingly
    # we can't do useNormalize_nf and doOutliers2D at the same time, or doOutliers1D and doOutliers2D at the same time, because of the way we chunk; for now just disable one of them
if useNormalize_nf==True and doOutliers2D==True:
useNormalize_nf = False
print("we cannot currently do useNormalize_nf and doOutliers2D at the same time, turning off useNormalize_nf")
if doOutliers2D==True and doOutliers1D==True:
doOutliers1D = False
print("we cannot currently do doOutliers1D and doOutliers2D at the same time, turning off doOutliers1D")
    # let the user request either the central N slices (sinoused[0] < 0) or an explicit slice range, without knowing numslices a priori
if sinoused is None:
sinoused = (0,numslices,1)
elif sinoused[0]<0:
sinoused=(int(np.floor(numslices/2.0)-np.ceil(sinoused[1]/2.0)),int(np.floor(numslices/2.0)+np.floor(sinoused[1]/2.0)),1)
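    # Worked example (added comment): with numslices = 1024, requesting the
    # central 10 slices via sinoused = (-1, 10, 1) yields
    # (512 - 5, 512 + 5, 1) == (507, 517, 1).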
num_proj_per_chunk = np.minimum(chunk_proj,projused[1]-projused[0])
numprojchunks = (projused[1]-projused[0]-1)//num_proj_per_chunk+1
num_sino_per_chunk = np.minimum(chunk_sino,sinoused[1]-sinoused[0])
numsinochunks = (sinoused[1]-sinoused[0]-1)//num_sino_per_chunk+1
numprojused = (projused[1]-projused[0])//projused[2]
numsinoused = (sinoused[1]-sinoused[0])//sinoused[2]
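    # Worked example (added comment): with projused = (0, 1313, 1) and
    # chunk_proj = 100, num_proj_per_chunk = min(100, 1313) = 100 and
    # numprojchunks = (1313 - 0 - 1)//100 + 1 = 14, i.e. thirteen full chunks
    # of 100 projections plus a final chunk of 13.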
BeamHardeningCoefficients = (0, 1, 0, 0, 0, .1) if BeamHardeningCoefficients is None else BeamHardeningCoefficients
if cor is None:
print("Detecting center of rotation", end="")
if angularrange>300:
lastcor = int(np.floor(numangles/2)-1)
else:
lastcor = numangles-1
#I don't want to see the warnings about the reader using a deprecated variable in dxchange
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tomo, flat, dark, floc = dxchange.read_als_832h5(inputPath+filename,ind_tomo=(0,lastcor))
tomo = tomo.astype(np.float32)
if useNormalize_nf:
tomopy.normalize_nf(tomo, flat, dark, floc, out=tomo)
else:
tomopy.normalize(tomo, flat, dark, out=tomo)
if corFunction == 'vo':
# same reason for catching warnings as above
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cor = tomopy.find_center_vo(tomo, ind=voInd, smin=voSMin, smax=voSMax, srad=voSRad, step=voStep,
ratio=voRatio, drop=voDrop)
elif corFunction == 'nm':
cor = tomopy.find_center(tomo, tomopy.angles(numangles, angle_offset, angle_offset-angularrange),
ind=nmInd, init=nmInit, tol=nmTol, mask=nmMask, ratio=nmRatio,
sinogram_order=nmSinoOrder)
elif corFunction == 'pc':
cor = tomopy.find_center_pc(tomo[0], tomo[1], tol=0.25)
else:
raise ValueError("\'corFunction\' must be one of: [ pc, vo, nm ].")
print(", {}".format(cor))
else:
print("using user input center of {}".format(cor))
function_list = []
if doOutliers1D:
function_list.append('remove_outlier1d')
if doOutliers2D:
function_list.append('remove_outlier2d')
if useNormalize_nf:
function_list.append('normalize_nf')
else:
function_list.append('normalize')
function_list.append('minus_log')
if doBeamHardening:
function_list.append('beam_hardening')
if doFWringremoval:
function_list.append('remove_stripe_fw')
if doTIringremoval:
function_list.append('remove_stripe_ti')
if doSFringremoval:
function_list.append('remove_stripe_sf')
if correcttilt:
function_list.append('correcttilt')
if use360to180:
function_list.append('do_360_to_180')
if doPhaseRetrieval:
function_list.append('phase_retrieval')
function_list.append('recon_mask')
if doPolarRing:
function_list.append('polar_ring')
if castTo8bit:
function_list.append('castTo8bit')
if doBilateralFilter:
function_list.append('bilateral_filter')
function_list.append('write_output')
# Figure out first direction to slice
for func in function_list:
if slice_dir[func] != 'both':
axis = slice_dir[func]
break
done = False
curfunc = 0
curtemp = 0
while True: # Loop over reading data in certain chunking direction
if axis=='proj':
niter = numprojchunks
else:
niter = numsinochunks
for y in range(niter): # Loop over chunks
print("{} chunk {} of {}".format(axis, y+1, niter))
if curfunc==0:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if axis=='proj':
tomo, flat, dark, floc = dxchange.read_als_832h5(inputPath+filename,ind_tomo=range(y*num_proj_per_chunk+projused[0],np.minimum((y + 1)*num_proj_per_chunk+projused[0],numangles)),sino=(sinoused[0],sinoused[1], sinoused[2]) )
else:
tomo, flat, dark, floc = dxchange.read_als_832h5(inputPath+filename,ind_tomo=range(projused[0],projused[1],projused[2]),sino=(y*num_sino_per_chunk+sinoused[0],np.minimum((y + 1)*num_sino_per_chunk+sinoused[0],numslices),1) )
else:
if axis=='proj':
start, end = y * num_proj_per_chunk, np.minimum((y + 1) * num_proj_per_chunk,numprojused)
tomo = dxchange.reader.read_hdf5(tempfilenames[curtemp],'/tmp/tmp',slc=((start,end,1),(0,numslices,1),(0,numrays,1))) #read in intermediate file
else:
start, end = y * num_sino_per_chunk, np.minimum((y + 1) * num_sino_per_chunk,numsinoused)
tomo = dxchange.reader.read_hdf5(tempfilenames[curtemp],'/tmp/tmp',slc=((0,numangles,1),(start,end,1),(0,numrays,1)))
dofunc = curfunc
keepvalues = None
while True: # Loop over operations to do in current chunking direction
func_name = function_list[dofunc]
newaxis = slice_dir[func_name]
if newaxis != 'both' and newaxis != axis:
# We have to switch axis, so flush to disk
if y==0:
try:
os.remove(tempfilenames[1-curtemp])
except OSError:
pass
appendaxis = 1 if axis=='sino' else 0
dxchange.writer.write_hdf5(tomo,fname=tempfilenames[1-curtemp],gname='tmp',dname='tmp',overwrite=False,appendaxis=appendaxis) #writing intermediate file...
break
print(func_name, end=" ")
curtime = time.time()
if func_name == 'remove_outlier1d':
tomo = tomo.astype(np.float32,copy=False)
remove_outlier1d(tomo, outlier_diff1D, size=outlier_size1D, out=tomo)
if func_name == 'remove_outlier2d':
tomo = tomo.astype(np.float32,copy=False)
tomopy.remove_outlier(tomo, outlier_diff2D, size=outlier_size2D, axis=0, out=tomo)
elif func_name == 'normalize_nf':
tomo = tomo.astype(np.float32,copy=False)
tomopy.normalize_nf(tomo, flat, dark, floc_independent, out=tomo) #use floc_independent b/c when you read file in proj chunks, you don't get the correct floc returned right now to use here.
elif func_name == 'normalize':
tomo = tomo.astype(np.float32,copy=False)
tomopy.normalize(tomo, flat, dark, out=tomo)
elif func_name == 'minus_log':
mx = np.float32(0.00000000000000000001)
ne.evaluate('where(tomo>mx, tomo, mx)', out=tomo)
tomopy.minus_log(tomo, out=tomo)
elif func_name == 'beam_hardening':
loc_dict = {'a{}'.format(i):np.float32(val) for i,val in enumerate(BeamHardeningCoefficients)}
tomo = ne.evaluate('a0 + a1*tomo + a2*tomo**2 + a3*tomo**3 + a4*tomo**4 + a5*tomo**5', local_dict=loc_dict, out=tomo)
elif func_name == 'remove_stripe_fw':
tomo = tomopy.remove_stripe_fw(tomo, sigma=ringSigma, level=ringLevel, pad=True, wname=ringWavelet)
elif func_name == 'remove_stripe_ti':
tomo = tomopy.remove_stripe_ti(tomo, nblock=ringNBlock, alpha=ringAlpha)
elif func_name == 'remove_stripe_sf':
tomo = tomopy.remove_stripe_sf(tomo, size=ringSize)
elif func_name == 'correcttilt':
if tiltcenter_slice is None:
tiltcenter_slice = numslices/2.
if tiltcenter_det is None:
tiltcenter_det = tomo.shape[2]/2
new_center = tiltcenter_slice - 0.5 - sinoused[0]
center_det = tiltcenter_det - 0.5
#add padding of 10 pixels, to be unpadded right after tilt correction. This makes the tilted image not have zeros at certain edges, which matters in cases where sample is bigger than the field of view. For the small amounts we are generally tilting the images, 10 pixels is sufficient.
# tomo = tomopy.pad(tomo, 2, npad=10, mode='edge')
# center_det = center_det + 10
cntr = (center_det, new_center)
for b in range(tomo.shape[0]):
tomo[b] = st.rotate(tomo[b], correcttilt, center=cntr, preserve_range=True, order=1, mode='edge', clip=True) #center=None means image is rotated around its center; order=1 is default, order of spline interpolation
# tomo = tomo[:, :, 10:-10]
elif func_name == 'do_360_to_180':
# Keep values around for processing the next chunk in the list
keepvalues = [angularrange, numangles, projused, num_proj_per_chunk, numprojchunks, numprojused, numrays, anglelist]
#why -.5 on one and not on the other?
if tomo.shape[0]%2>0:
tomo = sino_360_to_180(tomo[0:-1,:,:], overlap=int(np.round((tomo.shape[2]-cor-.5))*2), rotation='right')
angularrange = angularrange/2 - angularrange/(tomo.shape[0]-1)
else:
tomo = sino_360_to_180(tomo[:,:,:], overlap=int(np.round((tomo.shape[2]-cor))*2), rotation='right')
angularrange = angularrange/2
numangles = int(numangles/2)
projused = (0,numangles-1,1)
num_proj_per_chunk = np.minimum(chunk_proj,projused[1]-projused[0])
numprojchunks = (projused[1]-projused[0]-1)//num_proj_per_chunk+1
numprojused = (projused[1]-projused[0])//projused[2]
numrays = tomo.shape[2]
anglelist = anglelist[:numangles]
elif func_name == 'phase_retrieval':
tomo = tomopy.retrieve_phase(tomo, pixel_size=pxsize, dist=propagation_dist, energy=kev, alpha=alphaReg, pad=True)
elif func_name == 'translation_correction':
                    tomo = linear_translation_correction(tomo, dx=xshift, dy=yshift, interpolation=False)
elif func_name == 'recon_mask':
tomo = tomopy.pad(tomo, 2, npad=npad, mode='edge')
if projIgnoreList is not None:
for badproj in projIgnoreList:
tomo[badproj] = 0
rec = tomopy.recon(tomo, anglelist, center=cor+npad, algorithm='gridrec', filter_name='butterworth', filter_par=[butterworth_cutoff, butterworth_order])
rec = rec[:, npad:-npad, npad:-npad]
rec /= pxsize # convert reconstructed voxel values from 1/pixel to 1/cm
rec = tomopy.circ_mask(rec, 0)
elif func_name == 'polar_ring':
rec = np.ascontiguousarray(rec, dtype=np.float32)
rec = tomopy.remove_ring(rec, theta_min=Rarc, rwidth=Rmaxwidth, thresh_max=Rtmax, thresh=Rthr, thresh_min=Rtmin,out=rec)
elif func_name == 'castTo8bit':
rec = convert8bit(rec, cast8bit_min, cast8bit_max)
elif func_name == 'bilateral_filter':
rec = pyF3D.run_BilateralFilter(rec, spatialRadius=bilateral_srad, rangeRadius=bilateral_rrad)
elif func_name == 'write_output':
dxchange.write_tiff_stack(rec, fname=filenametowrite, start=y*num_sino_per_chunk + sinoused[0])
print('(took {:.2f} seconds)'.format(time.time()-curtime))
dofunc+=1
if dofunc==len(function_list):
break
if y<niter-1 and keepvalues: # Reset original values for next chunk
angularrange, numangles, projused, num_proj_per_chunk, numprojchunks, numprojused, numrays, anglelist = keepvalues
curtemp = 1 - curtemp
curfunc = dofunc
if curfunc==len(function_list):
break
axis = slice_dir[function_list[curfunc]]
print("cleaning up temp files")
for tmpfile in tempfilenames:
try:
os.remove(tmpfile)
except OSError:
pass
print("End Time: "+time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()))
print('It took {:.3f} s to process {}'.format(time.time()-start_time,inputPath+filename))
def recon_from_spreadsheet(filePath):
"""
Runs recon() function using data read from spreadsheet
"""
parameterList = spreadsheet(filePath)
    # assumed completion: one recon() call per spreadsheet row of keyword parameters
    for parameters in parameterList:
        recon(**parameters)
nid.NodeIdType == NodeIdType.String:
nid.NamespaceIndex = uabin.Primitives.UInt16.unpack(data)
nid.Identifier = uabin.Primitives.String.unpack(data)
elif nid.NodeIdType == NodeIdType.ByteString:
nid.NamespaceIndex = uabin.Primitives.UInt16.unpack(data)
nid.Identifier = uabin.Primitives.Bytes.unpack(data)
elif nid.NodeIdType == NodeIdType.Guid:
nid.NamespaceIndex = uabin.Primitives.UInt16.unpack(data)
nid.Identifier = uabin.Primitives.Guid.unpack(data)
else:
raise UaError("Unknown NodeId encoding: " + str(nid.NodeIdType))
if uabin.test_bit(encoding, 7):
nid.NamespaceUri = uabin.Primitives.String.unpack(data)
if uabin.test_bit(encoding, 6):
nid.ServerIndex = uabin.Primitives.UInt32.unpack(data)
return nid
class TwoByteNodeId(NodeId):
def __init__(self, identifier):
NodeId.__init__(self, identifier, 0, NodeIdType.TwoByte)
class FourByteNodeId(NodeId):
def __init__(self, identifier, namespace=0):
NodeId.__init__(self, identifier, namespace, NodeIdType.FourByte)
class NumericNodeId(NodeId):
def __init__(self, identifier, namespace=0):
NodeId.__init__(self, identifier, namespace, NodeIdType.Numeric)
class ByteStringNodeId(NodeId):
def __init__(self, identifier, namespace=0):
NodeId.__init__(self, identifier, namespace, NodeIdType.ByteString)
class GuidNodeId(NodeId):
def __init__(self, identifier, namespace=0):
NodeId.__init__(self, identifier, namespace, NodeIdType.Guid)
class StringNodeId(NodeId):
def __init__(self, identifier, namespace=0):
NodeId.__init__(self, identifier, namespace, NodeIdType.String)
ExpandedNodeId = NodeId
class QualifiedName(FrozenClass):
"""
A string qualified with a namespace index.
"""
def __init__(self, name=None, namespaceidx=0):
if not isinstance(namespaceidx, int):
raise UaError("namespaceidx must be an int")
self.NamespaceIndex = namespaceidx
self.Name = name
self._freeze = True
def to_string(self):
return "{0}:{1}".format(self.NamespaceIndex, self.Name)
@staticmethod
def from_string(string):
if ":" in string:
try:
idx, name = string.split(":", 1)
idx = int(idx)
except (TypeError, ValueError) as ex:
raise UaStringParsingError("Error parsing string {0}".format(string), ex)
else:
idx = 0
name = string
return QualifiedName(name, idx)
def to_binary(self):
packet = []
packet.append(uabin.Primitives.UInt16.pack(self.NamespaceIndex))
packet.append(uabin.Primitives.String.pack(self.Name))
return b''.join(packet)
@staticmethod
def from_binary(data):
obj = QualifiedName()
obj.NamespaceIndex = uabin.Primitives.UInt16.unpack(data)
obj.Name = uabin.Primitives.String.unpack(data)
return obj
def __eq__(self, bname):
return isinstance(bname, QualifiedName) and self.Name == bname.Name and self.NamespaceIndex == bname.NamespaceIndex
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, QualifiedName):
raise TypeError("Cannot compare QualifiedName and {0}".format(other))
if self.NamespaceIndex == other.NamespaceIndex:
return self.Name < other.Name
else:
return self.NamespaceIndex < other.NamespaceIndex
def __str__(self):
return 'QualifiedName({0}:{1})'.format(self.NamespaceIndex, self.Name)
__repr__ = __str__
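    # Usage sketch (added comment): QualifiedName round-trips through its
    # string and binary forms, assuming a seekable binary stream such as
    # io.BytesIO for decoding:
    #   qname = QualifiedName.from_string("2:MyVariable")
    #   assert qname.NamespaceIndex == 2 and qname.Name == "MyVariable"
    #   assert QualifiedName.from_binary(io.BytesIO(qname.to_binary())) == qname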
class LocalizedText(FrozenClass):
"""
    A human-readable string, optionally qualified with a locale.
"""
ua_types = {
"Text": "ByteString",
"Locale": "ByteString"
}
def __init__(self, text=None):
self.Encoding = 0
self.Text = text
if isinstance(self.Text, unicode):
self.Text = self.Text.encode('utf-8')
if self.Text:
self.Encoding |= (1 << 1)
self.Locale = None
self._freeze = True
def to_binary(self):
packet = []
if self.Locale:
self.Encoding |= (1 << 0)
if self.Text:
self.Encoding |= (1 << 1)
packet.append(uabin.Primitives.UInt8.pack(self.Encoding))
if self.Locale:
packet.append(uabin.Primitives.Bytes.pack(self.Locale))
if self.Text:
packet.append(uabin.Primitives.Bytes.pack(self.Text))
return b''.join(packet)
@staticmethod
def from_binary(data):
obj = LocalizedText()
obj.Encoding = ord(data.read(1))
if obj.Encoding & (1 << 0):
obj.Locale = uabin.Primitives.Bytes.unpack(data)
if obj.Encoding & (1 << 1):
obj.Text = uabin.Primitives.Bytes.unpack(data)
return obj
def to_string(self):
        # FIXME: use locale
if self.Text is None:
return ""
return self.Text.decode('utf-8')
def __str__(self):
return 'LocalizedText(' + 'Encoding:' + str(self.Encoding) + ', ' + \
'Locale:' + str(self.Locale) + ', ' + \
'Text:' + str(self.Text) + ')'
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, LocalizedText) and self.Locale == other.Locale and self.Text == other.Text:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
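    # Example (added comment): LocalizedText("hello") sets only bit 1 of
    # Encoding (text present, no locale); assigning Locale before to_binary()
    # additionally sets bit 0, and from_binary() decodes the same two flags.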
class ExtensionObject(FrozenClass):
"""
Any UA object packed as an ExtensionObject
:ivar TypeId:
:vartype TypeId: NodeId
:ivar Body:
:vartype Body: bytes
"""
ua_types = {
"TypeId": "NodeId",
"Encoding": "Byte",
"Body": "ByteString"
}
def __init__(self):
self.TypeId = NodeId()
self.Encoding = 0
self.Body = b''
self._freeze = True
def to_binary(self):
packet = []
if self.Body:
self.Encoding = 0x01
packet.append(self.TypeId.to_binary())
packet.append(uabin.Primitives.UInt8.pack(self.Encoding))
if self.Body:
packet.append(uabin.Primitives.ByteString.pack(self.Body))
return b''.join(packet)
@staticmethod
def from_binary(data):
obj = ExtensionObject()
obj.TypeId = NodeId.from_binary(data)
obj.Encoding = uabin.Primitives.UInt8.unpack(data)
if obj.Encoding & (1 << 0):
obj.Body = uabin.Primitives.ByteString.unpack(data)
return obj
@staticmethod
def from_object(obj):
ext = ExtensionObject()
oid = getattr(ObjectIds, "{0}_Encoding_DefaultBinary".format(obj.__class__.__name__))
ext.TypeId = FourByteNodeId(oid)
ext.Body = obj.to_binary()
return ext
def __str__(self):
return 'ExtensionObject(' + 'TypeId:' + str(self.TypeId) + ', ' + \
'Encoding:' + str(self.Encoding) + ', ' + str(len(self.Body)) + ' bytes)'
__repr__ = __str__
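    # Example (added comment): ExtensionObject.from_object(obj) looks up
    # ObjectIds.<ClassName>_Encoding_DefaultBinary for obj's class, wraps it in
    # a FourByteNodeId, and stores obj.to_binary() as the Body, so any UA
    # structure exposing to_binary() can be packed this way.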
class VariantType(Enum):
"""
The possible types of a variant.
:ivar Null:
:ivar Boolean:
:ivar SByte:
:ivar Byte:
:ivar Int16:
:ivar UInt16:
:ivar Int32:
:ivar UInt32:
:ivar Int64:
:ivar UInt64:
:ivar Float:
:ivar Double:
:ivar String:
:ivar DateTime:
:ivar Guid:
:ivar ByteString:
:ivar XmlElement:
:ivar NodeId:
:ivar ExpandedNodeId:
:ivar StatusCode:
:ivar QualifiedName:
:ivar LocalizedText:
:ivar ExtensionObject:
:ivar DataValue:
:ivar Variant:
:ivar DiagnosticInfo:
"""
Null = 0
Boolean = 1
SByte = 2
Byte = 3
Int16 = 4
UInt16 = 5
Int32 = 6
UInt32 = 7
Int64 = 8
UInt64 = 9
Float = 10
Double = 11
String = 12
DateTime = 13
Guid = 14
ByteString = 15
XmlElement = 16
NodeId = 17
ExpandedNodeId = 18
StatusCode = 19
QualifiedName = 20
LocalizedText = 21
ExtensionObject = 22
DataValue = 23
Variant = 24
DiagnosticInfo = 25
class VariantTypeCustom(object):
"""
Looks like sometime we get variant with other values than those
defined in VariantType.
    FIXME: We should not need this class; as far as I understand the spec,
    variants can only be of VariantType
"""
def __init__(self, val):
self.name = "Custom"
self.value = val
if self.value > 0b00111111:
raise UaError("Cannot create VariantType. VariantType must be {0} > x > {1}, received {2}".format(0b111111, 25, val))
def __str__(self):
return "VariantType.Custom:{0}".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
class Variant(FrozenClass):
"""
Create an OPC-UA Variant object.
    if no argument is given, a Null Variant is created.
    if no variant type is given, the type is guessed from the python type of the value.
    if a Variant is given as value, the new object becomes a copy of the argument.
:ivar Value:
:vartype Value: Any supported type
:ivar VariantType:
:vartype VariantType: VariantType
    :ivar Dimensions:
    :vartype Dimensions: The length of each dimension. Usually guessed from value.
:ivar is_array:
:vartype is_array: If the variant is an array. Usually guessed from value.
"""
def __init__(self, value=None, varianttype=None, dimensions=None, is_array=None):
self.Value = value
self.VariantType = varianttype
self.Dimensions = dimensions
self.is_array = is_array
if self.is_array is None:
if isinstance(value, (list, tuple)):
self.is_array = True
else:
self.is_array = False
self._freeze = True
if isinstance(value, Variant):
self.Value = value.Value
self.VariantType = value.VariantType
if self.VariantType is None:
self.VariantType = self._guess_type(self.Value)
if self.Value is None and not self.is_array and self.VariantType not in (
VariantType.Null,
VariantType.String,
VariantType.DateTime):
raise UaError("Non array Variant of type {0} cannot have value None".format(self.VariantType))
if self.Dimensions is None and isinstance(self.Value, (list, tuple)):
dims = get_shape(self.Value)
if len(dims) > 1:
self.Dimensions = dims
def __eq__(self, other):
if isinstance(other, Variant) and self.VariantType == other.VariantType and self.Value == other.Value:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def _guess_type(self, val):
if isinstance(val, (list, tuple)):
error_val = val
while isinstance(val, (list, tuple)):
if len(val) == 0:
raise UaError("could not guess UA type of variable {0}".format(error_val))
val = val[0]
if val is None:
return VariantType.Null
elif isinstance(val, bool):
return VariantType.Boolean
elif isinstance(val, float):
return VariantType.Double
elif isinstance(val, IntEnum):
return VariantType.Int32
elif isinstance(val, int):
return VariantType.Int64
elif type(val) in (str, unicode):
return VariantType.String
elif isinstance(val, bytes):
return VariantType.ByteString
elif isinstance(val, datetime):
return VariantType.DateTime
elif isinstance(val, uuid.UUID):
return VariantType.Guid
else:
if isinstance(val, object):
try:
return getattr(VariantType, val.__class__.__name__)
except AttributeError:
return VariantType.ExtensionObject
else:
raise UaError("Could not guess UA type of {0} with type {1}, specify UA type".format(val, type(val)))
def __str__(self):
return "Variant(val:{0!s},type:{1})".format(self.Value, self.VariantType)
__repr__ = __str__
def to_binary(self):
b = []
encoding = self.VariantType.value & 0b111111
if self.is_array or isinstance(self.Value, (list, tuple)):
self.is_array = True
encoding = uabin.set_bit(encoding, 7)
if self.Dimensions is not None:
encoding = uabin.set_bit(encoding, 6)
b.append(uabin.Primitives.UInt8.pack(encoding))
b.append(uabin.pack_uatype_array(self.VariantType, flatten(self.Value)))
if self.Dimensions is not None:
b.append(uabin.pack_uatype_array(VariantType.Int32, self.Dimensions))
else:
b.append(uabin.Primitives.UInt8.pack(encoding))
b.append(uabin.pack_uatype(self.VariantType, self.Value))
return b"".join(b)
@staticmethod
def from_binary(data):
dimensions = None
array = False
encoding = ord(data.read(1))
int_type = encoding & 0b00111111
vtype = datatype_to_varianttype(int_type)
if uabin.test_bit(encoding, 7):
value = uabin.unpack_uatype_array(vtype, data)
array = True
else:
value = uabin.unpack_uatype(vtype, data)
if uabin.test_bit(encoding, 6):
dimensions = uabin.unpack_uatype_array(VariantType.Int32, data)
value = reshape(value, dimensions)
return Variant(value, vtype, dimensions, is_array=array)
def reshape(flat, dims):
subdims = dims[1:]
subsize = 1
for i in subdims:
if i == 0:
i = 1
subsize *= i
while dims[0] * subsize > len(flat):
flat.append([])
if not subdims or subdims == [0]:
return flat
return [reshape(flat[i: i + subsize], subdims) for i in range(0, len(flat), subsize)]
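# Worked example (added comment) for reshape() above:
#   reshape([1, 2, 3, 4, 5, 6], [2, 3]) -> [[1, 2, 3], [4, 5, 6]]
# the flat list is padded with empty lists first if the dimensions claim more
# elements than are present.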
def _split_list(l, n):
n = max(1, n)
return [l[i:i + n] for i in range(0, len(l), n)]
def flatten_and_get_shape(mylist):
dims = []
dims.append(len(mylist))
while isinstance(mylist[0], (list, tuple)):
dims.append(len(mylist[0]))
mylist = [item for sublist in mylist for item in sublist]
| |
ext + "_imm", [w_bit, root010, (0xf, 4, 22)],
[OK.XREG_5_9_SP, imm_scaled, src1, src2], OPC_FLAG.STORE | OPC_FLAG.REG_PAIR)
########################################
root011 = (7, 3, 26)
########################################
for ext, reg1, reg2, imm, bits in [
("s", OK.SREG_0_4, OK.SREG_10_14, OK.SIMM_15_21_TIMES_4, (7, 1, 29)),
("d", OK.DREG_0_4, OK.DREG_10_14, OK.SIMM_15_21_TIMES_8, (7, 3, 29)),
("q", OK.QREG_0_4, OK.QREG_10_14, OK.SIMM_15_21_TIMES_16, (7, 5, 29))]:
Opcode("fstp", ext + "_imm_post", [bits, root011, (0xf, 2, 22)],
[OK.XREG_5_9_SP, imm, reg1, reg2, ], OPC_FLAG.STORE | OPC_FLAG.REG_PAIR)
Opcode("fstp", ext + "_imm_pre", [bits, root011, (0xf, 6, 22)],
[OK.XREG_5_9_SP, imm, reg1, reg2], OPC_FLAG.STORE | OPC_FLAG.REG_PAIR)
Opcode("fstp", ext + "_imm", [bits, root011, (0xf, 4, 22)],
[OK.XREG_5_9_SP, imm, reg1, reg2], OPC_FLAG.STORE | OPC_FLAG.REG_PAIR)
Opcode("fldp", ext + "_imm_post", [bits, root011, (0xf, 3, 22)],
[reg1, reg2, OK.XREG_5_9_SP, imm], OPC_FLAG.LOAD | OPC_FLAG.REG_PAIR)
Opcode("fldp", ext + "_imm_pre", [bits, root011, (0xf, 7, 22)],
[reg1, reg2, OK.XREG_5_9_SP, imm], OPC_FLAG.LOAD | OPC_FLAG.REG_PAIR)
Opcode("fldp", ext + "_imm", [bits, root011, (0xf, 5, 22)],
[reg1, reg2, OK.XREG_5_9_SP, imm], OPC_FLAG.LOAD | OPC_FLAG.REG_PAIR)
for ext, bits in [("2s", [(1, 0, 30), (1, 0, 22)]),
("4s", [(1, 1, 30), (1, 0, 22)]),
("2d", [(1, 1, 30), (1, 1, 22)])]:
Opcode("fneg", ext, [root011, (1, 0, 31), (1, 1, 29), (7, 5, 23), (0xfff, 0x83e, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9], OPC_FLAG(0))
#
Opcode("fadd", ext, [root011, (1, 0, 31), (1, 0, 29), (7, 4, 23), (1, 1, 21), (0x3f, 0x35, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("fmaxnm", ext, [root011, (1, 0, 31), (1, 0, 29), (7, 4, 23), (1, 1, 21), (0x3f, 0x31, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("fmax", ext, [root011, (1, 0, 31), (1, 0, 29), (7, 4, 23), (1, 1, 21), (0x3f, 0x3d, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
#
Opcode("faddp", ext, [root011, (1, 0, 31), (1, 1, 29), (7, 4, 23), (1, 1, 21), (0x3f, 0x35, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("fmaxp", ext, [root011, (1, 0, 31), (1, 1, 29), (7, 4, 23), (1, 1, 21), (0x3f, 0x3d, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
for ext, bits in [("8b", [(1, 0, 30), ]),
("16b", [(1, 1, 30), ])]:
Opcode("bit", ext, [root011, (1, 0, 31), (1, 1, 29), (0x1f, 0x15, 21), (0x3f, 0x7, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("eor", ext, [root011, (1, 0, 31), (1, 1, 29), (0x1f, 0x11, 21), (0x3f, 0x7, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
#
Opcode("and", ext, [root011, (1, 0, 31), (1, 0, 29), (0x1f, 0x11, 21), (0x3f, 0x7, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("bic", ext, [root011, (1, 0, 31), (1, 0, 29), (0x1f, 0x13, 21), (0x3f, 0x7, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("orr", ext, [root011, (1, 0, 31), (1, 0, 29), (0x1f, 0x15, 21), (0x3f, 0x7, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("orn", ext, [root011, (1, 0, 31), (1, 0, 29), (0x1f, 0x17, 21), (0x3f, 0x7, 10)] + bits,
[OK.VREG_0_4, OK.VREG_5_9, OK.VREG_16_20], OPC_FLAG(0))
Opcode("movi", "2d", [root011, (1, 0, 31), (1, 1, 30), (1, 1, 29), (0x7f, 0x60, 19),
(0x3f, 0x39, 10)],
[OK.VREG_0_4, OK.IMM_BIT_EXPLODE_5_9_16_18], OPC_FLAG(0))
########################################
root100 = (7, 4, 26)
########################################
for ext, w_bit, w_bit2 in [("w", (1, 0, 31), (1, 0, 22)),
("x", (1, 1, 31), (1, 1, 22))]:
dst_reg = OK.XREG_0_4 if ext == "x" else OK.WREG_0_4
dst_reg_sp = OK.XREG_0_4_SP if ext == "x" else OK.WREG_0_4_SP
src1_reg = OK.XREG_5_9 if ext == "x" else OK.WREG_5_9
src1_reg_sp = OK.XREG_5_9_SP if ext == "x" else OK.WREG_5_9_SP
src2_reg = OK.XREG_16_20 if ext == "x" else OK.WREG_16_20
for name, bits in [("add", [(3, 0, 29), (3, 1, 24)]),
("sub", [(3, 2, 29), (3, 1, 24)])]:
Opcode(name, ext + "_imm", [root100, w_bit, (1, 0, 23)] + bits,
[dst_reg_sp, src1_reg_sp, OK.IMM_SHIFTED_10_21_22], OPC_FLAG.STACK_OPS)
for name, bits in [("adds", [(3, 1, 29), (3, 1, 24)]),
("subs", [(3, 3, 29), (3, 1, 24)])]:
Opcode(name, ext + "_imm", [root100, w_bit, (1, 0, 23)] + bits,
[dst_reg, src1_reg, OK.IMM_SHIFTED_10_21_22], OPC_FLAG.SR_UPDATE)
imm = OK.IMM_10_15_16_22_X if ext == "x" else OK.IMM_10_15_16_22_W
for name, bits in [("and", [(3, 0, 29), (7, 4, 23)]),
("eor", [(3, 2, 29), (7, 4, 23)]),
("orr", [(3, 1, 29), (7, 4, 23)])]:
Opcode(name, ext + "_imm", [root100, w_bit] + bits,
[dst_reg_sp, src1_reg, imm], OPC_FLAG.STACK_OPS)
for name, bits in [("ands", [(3, 3, 29), (7, 4, 23)])]:
Opcode(name, ext + "_imm", [root100, w_bit] + bits,
[dst_reg, src1_reg, imm], OPC_FLAG.SR_UPDATE)
for name, bits in [("bfm", [(3, 1, 29), (7, 6, 23)]),
("ubfm", [(3, 2, 29), (7, 6, 23)]),
("sbfm", [(3, 0, 29), (7, 6, 23)])]:
Opcode(name, ext, [root100, w_bit, w_bit2] + bits,
[dst_reg, src1_reg, OK.IMM_16_21, OK.IMM_10_15], OPC_FLAG(0))
Opcode("extr", ext, [w_bit, (3, 0, 29), root100, (7, 7, 23), (1, ext == "x", 22), (1, 0, 21)],
[dst_reg, src1_reg, src2_reg, OK.IMM_10_15], OPC_FLAG(0))
Opcode("movk", ext + "_imm", [w_bit, (3, 3, 29), root100, (7, 5, 23)],
[dst_reg, OK.IMM_5_20, OK.SHIFT_21_22_TIMES_16], OPC_FLAG(0))
Opcode("movz", ext + "_imm", [w_bit, (3, 2, 29), root100, (7, 5, 23)],
[dst_reg, OK.IMM_SHIFTED_5_20_21_22], OPC_FLAG(0))
Opcode("movn", ext + "_imm", [w_bit, (3, 0, 29), root100, (7, 5, 23)],
[dst_reg, OK.IMM_SHIFTED_5_20_21_22], OPC_FLAG(0))
Opcode("adr", "", [root100, (1, 0, 31), (3, 0, 24)],
[OK.XREG_0_4, OK.SIMM_PCREL_5_23_29_30], OPC_FLAG(0))
Opcode("adrp", "", [root100, (1, 1, 31), (3, 0, 24)],
[OK.XREG_0_4, OK.SIMM_PCREL_5_23_29_30], OPC_FLAG(0))
########################################
root101 = (7, 5, 26)
########################################
Opcode("b", "", [root101, (7, 0, 29)],
[OK.SIMM_PCREL_0_25], OPC_FLAG.BRANCH)
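# Worked example (added comment): each (mask, value, shift) tuple pins a fixed
# field, e.g. root101 = (7, 5, 26) requires bits [28:26] == 0b101 and
# (7, 0, 29) requires bits [31:29] == 0b000, so "b" matches words whose top
# six bits are 000101 -- the AArch64 unconditional branch encoding.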
Opcode("bl", "", [root101, (7, 4, 29)],
[OK.SIMM_PCREL_0_25], OPC_FLAG.CALL | OPC_FLAG.IMPLICIT_LINK_REG)
Opcode("ret", "", [root101, (7, 6, 29), (0xffff, 0x97c0, 10), (0x1f, 0, 0)],
[OK.XREG_5_9], OPC_FLAG.BRANCH_INDIRECT)
Opcode("br", "", [root101, (7, 6, 29), (0xffff, 0x87c0, 10), (0x1f, 0, 0)],
[OK.XREG_5_9], OPC_FLAG(0))
Opcode("blr", "", [root101, (7, 6, 29), (0xffff, 0x8fc0, 10), (0x1f, 0, 0)],
[OK.XREG_5_9], OPC_FLAG.CALL_INDIRECT | OPC_FLAG.IMPLICIT_LINK_REG)
for cond_val, cond_name in enumerate(CONDITION_CODES):
Opcode("b." + cond_name, "", [root101, (7, 2, 29), (3, 0, 24), (0x1f, cond_val, 0)],
[OK.SIMM_PCREL_5_23], OPC_FLAG.COND_BRANCH)
for ext, w_bit in [("w", (1, 0, 31)), ("x", (1, 1, 31))]:
dst_reg = OK.XREG_0_4 if ext == "x" else OK.WREG_0_4
Opcode("cbnz", ext, [w_bit, root101, (3, 1, 29), (3, 1, 24)],
[dst_reg, OK.SIMM_PCREL_5_23], OPC_FLAG.COND_BRANCH)
Opcode("cbz", ext, [w_bit, root101, (3, 1, 29), (3, 0, 24)],
[dst_reg, OK.SIMM_PCREL_5_23], OPC_FLAG.COND_BRANCH)
Opcode("tbz", "", [root101, (3, 1, 29), (3, 2, 24)],
[OK.XREG_0_4, OK.IMM_19_23_31, OK.SIMM_PCREL_5_18], OPC_FLAG.COND_BRANCH)
Opcode("tbnz", "", [root101, (3, 1, 29), (3, 3, 24)],
[OK.XREG_0_4, OK.IMM_19_23_31, OK.SIMM_PCREL_5_18], OPC_FLAG.COND_BRANCH)
Opcode("hlt", "", [root101, (7, 6, 29), (0x1f, 2, 21), (0x1f, 0, 0)],
[OK.IMM_5_20], OPC_FLAG(0))
Opcode("brk", "", [root101, (7, 6, 29), (0x1f, 1, 21), (0x1f, 0, 0)],
[OK.IMM_5_20], OPC_FLAG(0))
Opcode("svc", "", [root101, (7, 6, 29), (0x1f, 0, 21), (0x1f, 1, 0)],
[OK.IMM_5_20], OPC_FLAG(0))
Opcode("yield", "", [root101, (7, 6, 29), (0x3ffffff, 0x103203f, 0)],
[], OPC_FLAG(0))
Opcode("nop", "", [root101, (7, 6, 29), (0x3ffffff, 0x103201f, 0)],
[], OPC_FLAG(0))
Opcode("eret", "", [root101, (7, 6, 29), (0x3ffffff, 0x29f03e0, 0)],
[], OPC_FLAG(0))
# atomic
Opcode("isb", "", [root101, (7, 6, 29), (0x3ffffff, 0x1033fdf, 0)],
[], OPC_FLAG(0))
Opcode("clrex", "", [root101, (7, 6, 29), (0x3ffffff, 0x1033f5f, 0)],
[], OPC_FLAG(0))
Opcode("dmb", "ish", [root101, (7, 6, 29), (0x3ffffff, 0x1033bbf, 0)],
[], OPC_FLAG.DOMAIN_PARAM)
Opcode("dmb", "ishld", [root101, (7, 6, 29), (0x3ffffff, 0x10339bf, 0)],
[], OPC_FLAG.DOMAIN_PARAM)
Opcode("dmb", "ishst", [root101, (7, 6, 29), (0x3ffffff, 0x1033abf, 0)],
[], OPC_FLAG.DOMAIN_PARAM)
Opcode("dsb", "ish", [root101, (7, 6, 29), (0x3ffffff, 0x1033b9f, 0)],
[], OPC_FLAG.DOMAIN_PARAM)
Opcode("dsb", "ishld", [root101, (7, 6, 29), (0x3ffffff, 0x103399f, 0)],
[], OPC_FLAG.DOMAIN_PARAM)
Opcode("dsb", "ishst", [root101, (7, 6, 29), (0x3ffffff, 0x1033a9f, 0)],
[], OPC_FLAG.DOMAIN_PARAM)
########################################
root110 = (7, 6, 26)
########################################
for ext, w_bit in [("w", (1, 0, 31)), ("x", (1, 1, 31))]:
dst_reg = OK.XREG_0_4 if ext == "x" else OK.WREG_0_4
src1_reg = OK.XREG_5_9 if ext == "x" else OK.WREG_5_9
src2_reg = OK.XREG_16_20 if ext == "x" else OK.WREG_16_20
src3_reg = OK.XREG_10_14 if ext == "x" else OK.WREG_10_14
for name, bits in [("madd", [(3, 0, 29), (1, 0, 15)]),
("msub", [(3, 0, 29), (1, 1, 15)])]:
Opcode(name, ext, [root110, w_bit, (3, 3, 24), (7, 0, 21)] + bits,
[dst_reg, src1_reg, src2_reg, src3_reg], OPC_FLAG(0))
for name, bits in [("udiv", [(3, 0, 29), (0x1f, 0x16, 21), (0x3f, 2, 10)]),
("sdiv", [(3, 0, 29), (0x1f, 0x16, 21), (0x3f, 3, 10)]),
("lslv", [(3, 0, 29), (0x1f, 0x16, 21), (0x3f, 8, 10)]),
("lsrv", [(3, 0, 29), (0x1f, 0x16, 21), (0x3f, 9, 10)]),
("asrv", [(3, 0, 29), (0x1f, 0x16, 21), (0x3f, 0xa, 10)]),
("rorv", [(3, 0, 29), (0x1f, 0x16, 21), (0x3f, 0xb, 10)]),
("adc", [(3, 0, 29), (0x1f, 0x10, 21), (0x3f, 0, 10)]),
("sbc", [(3, 2, 29), (0x1f, 0x10, 21), (0x3f, 0, 10)])]:
Opcode(name, ext, [root110, w_bit] + bits,
               [dst_reg, src1_reg, src2_reg], OPC_FLAG(0))
from utils.modulos import *
class Pesquisar:
def __init__(self, master=None, app=None):
        img_pesquisar = PhotoImage(data=base64.b64decode(img_pesquisar_base64))  # Search button image
        img_listar = PhotoImage(data=base64.b64decode(img_listar_doc_base64))  # List button image
        img_calendario = PhotoImage(data=base64.b64decode(img_calendario_base64))  # Calendar button image
        img_editar = PhotoImage(data=base64.b64decode(img_editar_doc_base64))  # Edit button image
        img_excluir = PhotoImage(data=base64.b64decode(img_excluir_doc_base64))  # Delete button image
self.master = master
self.app = app
self.__framePesquisar = Frame(self.master, height=500, bg='LightSteelBlue3', bd=2, relief='ridge')
        # Set the ttk style used by the Notebook widgets
style = ttk.Style()
style.theme_use('alt')
style.configure("TNotebook", background='LightSteelBlue3')
style.configure("TNotebook.Tab", background='LightSteelBlue3')
style.map('TNotebook.Tab', background=[('selected', '#4444ff')])
        # Create a Notebook
self.__notebook = ttk.Notebook(self.__framePesquisar, height=500)
self.__notebook.pack(side=BOTTOM, fill=X)
        # Notebook tabs
        # "Search Processes" (Pesquisar Processos) tab
self.__tbProcessos = Frame(self.__notebook, bg='LightSteelBlue3')
self.__notebook.add(self.__tbProcessos, text='Pesquisar Processos')
self.__lblTitulo = Label(self.__tbProcessos, text='Pesquisar Processos', bg='LightSteelBlue3')
self.__lblTitulo['font'] = 'Serif', '16', 'bold'
self.__lblTitulo.place(x=325, y=10)
self.__lblCaso = Label(self.__tbProcessos, text='N° do Caso', bg='LightSteelBlue3')
self.__lblCaso['font'] = 'Serif', '12'
self.__lblCaso.place(x=100, y=50)
self.__txtCaso = Entry(self.__tbProcessos)
self.__txtCaso.place(relx=0.22, y=50, relwidth=0.1)
self.__lblProcesso = Label(self.__tbProcessos, text='N° do Processo', bg='LightSteelBlue3')
self.__lblProcesso['font'] = 'Serif', '12'
self.__lblProcesso.place(relx=0.45, y=50)
self.__txtProcesso = Entry(self.__tbProcessos)
self.__txtProcesso.place(relx=0.595, y=50, relwidth=0.3)
self.__lblAutor = Label(self.__tbProcessos, text='Autor', bg='LightSteelBlue3')
self.__lblAutor['font'] = 'Serif', '12'
self.__lblAutor.place(x=150, y=80)
self.__txtAutor = Entry(self.__tbProcessos)
self.__txtAutor.place(relx=0.22, y=80, relwidth=0.675)
self.__lblAdvExterno = Label(self.__tbProcessos, text='Adv Externo', bg='LightSteelBlue3')
self.__lblAdvExterno['font'] = 'Serif', '12'
self.__lblAdvExterno.place(x=100, y=110)
self.__txtAdvExterno = ttk.Combobox(self.__tbProcessos, values=['ANTÔNIO DOS ANZÓIS'])
self.__txtAdvExterno['justify'] = 'left'
self.__txtAdvExterno.place(relx=0.22, y=110, relwidth=0.674)
self.__lblDataInicio = Label(self.__tbProcessos, text='Inicio', bg='LightSteelBlue3')
self.__lblDataInicio['font'] = 'Serif', '12'
self.__lblDataInicio.place(x=150, y=140)
self.__txtDataInicio = Entry(self.__tbProcessos)
self.__txtDataInicio.place(relx=0.22, y=140, relwidth=0.09)
self.__lblDataFim = Label(self.__tbProcessos, text='Fim', bg='LightSteelBlue3')
self.__lblDataFim['font'] = 'Serif', '12'
self.__lblDataFim.place(relx=0.355, y=140)
self.__txtDataFim = Entry(self.__tbProcessos)
self.__txtDataFim.place(relx=0.4, y=140, relwidth=0.09)
self.__lblVaraTribunal = Label(self.__tbProcessos, text='Vara/Tribunal', bg='LightSteelBlue3')
self.__lblVaraTribunal['font'] = 'Serif', '12'
self.__lblVaraTribunal.place(relx=0.545, y=140)
self.__txtVaraTribunal = Entry(self.__tbProcessos)
self.__txtVaraTribunal.place(relx=0.675, y=140, relwidth=0.22)
self.__colunas = ('#1', '#2', '#3', '#4', '#5')
self.__tvProcessos = ttk.Treeview(self.__tbProcessos, columns=self.__colunas, selectmode='browse', height=5)
self.__tvProcessos.heading('#0', text='')
self.__tvProcessos.heading('#1', text='N° Caso')
self.__tvProcessos.heading('#2', text='N° Processo')
self.__tvProcessos.heading('#3', text='Autor')
self.__tvProcessos.heading('#4', text='Réu')
self.__tvProcessos.heading('#5', text='Situação Atual')
self.__tvProcessos.column('#0', width=0, stretch=NO)
self.__tvProcessos.column('#1', width=70, anchor='center')
self.__tvProcessos.column('#2', width=100, anchor='center')
self.__tvProcessos.column('#3', width=200, anchor='center')
self.__tvProcessos.column('#4', width=200, anchor='center')
        self.__tvProcessos.column('#5', width=180, anchor='center')
self.__tvProcessos.place(x=105, y=200)
self.__btnPesquisar = criar_botao(self.__tbProcessos, 'Pesquisar', img_pesquisar,
lambda: self.pesquisar_processos(), 300, 350)
self.__btnListar = criar_botao(self.__tbProcessos, 'Listar', img_listar,
lambda: self.listar_processos(), 410, 350)
self.__btnEditar = criar_botao(self.__tbProcessos, 'Editar', img_editar,
lambda: self.editar(self.app.frameProcessos,
self.__tvProcessos,
'processos',
self.app.btnProcessos),
520, 350)
self.__btnExcluir = criar_botao(self.__tbProcessos, 'Excluir', img_excluir,
lambda: deletar(self.__tvProcessos, 'processos'), 630, 350)
self.__btnCalendario = Button(self.__tbProcessos,
image=img_calendario,
relief='flat',
bg='LightSteelBlue3',
highlightthickness=0
)
self.__btnCalendario.image = img_calendario
self.__btnCalendario['command'] = lambda: Calendario(self.__tbProcessos,
self.__txtDataInicio,
relx=self.__txtDataInicio.winfo_rootx(),
rely=self.__txtDataInicio.winfo_rooty())
self.__btnCalendario.place(relx=0.312, rely=0.312)
self.__btnCalendario = Button(self.__tbProcessos,
image=img_calendario,
relief='flat',
bg='LightSteelBlue3',
highlightthickness=0
)
self.__btnCalendario.image = img_calendario
self.__btnCalendario['command'] = lambda: Calendario(self.__tbProcessos,
self.__txtDataFim,
relx=self.__txtDataFim.winfo_rootx(),
rely=self.__txtDataFim.winfo_rooty())
self.__btnCalendario.place(relx=0.492, rely=0.312)
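        # Note: self.__btnCalendario is reassigned for each calendar button, so the
        # attribute ends up referencing only the last one; the earlier buttons stay
        # alive because place() registers them with their parent widget, and the
        # `.image` attribute keeps each PhotoImage from being garbage-collected.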
        # "Search Occurrences" (Pesquisar Ocorrências) tab
self.__tbOcorrencias = Frame(self.__notebook, bg='LightSteelBlue3')
self.__notebook.add(self.__tbOcorrencias,
text='Pesquisar Ocorrências')
self.__lblTitulo = Label(self.__tbOcorrencias, text='Pesquisar Ocorrências', bg='LightSteelBlue3')
self.__lblTitulo['font'] = 'Serif', '16', 'bold'
self.__lblTitulo.place(x=320, y=10)
self.__lblCasoOcorrencia = Label(self.__tbOcorrencias, text='Caso', bg='LightSteelBlue3')
self.__lblCasoOcorrencia['font'] = 'Serif', '12'
self.__lblCasoOcorrencia.place(x=100, y=80)
self.__txtCasoOcorrencia = Entry(self.__tbOcorrencias)
self.__txtCasoOcorrencia.place(relx=0.166, y=80, relwidth=0.15)
self.__lblValorOcorrencia = Label(self.__tbOcorrencias, text='Valor R$', bg='LightSteelBlue3')
self.__lblValorOcorrencia['font'] = 'Serif', '12'
self.__lblValorOcorrencia.place(relx=0.37, y=80)
self.__txtValorOcorrencia = Entry(self.__tbOcorrencias)
self.__txtValorOcorrencia.place(relx=0.455, y=80, relwidth=0.15)
self.__lblDataOcorrencia = Label(self.__tbOcorrencias, text='Data', bg='LightSteelBlue3')
self.__lblDataOcorrencia['font'] = 'Serif', '12'
self.__lblDataOcorrencia.place(relx=0.65, y=80)
self.__txtDataOcorrencia = Entry(self.__tbOcorrencias)
self.__txtDataOcorrencia.place(relx=0.7, y=80, relwidth=0.09)
self.__colunas = ('#1', '#2', '#3', '#4', '#5')
self.__tvOcorrencias = ttk.Treeview(self.__tbOcorrencias, columns=self.__colunas, selectmode='browse',
height=8)
self.__tvOcorrencias.heading('#0', text='')
self.__tvOcorrencias.heading('#1', text='Caso')
self.__tvOcorrencias.heading('#2', text='Data')
self.__tvOcorrencias.heading('#3', text='Descrição')
self.__tvOcorrencias.heading('#4', text='Valor')
self.__tvOcorrencias.heading('#5', text='Valor Atual')
self.__tvOcorrencias.column('#0', width=0, stretch=NO)
self.__tvOcorrencias.column('#1', width=100, anchor='center')
self.__tvOcorrencias.column('#2', width=100, anchor='center')
self.__tvOcorrencias.column('#3', width=300, anchor='center')
self.__tvOcorrencias.column('#4', width=150, anchor='center')
self.__tvOcorrencias.column('#5', width=150, anchor='center')
self.__tvOcorrencias.place(x=80, y=150)
self.__btnPesquisar = criar_botao(self.__tbOcorrencias, 'Pesquisar', img_pesquisar,
lambda: self.pesquisar_ocorrencias(), 200, 350)
self.__btnListar = criar_botao(self.__tbOcorrencias, 'Listar', img_listar,
lambda: self.listar_ocorrencias(), 310, 350)
self.__btnEditar = criar_botao(self.__tbOcorrencias, 'Editar', img_editar,
lambda: self.editar(self.app.frameOcorrencias,
self.__tvOcorrencias,
'ocorrencias',
self.app.btnOcorrencias),
420, 350)
self.__btnExcluir = criar_botao(self.__tbOcorrencias, 'Excluir', img_excluir,
lambda: deletar(self.__tvOcorrencias, 'ocorrencias'), 530, 350)
self.__btnCalendario = Button(self.__tbOcorrencias,
image=img_calendario,
relief='flat',
bg='LightSteelBlue3',
highlightthickness=0
)
self.__btnCalendario.image = img_calendario
self.__btnCalendario['command'] = lambda: Calendario(self.__tbOcorrencias,
self.__txtDataOcorrencia,
relx=self.__txtDataOcorrencia.winfo_rootx(),
rely=self.__txtDataOcorrencia.winfo_rooty())
self.__btnCalendario.place(relx=0.792, rely=0.175)
        # "Search Consultations" (Pesquisar Consultas) tab
self.__tbConsultas = Frame(self.__notebook, bg='LightSteelBlue3')
self.__notebook.add(self.__tbConsultas,
text='Pesquisar Consultas')
self.__lblTitulo = Label(self.__tbConsultas, text='Pesquisar Consultas', bg='LightSteelBlue3')
self.__lblTitulo['font'] = 'Serif', '16', 'bold'
self.__lblTitulo.place(x=325, y=10)
self.__lblConsulta = Label(self.__tbConsultas, text='Consulta', bg='LightSteelBlue3')
self.__lblConsulta['font'] = 'Serif', '12'
self.__lblConsulta.place(x=225, y=80)
self.__txtConsulta = Entry(self.__tbConsultas)
self.__txtConsulta.place(relx=0.325, y=80, relwidth=0.1)
self.__lblPrioridade = Label(self.__tbConsultas, text='Prioridade', bg='LightSteelBlue3')
self.__lblPrioridade['font'] = 'Serif', '12'
self.__lblPrioridade.place(relx=0.45, y=80)
self.__txtPrioridade = ttk.Combobox(self.__tbConsultas, values=['ALTA', 'MÉDIA', 'BAIXA'])
self.__txtPrioridade['justify'] = 'center'
self.__txtPrioridade.place(relx=0.55, y=80, relwidth=0.127)
self.__lblEntrada = Label(self.__tbConsultas, text='Entrada', bg='LightSteelBlue3')
self.__lblEntrada['font'] = 'Serif', '12'
self.__lblEntrada.place(x=230, y=110)
self.__txtEntrada = Entry(self.__tbConsultas)
self.__txtEntrada.place(relx=0.325, y=110, relwidth=0.1)
self.__lblSaida = Label(self.__tbConsultas, text='Saída', bg='LightSteelBlue3')
self.__lblSaida['font'] = 'Serif', '12'
self.__lblSaida.place(relx=0.49, y=110)
self.__txtSaida = Entry(self.__tbConsultas)
self.__txtSaida.place(relx=0.55, y=110, relwidth=0.1)
self.__colunas = ('#1', '#2', '#3', '#4', '#5', '#6', '#7')
self.__tvConsultas = ttk.Treeview(self.__tbConsultas, columns=self.__colunas, selectmode='browse',
height=8)
self.__tvConsultas.heading('#0', text='')
self.__tvConsultas.heading('#1', text='Consulta')
self.__tvConsultas.heading('#2', text='Prioridade')
self.__tvConsultas.heading('#3', text='Entrada')
self.__tvConsultas.heading('#4', text='Saída')
self.__tvConsultas.heading('#5', text='Origem')
self.__tvConsultas.heading('#6', text='Destino')
self.__tvConsultas.heading('#7', text='Assunto')
self.__tvConsultas.column('#0', width=0, stretch=NO)
self.__tvConsultas.column('#1', width=100, anchor='center')
self.__tvConsultas.column('#2', width=100, anchor='center')
self.__tvConsultas.column('#3', width=100, anchor='center')
self.__tvConsultas.column('#4', width=100, anchor='center')
self.__tvConsultas.column('#5', width=150, anchor='center')
self.__tvConsultas.column('#6', width=150, anchor='center')
self.__tvConsultas.column('#7', width=200, anchor='center')
self.__tvConsultas.place(x=25, y=160)
self.__btnPesquisar = criar_botao(self.__tbConsultas, 'Pesquisar', img_pesquisar,
lambda: self.pesquisar_consultas(), 250, 360)
self.__btnListar = criar_botao(self.__tbConsultas, 'Listar', img_listar,
lambda: self.listar_consultas(), 360, 360)
self.__btnEditar = criar_botao(self.__tbConsultas, 'Editar', img_editar,
lambda: self.editar(self.app.frameConsultas,
self.__tvConsultas,
'consultas',
self.app.btnConsultas),
470, 360)
self.__btnExcluir = criar_botao(self.__tbConsultas, 'Excluir', img_excluir,
lambda: deletar(self.__tvConsultas, 'consultas'), 580, 360)
self.__btnCalendario = Button(self.__tbConsultas,
image=img_calendario,
relief='flat',
bg='LightSteelBlue3',
highlightthickness=0
)
self.__btnCalendario.image = img_calendario
self.__btnCalendario['command'] = lambda: Calendario(self.__tbConsultas,
self.__txtEntrada,
relx=self.__txtEntrada.winfo_rootx(),
rely=self.__txtEntrada.winfo_rooty())
self.__btnCalendario.place(relx=0.4285, rely=0.243)
self.__btnCalendario = Button(self.__tbConsultas,
image=img_calendario,
relief='flat',
bg='LightSteelBlue3',
highlightthickness=0
)
self.__btnCalendario.image = img_calendario
self.__btnCalendario['command'] = lambda: Calendario(self.__tbConsultas,
self.__txtSaida,
relx=self.__txtSaida.winfo_rootx(),
rely=self.__txtSaida.winfo_rooty())
self.__btnCalendario.place(relx=0.652, rely=0.243)
@property
def caso(self):
return self.__txtCaso.get()
@property
def processo(self):
return self.__txtProcesso.get()
@property
def autor(self):
return self.__txtAutor.get()
@property
def adv_externo(self):
return self.__txtAdvExterno.get()
@adv_externo.setter
def adv_externo(self, valor):
self.__txtAdvExterno['values'] = valor
@property
def inicio(self):
return self.__txtDataInicio.get()
@property
def fim(self):
return self.__txtDataFim.get()
@property
def vara_tribunal(self):
return self.__txtVaraTribunal.get()
@property
def caso_ocorrencia(self):
return self.__txtCasoOcorrencia.get()
@property
def valor_ocorrencia(self):
return self.__txtValorOcorrencia.get()
@property
def data_ocorrencia(self):
return self.__txtDataOcorrencia.get()
@property
def consulta(self):
return self.__txtConsulta.get()
@property
def prioridade(self):
return self.__txtPrioridade.get()
@property
def entrada(self):
return self.__txtEntrada.get()
@property
def saida(self):
return self.__txtSaida.get()
def listar_processos(self):
self.__tvProcessos.delete(*self.__tvProcessos.get_children())
processos = view('processos')
for processo in processos:
self.__tvProcessos.insert('', END, iid=None,
values=(processo[1], processo[6],
processo[2], processo[3], processo[12]))
def listar_ocorrencias(self):
self.__tvOcorrencias.delete(*self.__tvOcorrencias.get_children())
ocorrencias = view('ocorrencias')
for ocorrencia in ocorrencias:
self.__tvOcorrencias.insert('', END, iid=None,
values=(ocorrencia[1], ocorrencia[2],
ocorrencia[3], ocorrencia[4], ocorrencia[5]))
def listar_consultas(self):
self.__tvConsultas.delete(*self.__tvConsultas.get_children())
consultas = view('consultas')
for consulta in consultas:
self.__tvConsultas.insert('', END, iid=None,
values=(consulta[1], consulta[3], consulta[5], consulta[11],
consulta[6], consulta[12], consulta[7]))
def pesquisar_processos(self):
self.__tvProcessos.delete(*self.__tvProcessos.get_children())
if (self.caso != '') and (self.processo != '') and (self.autor != '') and (self.adv_externo != '') and \
(self.inicio != '') and (self.fim != '') and (self.vara_tribunal != ''):
processos = search('processos', clause=f'where caso="{self.caso}" and processo="{self.processo}" and '
f'autor="{self.autor}" and adv_externo="{self.adv_externo}" and '
f'inicio="{self.inicio}" and fim="{self.fim}" and '
f'vara_tribunal="{self.vara_tribunal}"')
elif (self.caso != '') or (self.processo != '') or (self.autor != '') or (self.adv_externo != '') or \
(self.inicio != '') or (self.fim != '') or (self.vara_tribunal != ''):
processos = search('processos', clause=f'where caso="{self.caso}" or processo="{self.processo}" or '
f'autor="{self.autor}" or adv_externo="{self.adv_externo}" or '
f'inicio="{self.inicio}" or fim="{self.fim}" or '
f'vara_tribunal="{self.vara_tribunal}"')
elif (self.caso != '') and ((self.processo != '') or (self.autor != '') or (self.adv_externo != '') or
(self.inicio != '') or (self.fim != '') or (self.vara_tribunal != '')):
processos = search('processos', clause=f'where caso="{self.caso}" and (processo="{self.processo}" or '
f'autor="{self.autor}" or adv_externo="{self.adv_externo}" or '
f'inicio="{self.inicio}" or fim="{self.fim}" or '
f'vara_tribunal="{self.vara_tribunal}")')
elif (self.processo != '') and (self.autor != '') and (self.adv_externo != '') and \
(self.inicio != '') and (self.fim != '') and (self.vara_tribunal != ''):
processos = search('processos', clause=f'where processo="{self.processo}" and '
f'autor="{self.autor}" and adv_externo="{self.adv_externo}" and '
f'inicio="{self.inicio}" and fim="{self.fim}" and '
f'vara_tribunal="{self.vara_tribunal}"')
elif (self.processo != '') and ((self.autor != '') or (self.adv_externo != '') or
(self.inicio != '') or (self.fim != '') or (self.vara_tribunal != '')):
processos = search('processos', clause=f'where processo="{self.processo}" and '
f'(autor="{self.autor}" or adv_externo="{self.adv_externo}" or '
f'inicio="{self.inicio}" or fim="{self.fim}" or '
f'vara_tribunal="{self.vara_tribunal}")')
elif (self.autor != '') and (self.adv_externo != '') and \
(self.inicio != '') and (self.fim != '') and (self.vara_tribunal != ''):
processos = search('processos', clause=f'where autor="{self.autor}" and adv_externo="{self.adv_externo}" '
f'and inicio="{self.inicio}" and fim="{self.fim}" and '
f'vara_tribunal="{self.vara_tribunal}"')
elif (self.autor != '') and ((self.adv_externo != '') or (self.inicio != '') or (self.fim != '')
or (self.vara_tribunal != '')):
processos = search('processos', clause=f'where autor="{self.autor}" and (adv_externo="{self.adv_externo}" '
f'or inicio="{self.inicio}" or fim="{self.fim}" or '
f'vara_tribunal="{self.vara_tribunal}")')
elif (self.adv_externo != '') and (self.inicio != '') and (self.fim != '') and (self.vara_tribunal != ''):
processos = search('processos', clause=f'where adv_externo="{self.adv_externo}" and inicio="{self.inicio}"'
f' and fim="{self.fim}" and vara_tribunal="{self.vara_tribunal}"')
elif (self.adv_externo != '') and ((self.inicio != '') or (self.fim != '') or (self.vara_tribunal != '')):
            processos = search('processos', clause=f'where adv_externo="{self.adv_externo}" and '
                                                   f'(inicio="{self.inicio}" or fim="{self.fim}" or vara_tribunal="{self.vara_tribunal}")')
# Source repository: oplatek/e2end (10-100 GitHub stars)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json, logging, os, pickle, bisect
import numpy as np
from collections import OrderedDict
from . import Vocabulary
logger = logging.getLogger(__name__)
class Dstc2DB:
def __init__(self, filename, first_n=None):
logger.info('\nLoading DB %s', filename)
raw_data = json.load(open(filename))[:first_n]
self._col_names = col_names = sorted(list(set([k for r in raw_data for k in r.keys()])))
self._col_name_vocab = Vocabulary([], extra_words=col_names, unk=None)
self._col_vocabs = col_vocabs = []
for c in col_names:
col_occur = [r[c] for r in raw_data]
col_vocabs.append(Vocabulary(col_occur, max_items=len(col_occur)))
self._table = table = np.empty((len(raw_data), len(col_names)), dtype=np.int64)
for j, (cv, cn) in enumerate(zip(col_vocabs, col_names)):
for i, r in enumerate(raw_data):
table[i, j] = cv.get_i(r[cn])
logger.info('\nLoaded DB %s.shape = %s', filename, self.table.shape)
@property
def column_names(self):
return self._col_names
@property
def col_names_vocab(self):
return self._col_name_vocab
def get_col_idx(self, col_name):
return self._col_name_vocab.get_i(col_name)
def get_col_name(self, idx):
return self._col_name_vocab.get_w(idx)
@property
def col_vocabs(self):
return self._col_vocabs
def matching_rows(self, col_val_dict):
return self.table[self._row_mask(col_val_dict)]
def _row_mask(self, col_val_dict):
# See http://stackoverflow.com/questions/1962980/selecting-rows-from-a-numpy-ndarray
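        # Example with hypothetical ids: col_val_dict = {0: 3, 2: 7} keeps only the rows
        # whose column 0 holds vocabulary id 3 AND whose column 2 holds vocabulary id 7.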
return np.logical_and.reduce([self.table[:, c] == v for c, v in col_val_dict.items()])
def matching_rest_names(self, col_val_dict):
match_rows = self.matching_rows(col_val_dict)
name_idx = self.get_col_idx('name')
restaurant_names = match_rows[:, name_idx]
return restaurant_names
def get_col_vocab(self, col_name):
idx = self.column_names.index(col_name)
return self.col_vocabs[idx]
@property
def num_rows(self):
return self.table.shape[0]
@property
def num_cols(self):
return self.table.shape[1]
@property
def table(self):
return self._table
def extract_entities(self, sentence):
'''Returns array of arrays of flags (0 or 1) indicating
if a word is part if some value for in a column'''
def mask_ent(sentence, vocab):
mask = [0] * len(sentence)
for i, w in enumerate(sentence):
for ent in vocab.words:
e = ent.strip().split()
if w == e[0] and sentence[i:i + len(e)] == e:
logger.debug('found an entity %s in %s', ent, sentence)
mask[i:i + len(e)] = [1] * len(e)
break
return mask
return [mask_ent(sentence, vocab) for vocab in self.col_vocabs]
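        # Illustrative example (hypothetical vocab): for sentence ['i', 'want', 'thai', 'food']
        # and a 'food' column that contains the value 'thai', the mask returned for that
        # column is [0, 0, 1, 0]; multi-word values mark a run of consecutive 1s.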
class Dstc2:
    '''DSTC2 dialog dataset loader: parses the dialog turns and builds padded numpy
    arrays of word ids, speaker ids, per-column entity masks, decoder targets and
    matching DB rows.'''
def __init__(self, filename, db, row_targets=False, dst=False,
max_turn_len=None, max_dial_len=None, max_target_len=None, max_row_len=None,
first_n=None, words_vocab=None, sample_unk=0, history_prefix=False):
assert not dst or (not row_targets), 'implication dst -> not row_targets'
self.row_targets = row_targets
self.restaurant_name_vocab_id = db.get_col_idx('name')
logger.info('\nLoading dataset %s', filename)
self.hello_token = hello_token = '<PASSWORD>' # Default user history for first turn
        self.EOS = EOS = 'EOS'  # Symbol which the decoder should produce as the last one
assert isinstance(db, Dstc2DB), type(db)
raw_data = json.load(open(filename))
first_n = min(first_n, len(raw_data)) if first_n else len(raw_data)
dialogs = [[(turn[0] + ' ' + turn[1]).strip().split() for turn in dialog['turns']] for dialog in raw_data]
self._speak_vocab = Vocabulary([], extra_words=['usr', 'sys'], unk=None)
usr, ss = self._speak_vocab.get_i('usr'), self._speak_vocab.get_i('sys')
speakers = [[[ss] * len(turn[0].strip().split()) + [usr] * len(turn[1].strip().split()) for turn in dialog['turns']] for dialog in raw_data]
self.session_ids = ids = [dialog['session-id'] for dialog in raw_data]
if dst: # dialog state tracking
            logger.info('Hacking targets so they contain DST labels and are NOT shifted by one')
targets = [[(turn[4]).strip().split() + [EOS] for turn in dialog['turns']] for dialog in raw_data]
else:
targets = [[(turn[0]).strip().split() + [EOS] for turn in dialog['turns']] for dialog in raw_data]
dialogs, speakers, targets, ids = dialogs[:first_n], speakers[:first_n], targets[:first_n], ids[:first_n]
self.history_prefix = history_prefix
if history_prefix:
hp_dialogs, hp_speakers = [], []
for d, s in zip(dialogs, speakers):
hp_d, hp_s = [], []
pd, ps = [], []
for turn_ws, turn_ss in zip(d, s):
pd.extend(turn_ws)
ps.extend(turn_ss)
hp_d.append(list(pd))
hp_s.append(list(ps))
hp_dialogs.append(hp_d)
hp_speakers.append(hp_s)
dialogs, speakers = hp_dialogs, hp_speakers
self._vocab = words_vocab = words_vocab or Vocabulary([w for turns in dialogs for turn in turns for w in turn], extra_words=[hello_token, EOS], unk='UNK')
s = sorted([len(t) for turns in dialogs for t in turns])
max_turn, perc95t = s[-1], s[int(0.95 * len(s))]
self._max_turn_len = mtl = max_turn_len or max_turn
logger.info('Turn length: %4d.\nMax turn len %4d.\n95-percentil %4d.\n', mtl, max_turn, perc95t)
d = sorted([len(d) for d in dialogs])
max_dial, perc95d = d[-1], d[int(0.95 * len(d))]
self._max_dial_len = mdl = max_dial_len or max_dial
logger.info('Dial length: %4d.\nDial turn len %4d.\n95-percentil %4d.\n', mdl, max_dial, perc95d)
entities = [[db.extract_entities(turn) for turn in d] for d in dialogs]
if not dst:
logger.debug('Maximum decoder length increasing by 1, since targets are shifted by one')
mdl += 1
self._turn_lens_per_dialog = np.zeros((len(dialogs), mdl), dtype=np.int64)
self._dials = np.zeros((len(dialogs), mdl, mtl), dtype=np.int64)
self._word_ent = np.zeros((len(dialogs), mdl, len(db.column_names), mtl), dtype=np.int64)
self._turn_lens = np.zeros((len(dialogs), mdl), dtype=np.int64)
t = sorted([len(turn_target) for dialog_targets in targets for turn_target in dialog_targets])
        maxtarl, perc95t = t[-1], t[int(0.95 * len(t))]
self._max_target_len = mtarl = (max_target_len or maxtarl)
logger.info('Target len: %4d.\nMax target len %4d.\n95-percentil %4d.\n', maxtarl, mtarl, perc95t)
self._turn_targets = ttarg = words_vocab.get_i(EOS) * np.ones((len(dialogs), mdl, mtarl), dtype=np.int64)
self._turn_target_lens = np.zeros((len(dialogs), mdl), dtype=np.int64)
self._word_speakers = w_spk = np.zeros((len(dialogs), mdl, mtl), dtype=np.int64)
self._match_rows_props = np.zeros((len(dialogs), mdl, db.num_rows), dtype=np.int64)
self._match_row_lens = np.zeros((len(dialogs), mdl), dtype=np.int64)
tmp1, tmp2 = db.column_names + ['words'], db.col_vocabs + [words_vocab]
self.target_vocabs = OrderedDict(zip(tmp1, tmp2))
self.word_vocabs_uplimit = OrderedDict(
zip(self.target_vocabs.keys(),
np.cumsum([len(voc) for voc in self.target_vocabs.values()])))
self.word_vocabs_downlimit = OrderedDict(
zip(self.target_vocabs.keys(),
[0] + list(self.word_vocabs_uplimit.values())[:-1]))
dial_lens, this_max_row = [], 0
for i, (d, spkss, entss, dtargss) in enumerate(zip(dialogs, speakers, entities, targets)):
assert len(d) == len(dtargss)
dial_len = 0
if not dst:
logger.debug('Shifting targets and turns by one. First context is empty turn')
d, spkss, entss = [[hello_token]] + d, [[usr]] + spkss, [db.extract_entities([hello_token])] + entss
for j, (turn, spks, ents, targets) in enumerate(zip(d, spkss, entss, dtargss)):
sys_word_ids, vocab_names = self._extract_vocab_ids(targets)
restaurants = self._row_all_prop_match(sys_word_ids, vocab_names, db)
num_match = restaurants.shape[0]
this_max_row = max(num_match, this_max_row)
if j > mdl or len(turn) > mtl or len(sys_word_ids) > mtarl or (max_row_len is not None and num_match > max_row_len):
logger.debug("Keep prefix of turns, discard following turns because:"
"a) num_turns too big "
"b) current turn too long. "
"c) current target too long.")
break
else:
dial_len += 1
assert len(turn) == len(spks), str((len(turn), len(spks), turn, spks))
self._turn_lens[i, j] = len(turn)
for k, (w, s_id) in enumerate(zip(turn, spks)):
self._dials[i, j, k] = words_vocab.get_i(w, unk_chance_smaller=sample_unk)
w_spk[i, j, k] = s_id
for l, e in enumerate(ents):
self._word_ent[i, j, l, k] = e[k]
self._turn_target_lens[i, j] = len(sys_word_ids)
for k in range(num_match):
self._match_rows_props[i, j, k] = restaurants[k]
self._match_row_lens[i, j] = num_match
for k, w_id in enumerate(sys_word_ids):
ttarg[i, j, k] = w_id
if dial_len > 0:
dial_lens.append(dial_len)
else:
logger.debug('Discarding whole dialog: %d', i)
self._max_match_rows = max(1, max_row_len or this_max_row)
self._match_rows_props = self._match_rows_props[:, :, :self._max_match_rows]
logger.info('Max row len this set %d vs max_row_len %d', this_max_row, self._max_match_rows)
self._dial_lens = np.array(dial_lens)
logger.info('\nLoaded dataset len(%s): %d', filename, len(self))
# FIXME use it
def _row_mention_match(self, word_ids, vocab_names, db):
'''If a system response - word_ids contains an restaurant name, we know exact row which to output,
but we can also output any row with the same properties which were mentioned.
Args
word_ids:
vocab_names:
db:
Returns: numpy array representing mask of matching rows.'''
if 'name' not in vocab_names:
return np.array([])
else:
const = dict([(db.get_col_idx(vn), wid - self.word_vocabs_downlimit[vn]) for wid, vn in zip(word_ids, vocab_names) if vn in ['area', 'food', 'pricerange']])
return db.matching_rest_names(const)
def _row_all_prop_match(self, word_ids, vocab_names, db):
'''If a system response - word_ids contains an restaurant name, we know exact row which to output,
but we can also output any row with the same properties.
Args
word_ids:
vocab_names:
db:
Returns: (num_match_rows, ids of restaurant names determining the matching rows).'''
if 'name' not in vocab_names:
return np.array([])
else:
name_idx = vocab_names.index('name')
restaurant_idx = word_ids[name_idx] - self.word_vocabs_downlimit['name']
name_vocab_id = self.restaurant_name_vocab_id # id for name
restaurant_row = db.matching_rows({name_vocab_id: restaurant_idx})
assert len(restaurant_row) == 1, str(restaurant_row)
restaurant_row = restaurant_row[0]
col_idx = [db.get_col_idx(vn) for vn in ['area', 'food', 'pricerange']]
filter_col_val = dict([(c, restaurant_row[c]) for c in col_idx])
restaurants = db.matching_rest_names(filter_col_val)
return restaurants
def _extract_vocab_ids(self, target_words):
'''Heuristic how to recognize named entities from DB in sentence and
insert user their ids instead "regular words".'''
skip_words_of_entity = 0
target_ids, vocab_names = [], []
for i, w in enumerate(target_words):
if skip_words_of_entity > 0:
skip_words_of_entity -= 1
continue
w_found = False
for vocab_name, vocab in self.target_vocabs.items():
if vocab_name == 'words':
continue
for ent in vocab.words:
e = ent.strip().split()
if w == e[0] and target_words[i:i + len(e)] == e:
logger.debug('found an entity "%s" from column %s in target_words %s', ent, vocab_name, target_words)
skip_words_of_entity = len(e) - 1
w_id = self.get_target_surface_id(vocab_name, vocab, ent)
if self.row_targets:
if vocab_name == 'name':
target_ids.append(w_id)
vocab_names.append(vocab_name)
else:
logger.debug('Reporting just restaurants names')
else:
target_ids.append(w_id)
vocab_names.append(vocab_name)
w_found = True
break
if w_found:
break
if not w_found:
                logger.debug('Target word "%s" is not a DB entity', w)
Set to zero to find representative profile on all resource, not
just included.
n_profiles : int
Number of representative profiles to output.
rerank : bool
Flag to rerank representative generation profiles after removing
excluded generation pixels.
cluster_kwargs : dict
RPMClusters kwargs
max_workers : int, optional
Number of parallel workers. 1 will run serial, None will use all
available., by default None
trg_bins : str | list | None
TRG bins as an ordered list of bin edge values or string to a csv
containing a single column with bin edge values. None will ignore
trgs.
trg_dset : str
Dataset associated with TRG bins that can be found in the cf_fpath
file.
pre_extract_inclusions : bool
Flag to pre-extract the inclusion mask using excl_fpath and
excl_dict. This is advantageous if the excl_dict is highly complex
and if you're processing a lot of points. Default is False.
"""
logger.info('Initializing RPM output processing...')
self._clusters = self._parse_cluster_arg(rpm_clusters)
self._excl_fpath = excl_fpath
self._excl_dict = excl_dict
self._techmap_dset = techmap_dset
self._cf_fpath = cf_fpath
self.excl_area = excl_area
self.include_threshold = include_threshold
self.n_profiles = n_profiles
self.rerank = rerank
if self.excl_area is None and self._excl_fpath is not None:
with ExclusionLayers(self._excl_fpath) as excl:
self.excl_area = excl.pixel_area
if max_workers is None:
max_workers = os.cpu_count()
self.max_workers = max_workers
if cluster_kwargs is None:
self.cluster_kwargs = {}
else:
self.cluster_kwargs = cluster_kwargs
self.trg_bins = trg_bins
self.trg_dset = trg_dset
self.trg_labels = None
if isinstance(self.trg_bins, str):
self.trg_bins = pd.read_csv(self.trg_bins)
msg = 'trg csv can only have one column'
assert len(self.trg_bins.columns.values) == 1, msg
col = self.trg_bins.columns.values[0]
self.trg_bins = self.trg_bins[col].values.tolist()
if self.trg_bins is not None:
# bins must be in monotonic ascending order for pd.cut but labels
# should be ordered however the input is received
self.trg_labels = [i + 1 for i in range(len(self.trg_bins) - 1)]
incr = (np.diff(self.trg_bins) > 0).all()
if not incr:
self.trg_bins = self.trg_bins[::-1]
self.trg_labels.reverse()
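            # Illustrative example: trg_bins=[100, 80, 60] is re-ordered to [60, 80, 100]
            # for pd.cut while the labels become [2, 1], so the (80, 100] interval keeps
            # the label of the original first (highest) bin.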
self._excl_lat = None
self._excl_lon = None
self._full_lat_slice = None
self._full_lon_slice = None
self._init_lat_lon()
self._inclusion_mask = None
self._techmap_data = None
if pre_extract_inclusions:
logger.info('Pre-extracting exclusions mask, '
'this could take a while...')
with ExclusionMaskFromDict(self._excl_fpath) as excl:
self._techmap_data = excl.excl_h5[self._techmap_dset]
self._techmap_data = self._techmap_data.astype(np.int32)
self._inclusion_mask = \
ExclusionMaskFromDict.extract_inclusion_mask(
self._excl_fpath, self._techmap_dset,
excl_dict=self._excl_dict)
@classmethod
def _parse_cluster_arg(cls, rpm_clusters):
"""Parse dataframe from cluster input arg.
Parameters
----------
rpm_clusters : pd.DataFrame | str
Single DataFrame with (gid, gen_gid, cluster_id, rank),
or str to file.
Returns
-------
clusters : pd.DataFrame
Single DataFrame with (gid, gen_gid, cluster_id, rank,
latitude, longitude)
"""
clusters = None
if isinstance(rpm_clusters, pd.DataFrame):
clusters = rpm_clusters
elif isinstance(rpm_clusters, str):
if rpm_clusters.endswith('.csv'):
clusters = pd.read_csv(rpm_clusters)
elif rpm_clusters.endswith('.json'):
clusters = pd.read_json(rpm_clusters)
if clusters is None:
raise RPMTypeError('Expected a DataFrame or str but received {}'
.format(type(rpm_clusters)))
cls._check_cluster_cols(clusters)
return clusters
@staticmethod
def _check_cluster_cols(df, required=('gen_gid', 'gid', 'latitude',
'longitude', 'cluster_id', 'rank')):
"""Check for required columns in the rpm cluster dataframe.
Parameters
----------
df : pd.DataFrame
Single DataFrame with columns to check
"""
missing = []
for c in required:
if c not in df:
missing.append(c)
if any(missing):
raise RPMRuntimeError('Missing the following columns in RPM '
'clusters input df: {}'.format(missing))
def _init_lat_lon(self):
"""Initialize the lat/lon arrays and reduce their size."""
if self._excl_fpath is not None:
self._full_lat_slice, self._full_lon_slice = \
self._get_lat_lon_slices(cluster_id=None)
logger.debug('Initial lat/lon shape is {} and {} and '
'range is {} - {} and {} - {}'
.format(self.excl_lat.shape, self.excl_lon.shape,
self.excl_lat.min(), self._excl_lat.max(),
self.excl_lon.min(), self._excl_lon.max()))
self._excl_lat = self._excl_lat[self._full_lat_slice,
self._full_lon_slice]
self._excl_lon = self._excl_lon[self._full_lat_slice,
self._full_lon_slice]
logger.debug('Reduced lat/lon shape is {} and {} and '
'range is {} - {} and {} - {}'
.format(self.excl_lat.shape, self.excl_lon.shape,
self.excl_lat.min(), self._excl_lat.max(),
self.excl_lon.min(), self._excl_lon.max()))
@staticmethod
def _get_tm_data(excl, techmap_dset, lat_slice, lon_slice):
"""Get the techmap data.
Parameters
----------
excl : ExclusionMask | ExclusionMaskFromDict
Pre-initialized exclusions mask object.
techmap_dset : str
Dataset name in the exclusions file containing the
exclusions-to-resource mapping data.
lat_slice : slice
The latitude (row) slice to extract from the exclusions or
techmap 2D datasets.
lon_slice : slice
The longitude (col) slice to extract from the exclusions or
techmap 2D datasets.
Returns
-------
techmap : np.ndarray
Techmap data mapping exclusions grid to resource gid (flattened).
"""
if isinstance(excl, (ExclusionMask, ExclusionMaskFromDict)):
techmap = excl.excl_h5[techmap_dset, lat_slice, lon_slice]
else:
e = 'Cannot recognize exclusion type: {}'.format(type(excl))
logger.error(e)
raise TypeError(e)
techmap = techmap.astype(np.int32).flatten()
return techmap
@staticmethod
def _get_incl_mask(excl, lat_slice, lon_slice):
"""Get the exclusions data from a geotiff file.
Parameters
----------
excl : ExclusionMask | ExclusionMaskFromDict
Pre-initialized exclusions mask object.
lat_slice : slice
The latitude (row) slice to extract from the exclusions or
techmap 2D datasets.
lon_slice : slice
The longitude (col) slice to extract from the exclusions or
techmap 2D datasets.
Returns
-------
incl_data : np.ndarray
Inclusions data mask flattened and normalized
from 0 to 1 (1 is incld).
"""
if isinstance(excl, (ExclusionMask, ExclusionMaskFromDict)):
incl_data = excl[lat_slice, lon_slice]
else:
e = 'Cannot recognize exclusion type: {}'.format(type(excl))
logger.error(e)
raise TypeError(e)
# infer exclusions that are scaled percentages from 0 to 100
if incl_data.max() > 1:
incl_data = incl_data.astype(np.float32)
incl_data /= 100
return incl_data.flatten()
def _get_lat_lon_slices(self, cluster_id=None, margin=0.1):
"""Get the slice args to locate exclusion/techmap data of interest.
Parameters
----------
cluster_id : str | None
Single cluster ID of interest or None for full region.
margin : float
Extra margin around the cluster lat/lon box.
Returns
-------
lat_slice : slice
The latitude (row) slice to extract from the exclusions or
techmap 2D datasets.
lon_slice : slice
The longitude (col) slice to extract from the exclusions or
techmap 2D datasets.
"""
box = self._get_coord_box(cluster_id)
mask = ((self.excl_lat > np.min(box['latitude']) - margin)
& (self.excl_lat < np.max(box['latitude']) + margin)
& (self.excl_lon > np.min(box['longitude']) - margin)
& (self.excl_lon < np.max(box['longitude']) + margin))
if not mask.any():
msg = ('Lat Lon box retrieval failed for cluster "{}". The '
'exclusion lat min/max is {:.2f}/{:.2f} and lon min/max '
'is {:.2f}/{:.2f} while the cluster box is: {}'
.format(cluster_id,
self.excl_lat.min(), self.excl_lat.max(),
self.excl_lon.min(), self.excl_lon.max(),
box))
logger.error(msg)
raise RPMRuntimeError(msg)
lat_locs, lon_locs = np.where(mask)
if self._full_lat_slice is None and self._full_lon_slice is None:
lat_slice = slice(np.min(lat_locs), 1 + np.max(lat_locs))
lon_slice = slice(np.min(lon_locs), 1 + np.max(lon_locs))
else:
lat_slice = slice(
self._full_lat_slice.start + np.min(lat_locs),
1 + self._full_lat_slice.start + np.max(lat_locs))
lon_slice = slice(
self._full_lon_slice.start + np.min(lon_locs),
1 + self._full_lon_slice.start + np.max(lon_locs))
return lat_slice, lon_slice
def _get_all_lat_lon_slices(self, margin=0.1, free_mem=True):
"""Get the slice args for all clusters.
Parameters
----------
margin : float
Extra margin around the cluster lat/lon box.
free_mem : bool
Flag to free lat/lon arrays from memory to clear space for later
exclusion processing.
Returns
-------
slices : dict
Dictionary of tuples - (lat_slice, lon_slice) slices
keyed by cluster id.
"""
slices = {}
for cid in self._clusters['cluster_id'].unique():
slices[cid] = self._get_lat_lon_slices(cluster_id=cid,
margin=margin)
if free_mem:
# free up memory
self._excl_lat = None
self._excl_lon = None
self._full_lat_slice = None
self._full_lon_slice = None
return slices
def _get_coord_box(self, cluster_id=None):
"""Get the RPM cluster latitude/longitude range.
Parameters
----------
cluster_id : str | None
Single cluster ID of interest or None for all clusters in
self._clusters.
Returns
-------
coord_box : dict
Bounding box of the cluster or region:
{'latitude': (lat_min, lat_max),
'longitude': (lon_min, lon_max)}
"""
if cluster_id is not None:
mask = (self._clusters['cluster_id'] == cluster_id)
else:
mask = len(self._clusters) * [True]
lat_range = (self._clusters.loc[mask, 'latitude'].min(),
self._clusters.loc[mask, 'latitude'].max())
lon_range = (self._clusters.loc[mask, 'longitude'].min(),
self._clusters.loc[mask, 'longitude'].max())
box = {'latitude': lat_range, 'longitude': lon_range}
return box
@property
def excl_lat(self):
"""Get the full 2D array of latitudes of the exclusion grid.
Returns
-------
_excl_lat : np.ndarray
2D array representing the latitudes at each exclusion grid cell
"""
if self._excl_lat is None and self._excl_fpath is not None:
with Outputs(self._excl_fpath) as f:
logger.debug('Importing Latitude data from techmap...')
self._excl_lat = f['latitude']
return self._excl_lat
@property
def excl_lon(self):
"""Get the full 2D array of longitudes of the exclusion grid.
Returns
-------
_excl_lon : np.ndarray
2D array representing the latitudes at each exclusion grid cell
"""
if self._excl_lon is None and self._excl_fpath is not None:
with Outputs(self._excl_fpath) as f:
logger.debug('Importing Longitude data from techmap...')
self._excl_lon = f['longitude']
return self._excl_lon
@classmethod
def _single_excl(cls, cluster_id, clusters, excl_fpath, excl_dict,
techmap_dset, lat_slice, lon_slice, techmap_subset=None,
incl_mask_subset=None):
"""Calculate the exclusions for each resource GID in a cluster.
Parameters
----------
cluster_id | |
self.control)
self.ddLuu_path_fn = casadi.Function('ddLuu_path',
[self.state, self.control, costate, v_path, w_path, self.auxvar],
[self.ddLuu_path])
self.ddLue_path = jacobian(self.dLu_path, self.auxvar)
self.ddLue_path_fn = casadi.Function('ddHue_path',
[self.state, self.control, costate, v_path, w_path, self.auxvar],
[self.ddLue_path])
# Define the final Hamiltonian-Lagrangian function (final_H)
v_final = casadi.SX.sym('v_final', self.n_final_inequ_cstr)
w_final = casadi.SX.sym('nu_final', self.n_final_equ_cstr)
        # handle the cases where no inequality or equality constraints are present
self.L_final = self.final_cost
if self.final_inequ_cstr is not None:
self.L_final = self.L_final + dot(self.final_inequ_cstr, v_final)
if self.final_equ_cstr is not None:
self.L_final = self.L_final + dot(self.final_equ_cstr, w_final)
# First-order derivative of final Hamiltonian-Lagrangian
self.dLx_final = jacobian(self.L_final, self.state).T
self.dLx_final_fn = casadi.Function('dLx_final',
[self.state, v_final, w_final, self.auxvar],
[self.dLx_final])
# Second order differential of final Hamiltonian-Lagrangian
self.ddLxx_final = jacobian(self.dLx_final, self.state)
self.ddLxx_final_fn = casadi.Function('ddLxx_final', [self.state, v_final, w_final, self.auxvar],
[self.ddLxx_final])
self.ddLxe_final = jacobian(self.dLx_final, self.auxvar)
self.ddLxe_final_fn = casadi.Function('ddLxe_final', [self.state, v_final, w_final, self.auxvar],
[self.ddLxe_final])
# differentiate the path equality constraint if exist
if self.path_equ_cstr is not None:
self.dHx_path = jacobian(self.path_equ_cstr, self.state)
self.dHx_path_fn = Function('dHx_path_fn', [self.state, self.control, self.auxvar],
[self.dHx_path])
self.dHu_path = jacobian(self.path_equ_cstr, self.control)
self.dHu_path_fn = Function('dHu_path_fn', [self.state, self.control, self.auxvar],
[self.dHu_path])
self.dHe_path = jacobian(self.path_equ_cstr, self.auxvar)
self.dHe_path_fn = Function('dHe_path_fn', [self.state, self.control, self.auxvar],
[self.dHe_path])
# differentiate the final equality constraint if exist
if self.final_equ_cstr is not None:
self.dHx_final = jacobian(self.final_equ_cstr, self.state)
self.dHx_final_fn = Function('dHx_final_fn', [self.state, self.auxvar],
[self.dHx_final])
self.dHe_final = jacobian(self.final_equ_cstr, self.auxvar)
self.dHe_final_fn = Function('dHe_final_fn', [self.state, self.auxvar],
[self.dHe_final])
# differentiate the path inequality constraint if exist
if self.path_inequ_cstr is not None:
self.dGx_path = jacobian(self.path_inequ_cstr, self.state)
self.dGx_path_fn = Function('dGx_path_fn', [self.state, self.control, self.auxvar],
[self.dGx_path])
self.dGu_path = jacobian(self.path_inequ_cstr, self.control)
self.dGu_path_fn = Function('dGu_path_fn', [self.state, self.control, self.auxvar],
[self.dGu_path])
self.dGe_path = jacobian(self.path_inequ_cstr, self.auxvar)
self.dGe_path_fn = Function('dGe_path_fn', [self.state, self.control, self.auxvar],
[self.dGe_path])
# differentiate the final inequality constraint if exist
if self.final_inequ_cstr is not None:
self.dGx_final = jacobian(self.final_inequ_cstr, self.state)
self.dGx_final_fn = Function('dHx_final_fn', [self.state, self.auxvar],
[self.dGx_final])
self.dGe_final = jacobian(self.final_inequ_cstr, self.auxvar)
self.dGe_final_fn = Function('dHe_final_fn', [self.state, self.auxvar],
[self.dGe_final])
# differentiate the initial condition if parameterized
if self.init_condition is not None:
self.dX0 = jacobian(self.init_condition, self.state)
self.dx0_fn = Function('dx0_fn', [self.auxvar], [self.dX0])
else:
self.dX0_fn = Function('dx0_fn', [self.auxvar], [SX.zeros(self.n_state, self.n_auxvar)])
    # get the auxiliary control system (here the threshold is used to determine the active inequality constraints)
def getAuxSys(self, opt_sol, threshold=1e-2):
# parse the optimal solution argument opt_sol
state_traj_opt = opt_sol['state_traj_opt']
control_traj_opt = opt_sol['control_traj_opt']
costate_traj = opt_sol['costate_traj_opt']
auxvar_value = opt_sol['auxvar_value']
v_path = opt_sol['v_path']
g_path = opt_sol['inequ_path']
v_final = opt_sol['v_final']
g_final = opt_sol['inequ_final']
w_path = opt_sol['w_path']
w_final = opt_sol['w_final']
# in case of not differentiating the PMP
if not hasattr(self, 'dLx_path'):
self.diffCPMP()
# Initialize the coefficient matrices of the auxiliary control system: note that all the notations used here are
# consistent with the notations defined in the constraint PDP paper.
dynFx_t, dynFu_t, dynFe_t = [], [], []
Lxx_t, Lxu_t, Lxe_t, Lux_t, Luu_t, Lue_t = [], [], [], [], [], []
GbarHx_t, GbarHu_t, GbarHe_t = [], [], [] # this is the concatenated matrix G_bar_t and H_t in the constraint PDP paper
GbarHx_T, GbarHe_T, = [], [] # this is the concatenated matrix G_bar_T and H_T in the constraint PDP paper
horizon = numpy.size(control_traj_opt, 0)
for t in range(horizon):
curr_x = state_traj_opt[t, :]
curr_u = control_traj_opt[t, :]
next_lambda = costate_traj[t, :]
curr_v = v_path[t, :]
curr_w = w_path[t, :]
curr_g = g_path[t, :]
dynFx_t += [self.dfx_fn(curr_x, curr_u, auxvar_value).full()]
dynFu_t += [self.dfu_fn(curr_x, curr_u, auxvar_value).full()]
dynFe_t += [self.dfe_fn(curr_x, curr_u, auxvar_value).full()]
Lxx_t += [self.ddLxx_path_fn(curr_x, curr_u, next_lambda, curr_v, curr_w, auxvar_value).full()]
Lxu_t += [self.ddLxu_path_fn(curr_x, curr_u, next_lambda, curr_v, curr_w, auxvar_value).full()]
Lxe_t += [self.ddLxe_path_fn(curr_x, curr_u, next_lambda, curr_v, curr_w, auxvar_value).full()]
Lux_t += [self.ddLux_path_fn(curr_x, curr_u, next_lambda, curr_v, curr_w, auxvar_value).full()]
Luu_t += [self.ddLuu_path_fn(curr_x, curr_u, next_lambda, curr_v, curr_w, auxvar_value).full()]
Lue_t += [self.ddLue_path_fn(curr_x, curr_u, next_lambda, curr_v, curr_w, auxvar_value).full()]
            # generate G_bar_t and H_t, where the active rows of G_bar_t are identified using the threshold
if self.path_inequ_cstr is not None:
Gbarx_t = self.dGx_path_fn(curr_x, curr_u, auxvar_value).full()[(curr_g > -threshold)]
Gbaru_t = self.dGu_path_fn(curr_x, curr_u, auxvar_value).full()[(curr_g > -threshold)]
Gbare_t = self.dGe_path_fn(curr_x, curr_u, auxvar_value).full()[(curr_g > -threshold)]
else:
Gbarx_t = np.empty((0, self.n_state))
Gbaru_t = np.empty((0, self.n_control))
Gbare_t = np.empty((0, self.n_auxvar))
if self.path_equ_cstr is not None:
Hx_t = self.dHx_path_fn(curr_x, curr_u, auxvar_value).full()
Hu_t = self.dHu_path_fn(curr_x, curr_u, auxvar_value).full()
He_t = self.dHe_path_fn(curr_x, curr_u, auxvar_value).full()
else:
Hx_t = np.empty((0, self.n_state))
Hu_t = np.empty((0, self.n_control))
He_t = np.empty((0, self.n_auxvar))
GbarHx_t += [np.vstack((Gbarx_t, Hx_t))]
GbarHu_t += [np.vstack((Gbaru_t, Hu_t))]
GbarHe_t += [np.vstack((Gbare_t, He_t))]
# handle the final cost, inequality, equality constraints
Lxx_T = [self.ddLxx_final_fn(state_traj_opt[-1, :], v_final, w_final, auxvar_value).full()]
Lxe_T = [self.ddLxe_final_fn(state_traj_opt[-1, :], v_final, w_final, auxvar_value).full()]
if self.final_inequ_cstr is not None:
            Gbarx_T = self.dGx_final_fn(state_traj_opt[-1, :], auxvar_value).full()[(g_final > -threshold)]
Gbare_T = self.dGe_final_fn(state_traj_opt[-1, :], auxvar_value).full()[(g_final > -threshold)]
else:
Gbarx_T = np.empty((0, self.n_state))
Gbare_T = np.empty((0, self.n_auxvar))
if self.final_equ_cstr is not None:
Hx_T = self.dHx_final_fn(state_traj_opt[-1, :], auxvar_value).full()
He_T = self.dHe_final_fn(state_traj_opt[-1, :], auxvar_value).full()
else:
Hx_T = np.empty((0, self.n_state))
He_T = np.empty((0, self.n_auxvar))
GbarHx_T += [np.vstack((Gbarx_T, Hx_T))]
GbarHe_T += [np.vstack((Gbare_T, He_T))]
# print(GbarHx_T, GbarHe_T)
# return the axuliary control system
X0 = self.dX0_fn(auxvar_value).full()
auxSys = {"dynFx_t": dynFx_t,
"dynFu_t": dynFu_t,
"dynFe_t": dynFe_t,
"Lxx_t": Lxx_t,
"Lxu_t": Lxu_t,
"Lxe_t": Lxe_t,
"Lux_t": Lux_t,
"Luu_t": Luu_t,
"Lue_t": Lue_t,
"Lxx_T": Lxx_T,
"Lxe_T": Lxe_T,
"GbarHx_t": GbarHx_t,
"GbarHu_t": GbarHu_t,
"GbarHe_t": GbarHe_t,
"GbarHx_T": GbarHx_T,
"GbarHe_T": GbarHe_T,
"X0": X0,
"horizon": horizon
}
return auxSys
'''
The following it to solve a constrained optimal control by converting to an unconstrained optimal control then
solve it using PDP.
'''
# This function is to convert a constrained optimal control system into an unconstrained optimal control then
# using PDP
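    # The converted stage cost built below is
    #     L_barrier = L + gamma * sum_k(-log(-g_k)) + (1 / (2 * gamma)) * sum_j(h_j ** 2),
    # i.e. a log-barrier on the inequality constraints g_k <= 0 plus a quadratic penalty
    # on the equality constraints h_j = 0; the same construction is applied to the final cost.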
def convert2BarrierOC(self, gamma=1e-2):
# in case of not differentiating the PMP
if not hasattr(self, 'dLx_path'):
self.diffCPMP()
if not hasattr(self, 'path_equ_cstr'):
self.setPathEquCstr()
if not hasattr(self, 'path_inequ_cstr'):
self.setPathInequCstr()
if not hasattr(self, 'final_inequ_cstr'):
self.setFinalInequCstr()
if not hasattr(self, 'final_equ_cstr'):
self.setFinalEquCstr()
self.barrier_oc = PDP.OCSys()
self.barrier_oc.setAuxvarVariable(self.auxvar)
self.barrier_oc.setStateVariable(self.state)
self.barrier_oc.setControlVariable(self.control)
self.barrier_oc.setDyn(self.dyn)
# natural log barrier for the inequality path constraints
path_inequ_barrier = 0
if self.n_path_inequ_cstr == 1:
path_inequ_barrier += -log(-self.path_inequ_cstr)
else:
for k in range(self.n_path_inequ_cstr):
path_inequ_barrier += -log(-self.path_inequ_cstr[k])
# second-order barrier for the equality path constraints
        path_equ_barrier = 0
        if self.n_path_equ_cstr == 1:
            path_equ_barrier += (self.path_equ_cstr) ** 2
        else:
            for k in range(self.n_path_equ_cstr):
                path_equ_barrier += (self.path_equ_cstr[k]) ** 2
# overall cost plus the barrier in path
path_costbarrier = self.path_cost + gamma * path_inequ_barrier + 0.5 / gamma * path_equ_barrier
self.barrier_oc.setPathCost(path_costbarrier)
# natural log barrier for the inequality final constraints
final_inequ_barrier = 0
if self.n_final_inequ_cstr == 1:
final_inequ_barrier += -log(-self.final_inequ_cstr)
else:
for k in range(self.n_final_inequ_cstr):
final_inequ_barrier += -log(-self.final_inequ_cstr[k])
# second-order barrier for the equality final constraints
final_equ_barrier = 0
if self.n_final_equ_cstr == 1:
final_equ_barrier += (self.final_equ_cstr) ** 2
else:
for k in range(self.n_final_equ_cstr):
final_equ_barrier += (self.final_equ_cstr[k]) ** 2
# overall cost plus the barrier at final
final_costbarrier = self.final_cost + gamma * final_inequ_barrier + 0.5 / gamma * final_equ_barrier
self.barrier_oc.setFinalCost(final_costbarrier)
# differentiating PDP for the barrier optimal control
self.barrier_oc.diffPMP()
# create the equality constraints lqr solver object
self.lqr_solver_barrierOC = PDP.LQR()
# compute the unconstrained optimal control using PDP
def solveBarrierOC(self, horizon, init_state=None, auxvar_value=1):
if init_state is None:
init_state = self.init_condition_fn(auxvar_value).full().flatten().tolist()
else:
init_state = casadi.DM(init_state).full().flatten().tolist()
opt_sol = self.barrier_oc.ocSolver(ini_state=init_state, horizon=horizon, auxvar_value=auxvar_value)
return opt_sol
# generate the auxiliary control system using the optimal trajectory
def auxSysBarrierOC(self, opt_sol):
horizon = numpy.size(opt_sol['control_traj_opt'], 0)
auxsys_barrierOC = self.barrier_oc.getAuxSys(state_traj_opt=opt_sol['state_traj_opt'],
control_traj_opt=opt_sol['control_traj_opt'],
costate_traj_opt=opt_sol['costate_traj_opt'],
auxvar_value=opt_sol['auxvar_value'])
self.lqr_solver_barrierOC.setDyn(dynF=auxsys_barrierOC['dynF'], dynG=auxsys_barrierOC['dynG'],
dynE=auxsys_barrierOC['dynE'])
self.lqr_solver_barrierOC.setPathCost(Hxx=auxsys_barrierOC['Hxx'], Huu=auxsys_barrierOC['Huu'],
Hxu=auxsys_barrierOC['Hxu'], Hux=auxsys_barrierOC['Hux'],
Hxe=auxsys_barrierOC['Hxe'], Hue=auxsys_barrierOC['Hue'])
self.lqr_solver_barrierOC.setFinalCost(hxx=auxsys_barrierOC['hxx'], hxe=auxsys_barrierOC['hxe'])
X0 = self.dX0_fn(opt_sol['auxvar_value']).full()
aux_sol = self.lqr_solver_barrierOC.lqrSolver(X0, horizon)
return aux_sol
# This equality constraint LQR solver is mainly based on the paper
# Efficient Computation of Feedback Control for Equality-Constrained LQR by <NAME>.
class EQCLQR:
def __init__(self, project_name='my constraint lqr solver'):
self.project_name = project_name
self.threshold = 1e-5 # this threshold is used to detect the rank of the matrix
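        # Presumably this rank test decides which equality-constraint rows are treated
        # as linearly independent when the constraints are eliminated, following the
        # equality-constrained LQR paper referenced above.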
def setDyn(self, dynFx_t, dynFu_t, dynFe_t):
self.dynFx_t = dynFx_t
self.dynFu_t = dynFu_t
self.dynFe_t = dynFe_t
self.n_state = dynFx_t[0].shape[1]
self.n_control = dynFu_t[0].shape[1]
def setPathCost(self, Lxx_t, Lxu_t, Lxe_t, Lux_t, Luu_t, Lue_t):
self.Lxx_t = Lxx_t
self.Lxu_t = Lxu_t
self.Lxe_t = Lxe_t
self.Lux_t = Lux_t
self.Luu_t = Luu_t
self.Lue_t = Lue_t
def setFinalCost(self, Lxx_T, Lxe_T):
self.Lxx_T = Lxx_T
self.Lxe_T = Lxe_T
def setPathConstraints(self, Gx_t, Gu_t, Ge_t):
self.Gx_t = Gx_t
self.Gu_t = Gu_t
self.Ge_t = Ge_t
def setFinalConstraints(self, Gx_T, Ge_T):
self.Gx_T = Gx_T
self.Ge_T = Ge_T
def eqctlqrSolver(self, init_state=None, horizon=None, threshold=None):
if init_state is None:
init_state = self.init_state
if horizon is None:
horizon = self.horizon
        if threshold is None:
            threshold = self.threshold
#!/usr/local/bin/python3
# -*- coding: UTF-8 -*-
import os
import re
import sys
import time
import subprocess
import multiprocessing
import git
import typing
import itertools
import argparse
import string
import math
import tty
import ctypes
import termios
import xml.etree.ElementTree as ET
COPYRIGHT = b"""/**
* Copyright 2022 <NAME>(<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/\n\n"""
# tqdm has bugs.
# CSI sequence: https://www.liquisearch.com/ansi_escape_code/csi_codes
class ProgressBar:
"""
Progress Bar
...
Attributes
----------
disable: bool
True to disable progress bar (default False)
position: tuple(int, int)
        (row, column) terminal position where the bar is drawn (default None)
progress: float[0.0, 100.0]
percentage of progress
max_width: int
max width of entire progress bar (default terminal.columns)
bar_width: int
bar width of progress bar (default max_width // 2)
Methods
-------
advance(progress)
Inc progress and refresh display
set_progress(progress)
Set progress and refresh display
set_args(**kwargs)
Set arguments and refresh display
add_args(**kwargs)
Add arguments and refresh display
clear()
        Clear the display once
"""
def __init__(
self,
format: str = "{progress} {elapsed} | {bar} | {remaining}",
position: tuple[int, int] = None,
disable: bool = False,
leave: bool = True,
total: float = 100.0,
max_width: int = 0,
bar_width: int = 0,
file=sys.stdout,
keep_cursor_hidden: bool = False,
manager: multiprocessing.Manager = None,
**kwargs,
):
"""
format: progress bar format, internal keys: progress, elapsed, bar, remaining
position: position of bar
disable: True to hide the progress bar
leave: True to leave the progress bar after exit
total: max size to advance
max_width: max width of entire progress bar
bar_width: bar width
file: destination of progress bar (default sys.stdout)
keep_cursor_hidden: True to keep cursor hidden after exit
manager: multiprocessing support
kwargs: custom key-value pairs
"""
self.__file = file
self.__leave = leave
self.__total = total
self.__format = format
self.__kwargs = kwargs
self.__starttime = time.time()
self.__keep_cursor_hidden = keep_cursor_hidden
self.__lock = None if manager is None else manager.Lock()
self.__progress = 0.0 if manager is None else manager.Value(ctypes.c_float, 0.0)
self.disable = disable
self.position = position
self.max_width = max_width if max_width else os.get_terminal_size().columns - 8
self.bar_width = bar_width if bar_width != 0 and bar_width < self.max_width else self.max_width // 2
self.__display() # CSI?25l: Hides the cursor
def __del__(self):
if not self.disable:
outputs = ""
if self.position:
# `CSI n ; m H`: Moves the cursor to row n, column m
outputs += f"\x1B[{self.position[0]};{self.position[1]}H"
if not self.__leave:
# `CSI n k`: Erases part of the line
outputs += "\x1B[2K" # n=2, clear entire line
else:
outputs += "\x1B[E"
if not self.__keep_cursor_hidden:
# `CSI ? 25 h``: Shows the cursor
outputs += "\x1B[?25h"
self.__write(outputs + "\r")
def clear(self):
"""
clear display once time
"""
if not self.disable:
outputs = ""
if self.position:
# `CSI n ; m H`: Moves the cursor to row n, column m
outputs += f"\x1B[{self.position[0]};{self.position[1]}H"
outputs += "\x1B[2K\r" # n=2, clear entire line
self.__write(outputs)
def advance(self, step):
"""
advance progress by <100.0 * step / total>
"""
if self.__lock:
self.__lock.acquire()
last_progress = self.progress
if last_progress != self.__inc_progress(100.0 * step / self.__total):
self.__display()
if self.__lock:
self.__lock.release()
@property
def progress(self):
"""
get progress
"""
if isinstance(self.__progress, float):
return self.__progress
else:
return self.__progress.value
@progress.setter
def progress(self, val):
self.set_progress(val)
def __set_progress(self, progress):
if isinstance(self.__progress, float):
self.__progress = min(progress, 100.0)
return self.__progress
else:
self.__progress.value = min(progress, 100.0)
return self.__progress.value
def __inc_progress(self, progress):
if isinstance(self.__progress, float):
self.__progress = min(self.__progress + progress, 100.0)
return self.__progress
else:
self.__progress.value = min(self.__progress.value + progress, 100.0)
return self.__progress.value
def set_progress(self, progress):
"""
set progress to <progress>
"""
if self.__lock:
self.__lock.acquire()
if self.progress != self.__set_progress(progress):
self.__display()
if self.__lock:
self.__lock.release()
def set_args(self, **kwargs):
if kwargs != self.__kwargs:
self.__kwargs = kwargs
self.__display()
def add_args(self, **kwargs):
self.__kwargs.update(kwargs)
self.__display()
def __display(self):
def format_time(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if mins == 0 and t > 0.1 and s == 0:
s = 1
if h:
return "{0:d}:{1:02d}:{2:02d}".format(h, m, s)
else:
return "{0:02d}:{1:02d}".format(m, s)
def format_bar(p, w):
l = int(w * p / 100.0) if p < 99.9 else w
return ">" * l + " " * (w - l)
def format_all(format, **kwargs):
class BlankFormatter(string.Formatter):
def __init__(self, default=""):
self.default = default
def get_value(self, key, args, kwds):
if isinstance(key, str):
return kwds.get(key, self.default)
else:
                        return super().get_value(key, args, kwds)
text = BlankFormatter().format(format, **kwargs).rstrip()
while text.endswith(":"):
text = text[: len(text) - 1].rstrip()
return text
if not self.disable:
now = time.time()
self.__kwargs["progress"] = "{0:5.1f}%".format(self.progress)
self.__kwargs["bar"] = format_bar(self.progress, self.bar_width)
self.__kwargs["elapsed"] = format_time(now - self.__starttime)
self.__kwargs["remaining"] = (
format_time((now - self.__starttime) * (100 - self.progress) / self.progress)
if self.progress
else "--:--"
)
outputs = "\x1B[?25l"
if self.position:
# `CSIn;mH`: Moves the cursor to row n, column m
outputs += f"\x1B[s\x1B[{self.position[0]};{self.position[1]}H"
else:
outputs += "\r"
line = format_all(self.__format, **self.__kwargs)
if self.max_width != 0:
if len(line) > self.max_width:
truncated = "..."
if self.max_width >= len(truncated):
line = line[: (self.max_width - len(truncated))] + truncated + "\x1b[0m"
else:
line = line[: self.max_width] + "\x1b[0m"
outputs += line + "\x1B[0K"
if self.position:
outputs += "\x1B[u"
self.__write(outputs)
def __write(self, str):
file = self.__file if self.__file else sys.stdout
file.write(str)
file.flush()
@staticmethod
def getpos():
buf = ""
stdin = sys.stdin.fileno()
tattr = termios.tcgetattr(stdin)
try:
tty.setcbreak(stdin, termios.TCSANOW)
sys.stdout.write("\x1b[6n")
sys.stdout.flush()
while True:
buf += sys.stdin.read(1)
if buf[-1] == "R":
break
finally:
termios.tcsetattr(stdin, termios.TCSANOW, tattr)
        # Parse the reported values. A stray keystroke while reading from stdin
        # would corrupt the response; as a crude workaround, getpos() returns
        # None if parsing fails.
try:
matches = re.match(r"^\x1b\[(\d*);(\d*)R", buf)
groups = matches.groups()
except AttributeError:
return None
return (int(groups[0]), int(groups[1]))
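# --- Usage sketch (illustrative only) ----------------------------------------
# A minimal single-process example of the ProgressBar API above; the loop body
# is a placeholder for real work, and "desc" is a custom kwarg that only shows
# up because it is referenced in the format string.
#
#   bar = ProgressBar(format="{desc} {progress} | {bar} | {remaining}",
#                     total=50, leave=False)
#   for i in range(50):
#       time.sleep(0.05)                       # stand-in for real work
#       bar.set_args(desc=f"step {i + 1}/50")  # refreshes the display
#       bar.advance(1)                         # +1 out of `total`
#   bar.clear()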
class ProgressBars:
def __init__(
self,
format,
size,
max_width=os.get_terminal_size().columns,
bar_width=0,
disable=False,
leave=False,
initial_pos=None,
overall_total=0,
manager: multiprocessing.Manager = None,
):
offset = 0
if overall_total != 0:
self.overall = ProgressBar(
leave=True,
disable=disable,
total=overall_total,
file=None,
max_width=max_width,
bar_width=bar_width,
position=initial_pos,
keep_cursor_hidden=True,
manager=manager,
)
offset = 1
else:
self.overall = None
self.bars = [
ProgressBar(
format,
disable=disable,
file=None,
max_width=max_width,
bar_width=bar_width,
                leave=True,  # keep each bar on screen even if the other bars have not finished
position=((initial_pos[0] + i), initial_pos[1]) if initial_pos else None,
keep_cursor_hidden=True,
manager=manager,
)
for i in range(offset, size + offset)
]
self.__leave = leave
self.disable = disable
self.initial_pos = initial_pos
def __del__(self):
off = 0 if self.overall is None else 1
if self.__leave:
off += len(self.bars)
self.bars = []
else:
[bar.clear() for bar in self.bars]
if not self.disable:
outputs = ""
if self.initial_pos:
# `CSI n ; m H`: Moves the cursor to row n, column m
outputs += f"\x1B[2K\x1B[{self.initial_pos[0] + off};{self.initial_pos[1]}H"
outputs += "\x1B[?25h"
sys.stdout.write(outputs)
sys.stdout.flush()
def __getitem__(self, index):
return self.bars[index % len(self.bars)]
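# --- Usage sketch (illustrative only) ----------------------------------------
# A hypothetical multi-process layout: one overall bar plus one bar per worker.
# `tasks` and the per-worker reporting are placeholders, not part of this file.
#
#   manager = multiprocessing.Manager()
#   bars = ProgressBars("{desc} {progress} | {bar}", size=4,
#                       initial_pos=ProgressBar.getpos(),
#                       overall_total=len(tasks), manager=manager)
#   # worker i updates bars[i]; each finished task calls bars.overall.advance(1)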
"""a replacement instance within a single file"""
class Replacement:
def __init__(self, offset, length, content, path=None):
self.offset = offset
self.length = length
self.content = content
self.path = path
def __hash__(self):
return hash((self.offset, self.length))
def __eq__(self, other):
return (self.offset, self.length) == (other.offset, other.length)
def __repr__(self):
return f"offset={self.offset} " f"length={self.length} content={self.content}"
def get_affected_commit(self, file, repo: git.Repo, blame_info, bar: ProgressBar):
def offset_to_line(file, offset):
file.seek(0)
content = file.read(offset)
return content.decode("utf-8", "ignore").count("\n") + 1
def map_of_line_commit(blame_info):
result = {}
current_line = 0
for one_blame in blame_info:
for one_line in one_blame[1]:
current_line += 1
result[current_line] = one_blame
return result
first_line_offset = offset_to_line(file, self.offset)
last_line_offset = offset_to_line(file, max(self.offset, self.offset + self.length - 1))
# fetch blame information for this replacement.
original_blames = []
line_commit_map = map_of_line_commit(blame_info)
for i in range(first_line_offset, last_line_offset + 1):
if i >= len(line_commit_map):
                # fall back to the first line's blame when the index runs past
                # the mapped lines (this typically only happens for the last line)
original_blames.append(line_commit_map[1][0])
continue
if line_commit_map[i][0] not in original_blames:
original_blames.append(line_commit_map[i][0])
if len(original_blames) > 1 and original_blames[0].author != original_blames[1].author:
bar.add_args(
desc=f"\x1B[33m{first_line_offset}-{last_line_offset} modified by {list(map(lambda b: str(b.author), original_blames))}. regard as modified by {original_blames[-1].author}\x1B[0m"
)
return original_blames[-1]
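# --- Usage sketch (illustrative only) ----------------------------------------
# `blame_info` is expected in the shape returned by GitPython's Repo.blame,
# i.e. a list of (commit, [lines]) pairs for one file; the calls below are a
# hypothetical example of resolving the commit touched by one replacement.
#
#   repo = git.Repo(".", search_parent_directories=True)
#   blame_info = repo.blame("HEAD", "src/example.cc")
#   with open("src/example.cc", "rb") as f:
#       commit = replacement.get_affected_commit(f, repo, blame_info, bar)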
"""apply |