metadata (dict) | text (string, 60 to 3.49M characters)
---|---
{
"source": "jfposton/Stars-CMS",
"score": 2
}
|
#### File: management/commands/checkmemberstatus.py
```python
from django.core.management.base import BaseCommand
from django.core.management import CommandError
from django.conf import settings
from django.db.models import Q
from django.contrib.auth.models import User
from cms.models import Member
from sys import stdin
import re
ncsu_email_regex = re.compile(r'(?P<unity_id>[a-zA-Z0-9._%+-]+)@ncsu\.edu')
name_regex = re.compile(r'^[-a-zA-Z]+$')
class Command(BaseCommand):
args = '<file>'
help = 'Checks that all members listed in <file> are active/empty and that all others are inactive'
def handle(self, *args, **kwargs):
if len(args) == 0:
raise CommandError('Missing <file> argument')
if args[0] == '-':
f = stdin
else:
try:
f = open(args[0], 'r')
except IOError as e:
raise CommandError('Unable to open file "%s": %s' % (args[0], e))
listed_members = []
for l in f.readlines():
l = l.strip()
# try to extract a Unity id
match = re.search(ncsu_email_regex, l)
if match:
user_id = match.groupdict()['unity_id']
# retrieve User using Unity id
try:
user = User.objects.get(username=user_id)
# found
listed_members += [ user.get_profile() ]
print 'found %s by user id %s' % (user.get_full_name(), user_id)
# and done
continue
except User.DoesNotExist:
# not found, try another method
pass
# try to extract a name
names = filter(lambda x: re.match(name_regex, x), l.split(' '))
# require at least first and last name
if len(names) >= 2:
# search by last name
try:
user = User.objects.get(last_name=names[-1])
# found with unique last name
listed_members += [ user.get_profile() ]
print 'found %s by unique last name %s' % (user.get_full_name(), names[-1])
# and done
continue
except User.MultipleObjectsReturned:
# check first name
candidates = filter(lambda x: names[0] in x.first_name, User.objects.filter(last_name=names[-1]))
if len(candidates) == 1:
# found with unique first and last name pair
user = candidates[0]
listed_members += [ user.get_profile() ]
print 'found %s by unique first and last name pair %s %s' % (user.get_full_name(), names[0], names[-1])
# and done
continue
else:
# not found, try another method
pass
except User.DoesNotExist:
# not found, try another method
pass
# give up and ask operator
user = None
while user is None:
user_id = raw_input('Which username belongs with %s?: ' % (l, ))
try:
user = User.objects.get(username=user_id)
# found with operator assistance
listed_members += [ user.get_profile() ]
print 'found %s with operator assistance' % (user.get_full_name(),)
# and done
break
except User.DoesNotExist:
print 'username %s not found' % (user_id,)
listed_pks = map(lambda x: x.pk, listed_members)
members_to_activate = Member.objects.filter(Q(pk__in=listed_pks) & (Q(status=Member.STATUS_ARCHIVED) | Q(user__is_active=False)))
members_to_archive = Member.objects.filter(~Q(pk__in=listed_pks) & (~Q(status=Member.STATUS_ARCHIVED) | Q(user__is_active=True)))
print '====== ACTIVATE ======'
for m in members_to_activate:
print m.user.get_full_name()
print '====== ARCHIVE ======='
for m in members_to_archive:
print m.user.get_full_name()
if len(members_to_activate) == 0 and len(members_to_archive) == 0:
print 'Nothing to do'
raise SystemExit
if raw_input('Does this look right? [y/N] ').lower() != 'y':
print 'Aborted'
raise SystemExit
for m in members_to_activate:
m.status = Member.STATUS_ACTIVE
m.save()
m.user.is_active = True
m.user.save()
for m in members_to_archive:
m.status = Member.STATUS_ARCHIVED
m.save()
m.user.is_active = False
m.user.save()
print 'Done'
```
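A quick way to sanity-check the Unity-ID extraction used by this command, without touching Django, is to run the same two regexes over a few sample roster lines. This is a minimal standalone sketch; the sample lines are invented for illustration.
```python
import re

ncsu_email_regex = re.compile(r'(?P<unity_id>[a-zA-Z0-9._%+-]+)@ncsu\.edu')
name_regex = re.compile(r'^[-a-zA-Z]+$')

# Invented roster lines: one matched by e-mail, one that falls back to name tokens.
sample_lines = [
    'Jane Doe <jdoe3@ncsu.edu>',
    'John Q. Public',
]

for line in sample_lines:
    match = ncsu_email_regex.search(line)
    if match:
        print('unity id: %s' % match.groupdict()['unity_id'])
    else:
        # same fallback as the command: keep tokens that look like plain name parts
        names = [t for t in line.split(' ') if name_regex.match(t)]
        print('name tokens: %s' % names)
```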
#### File: Stars-CMS/cms/permissions.py
```python
from django.contrib.auth.models import User, Group
from cms.models import Member, Project, ProjectMember, BlogPost
from django.conf import settings
def is_user_slc_leader(user):
if user is None or user.is_anonymous():
return False
else:
return (user.get_full_name() in settings.SLC_LEADERS)
def can_user_create_project(user):
# only the SLC leader can create projects through the main interface
return is_user_slc_leader(user)
def can_user_edit_project(user, project):
# only project coordinators (active/empty projects) and the SLC leader
# can edit projects through the main interface
# editing of archived projects is disallowed except for SLC leader
if user.is_anonymous():
return False
if is_user_slc_leader(user):
return True
try:
return (project.status != Project.STATUS_ARCHIVED) and project.is_member_coordinator(user.get_profile())
except Member.DoesNotExist:
return False
def can_user_delete_project(user, project):
# only the SLC leader can delete projects through the main interface
# project status is not checked since the SLC leader user overrides
return is_user_slc_leader(user)
def can_user_demote_project_coordinators(user, project):
# only the SLC leader can demote project coordinators through the main interface
return is_user_slc_leader(user)
def can_user_create_member(user):
# only the SLC leader can create members through the main interface
return is_user_slc_leader(user)
def can_user_edit_member(user, member):
# only the user that owns a member profile and the SLC leader can perform edits
# through the main interface
# but only if that member is not archived for normal users (strictly not necessary since inactive user cannot log in)
return ((member.status != Member.STATUS_ARCHIVED) and (user == member.user)) or is_user_slc_leader(user)
def can_user_delete_member(user, member):
# only the SLC leader can delete members through the main interface
# member status is not checked since the SLC leader user overrides
return is_user_slc_leader(user)
def can_user_archive_member(user, member):
# only the SLC leader can archive members through the main interface
# member status is not checked since the SLC leader user overrides
return is_user_slc_leader(user)
def can_user_reactivate_member(user, member):
# only the SLC leader can reactivate members through the main interface
return is_user_slc_leader(user)
def can_user_post_as_member(user, member):
# only the user that owns a member profile can post to that member's blog
# but only if that member is not archived for normal users (strictly not necessary since inactive user cannot log in)
return ((member.status != Member.STATUS_ARCHIVED) and (user == member.user))
def can_user_edit_blogpost(user, blogpost):
# only blogpost authors and the SLC leader can edit blogposts
# through the main interface
# but only if that member is not archived for normal users (strictly not necessary since inactive user cannot log in)
return ((blogpost.author.status != Member.STATUS_ARCHIVED) and (user == blogpost.author.user)) or is_user_slc_leader(user)
def can_user_create_page(user):
# only the SLC leader can create pages through the main interface
return is_user_slc_leader(user)
def can_user_edit_page(user, page):
# only the SLC leader can edit pages through the main interface
return is_user_slc_leader(user)
def can_user_delete_page(user, page):
# only the SLC leader can delete pages through the main interface
return is_user_slc_leader(user)
def can_user_delete_sponsor(user):
# only the SLC leader can delete sponsors through the main interface
return is_user_slc_leader(user)
def can_user_create_sponsor(user):
# only the SLC leader can create sponsors through the main interface
return is_user_slc_leader(user)
```
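Since these helpers are plain functions, a view can gate an action with an early return. The sketch below is illustrative only: the view name, URL parameter, and template path are assumptions and not part of this module.
```python
# Hypothetical view-level use of cms.permissions (view name and template are illustrative).
from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404, render

from cms.models import Project
from cms import permissions


def edit_project(request, pk):
    project = get_object_or_404(Project, pk=pk)
    if not permissions.can_user_edit_project(request.user, project):
        return HttpResponseForbidden('You may not edit this project.')
    return render(request, 'projects/edit.html', {'project': project})
```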
#### File: Stars-CMS/cms/signals.py
```python
from django.conf import settings
from django.dispatch import Signal, receiver
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save, post_save
from django.contrib.sites.models import Site
from cms.models import Member, ProjectMember, Project
create_profile = Signal(providing_args=['user', 'group', 'classification'])
assign_coordinators = Signal(providing_args=['project', 'members'])
# Create a member profile and a project member profile
def create_profile_handler(sender, **kwargs):
user = kwargs.get('user', None)
group = kwargs.get('group', None)
classification = kwargs.get('classification', None)
member = Member(user=user, group=group, status=Member.STATUS_EMPTY, classification=classification)
member.save()
context = {
'name': member.user.get_full_name(),
'url': Site.objects.get_current().domain + reverse('cms:activate_member_url', kwargs={'key': member.generate_hashed_email()})
}
subject = 'Welcome to STARS'
message = render_to_string('emails/new_member_email.txt', context)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])
# I don't know why project_member is returned here but it definitely isn't defined here
#return (member, project_member,)
return (member,)
# Create project member profiles and send each coordinator an email
def assign_coordinators_handler(sender, **kwargs):
members = kwargs.get('members', None)
project = kwargs.get('project', None)
for member in members:
project_member = ProjectMember(member=member, project=project, is_coordinator=True, role='Coordinator')
project_member.save()
context = {
'project_url': Site.objects.get_current().domain + reverse('cms:edit_project_url', kwargs={'pk': project.pk}),
'coordinator': project_member.member.user.get_full_name()
}
subject = 'Project created: %s' % project.title
message = render_to_string('emails/project_coordinator_email.txt', context)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [project_member.member.user.email])
return members
create_profile.connect(create_profile_handler)
assign_coordinators.connect(assign_coordinators_handler)
```
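These two signals are fired from elsewhere in the CMS; a minimal illustration of sending them (assuming a saved `User`, a `Group`, a `Project`, and a list of coordinator `Member` objects already exist) looks like this.
```python
# Illustrative only: `new_user`, `group`, `project` and `coordinators` are assumed
# to exist already; the handlers above create the profiles and send the e-mails.
from cms.signals import create_profile, assign_coordinators

create_profile.send(sender=None, user=new_user, group=group,
                    classification='Undergraduate')

assign_coordinators.send(sender=None, project=project, members=coordinators)
```
The `classification` value is a placeholder; use whatever values the `Member` model actually defines.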
#### File: cms/templatetags/filters.py
```python
import re
import calendar
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from cms import permissions
register = template.Library()
@register.filter
@stringfilter
def stripjs(value):
stripped = re.compile(r'<script(?:\s[^>]*)?(>(?:.(?!/script>))*</script>|/>)', re.S).sub('', force_unicode(value))
return mark_safe(stripped)
@register.filter
def logged_in(user):
if user is None or user.id is None:
return False
return user.id != -1
@register.filter
def is_slc_leader(user):
return permissions.is_user_slc_leader(user)
@register.filter
def can_edit_project(user, project):
return permissions.can_user_edit_project(user, project)
@register.filter
def can_delete_project(user, project):
return permissions.can_user_delete_project(user, project)
@register.filter
def can_edit_member(user, member):
return permissions.can_user_edit_member(user, member)
@register.filter
def can_archive_member(user, member):
return permissions.can_user_archive_member(user, member)
@register.filter
def can_reactivate_member(user, member):
return permissions.can_user_reactivate_member(user, member)
@register.filter
def can_delete_member(user, member):
return permissions.can_user_delete_member(user, member)
@register.filter
def can_edit_blogpost(user, blogpost):
return permissions.can_user_edit_blogpost(user, blogpost)
@register.filter
def can_edit_page(user, page):
return permissions.can_user_edit_page(user, page)
@register.filter
def can_delete_page(user, page):
return permissions.can_user_delete_page(user, page)
@register.filter
def can_user_post_as_member(user, member):
return permissions.can_user_post_as_member(user, member)
@register.filter
def is_project_coordinator(member, project):
return project.is_member_coordinator(member)
# ref: http://stackoverflow.com/questions/7385751/how-to-display-month-name-by-number
@register.filter
def month_name(month_number):
return calendar.month_name[month_number]
@register.filter
def projects_by_year(member, year):
return member.project_set.filter(year=year)
@register.filter
def field(obj, fieldname):
return getattr(obj, fieldname)
@register.filter
def can_delete_sponsor(user):
return permissions.can_user_delete_sponsor(user)
```
|
{
"source": "jfpowell/skift",
"score": 3
}
|
#### File: skift/toolbox/ipc-compiler.py
```python
import sys
import os
import pprint
from utils.Lexer import Lexer
from utils.Generator import Generator
from ipc import Parser
from ipc import Emit
pp = pprint.PrettyPrinter(indent=4)
def read_file_to_string(path: str) -> str:
with open(path, 'r') as file:
return file.read()
return ""
lexer = Lexer(read_file_to_string(sys.argv[1]))
prot = Parser.protocol(lexer)
# pp.pprint(prot)
gen = Generator()
Emit.protocol(gen, prot["properties"]["name"], prot)
print(gen.finalize())
```
|
{
"source": "jfpuget/STFT_Transformer",
"score": 2
}
|
#### File: jfpuget/STFT_Transformer/data_final.py
```python
import pandas as pd
import numpy as np
import librosa
from pathlib import Path
import soundfile as sf
from tqdm import tqdm
from pqdm.processes import pqdm
train = pd.read_csv('../input/train_metadata.csv')
train
def ogg2np(ebird, filename):
input_path = Path('../input/')
output_path = Path('../data/')
input_path = input_path / 'train_short_audio' / ebird / filename
filename = '.'.join(filename.split('.')[:-1])
output_path = output_path
record, sr = sf.read(input_path)
record = librosa.to_mono(record)
record = record.astype('float32')
length = record.shape[0]
period = int(np.ceil(5 * sr))
if period == 0:
filename = '%s_%d.npy' % (filename, 0)
np.save(output_path / filename, record)
else:
for i in range(int(np.ceil(length/period))):
filename_i = '%s_%d.npy' % (filename, i)
record_i = record[i*period : (i+3)*period]
np.save(output_path / filename_i, record_i)
return length, sr
res = pqdm(zip(train.primary_label, train.filename), ogg2np, n_jobs=8, argument_type='args')
train['sr'] = [r[1] for r in res]
train['length'] = [r[0] for r in res]
train['duration'] = train['length'] / train['sr']
train['filename'] = [fname[:-4]+'.npy' for fname in train.filename]
train.to_csv('../input/train_001.csv', index=False)
train_ff1010 = pd.read_csv('../input/freefield1010/ff1010bird_metadata.csv')
train_ff1010
train_ff1010 = train_ff1010.sort_values(by='itemid').reset_index(drop=True)
train_ff1010
def get_clip(itemid):
data_path = Path('../input/freefield1010/wav')
path = data_path / ('%d.wav' % itemid)
clip, sr_native = librosa.load(path, sr=None, mono=True, dtype=np.float32)
sr = 32000
if sr_native != 0:
clip = librosa.resample(clip, sr_native, sr, res_type='kaiser_best')
else:
print('null sr_native')
return clip, sr, sr_native
train_ff1010 = train_ff1010[train_ff1010.hasbird == 0].reset_index(drop=True)
train_ff1010
def work_sub(itemid):
output_path = Path('../data/')
clip, sr, sr_native = get_clip(itemid)
clip = clip.astype('float32')
length = clip.shape[0]
filename = 'ff1010_%d_0.npy' % (itemid)
np.save(output_path / filename, clip)
return sr, sr_native, length
res = pqdm(train_ff1010.itemid, work_sub, n_jobs=8)
train_ff1010['primary_label'] = ''
train_ff1010['secondary_labels'] = None
train_ff1010['sr'] = [r[0] for r in res]
train_ff1010['sr_native'] = [r[1] for r in res]
train_ff1010['length'] = [r[2] for r in res]
train_ff1010['duration'] = train_ff1010['length'] / 32000
train_ff1010['filename'] = ['ff1010_%d_0.npy' % (itemid) for itemid in train_ff1010['itemid']]
train_ff1010
train_ff1010['secondary_labels'] = [[]] * len(train_ff1010)
columns = ['duration', 'length', 'primary_label', 'secondary_labels', 'filename']
train_ff1010[columns].to_csv('../input/train_ff1010.csv', index=False)
```
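The slicing in `ogg2np` takes a window of up to three periods (15 s) starting every 5 s, so consecutive clips overlap by 10 s. A numpy-only sketch of that indexing on a fake 12-second signal makes the overlap and the shrinking final chunks easy to see.
```python
import numpy as np

# Fake mono signal: 12 seconds at 32 kHz (sample values are irrelevant here).
sr = 32000
record = np.zeros(12 * sr, dtype='float32')

length = record.shape[0]
period = int(np.ceil(5 * sr))                     # 5-second hop

for i in range(int(np.ceil(length / period))):
    chunk = record[i * period: (i + 3) * period]  # up to 15 s per chunk
    print(i, chunk.shape[0] / sr, 'seconds')
# 0 12.0 seconds   (capped by the end of the recording)
# 1 7.0 seconds
# 2 2.0 seconds
```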
|
{
"source": "jfq3/python_3_scripts",
"score": 3
}
|
#### File: python_3_scripts/FunGene_Pipeline/titanium_options.py
```python
import sys
import pipeline
import configparser
import os
import copy
def options_from_file(filename):
config = configparser.ConfigParser()
config.read(filename)
if not config.has_section("general"):
raise IOError("Option file must have a general section")
if not config.has_option("general", "genes_of_interest"):
raise IOError("general section must have genes_of_interest option")
if not config.has_option("general", "seq_file_pattern"):
raise IOError("general section must have seq_file_pattern option")
if not config.has_option("general", "qual_file_pattern"):
raise IOError("general section must have qual_file_pattern option")
opts = dict()
opts["genes_of_interest"] = [t.strip() for t in config.get("general", "genes_of_interest").split(",")]
opts["seq_file_pattern" ] = config.get("general", "seq_file_pattern")
opts["qual_file_pattern" ] = config.get("general", "qual_file_pattern")
process_notag = True
default_fprimer = "fake_primer"
default_rprimer = None
max_ns = "0"
if config.has_option("general", "default_fprimer"):
default_fprimer = config.get("general", "default_fprimer").replace(" ", "")
config.remove_option("general", "default_fprimer")
if config.has_option("general", "default_rprimer"):
default_rprimer = config.get("general", "default_rprimer").replace(" ", "")
config.remove_option("general", "default_rprimer")
if config.has_option("general", "process_notag"):
process_notag = config.get("general", "process_notag") == "true"
config.remove_option("general", "process_notag")
if config.has_option("general", "max_ns"):
max_ns = config.get("general", "max_ns")
config.remove_option("general", "max_ns")
config.remove_option("general", "genes_of_interest")
config.remove_option("general", "seq_file_pattern")
config.remove_option("general", "qual_file_pattern")
opts["general"] = parse_section(Opts(), config, "general")
for section in config.sections():
if section == "general":
continue
if not section in opts["genes_of_interest"]:
print("WARNING: gene %s present in options but not listed as gene of interest" % section)
opts[section] = parse_section(opts["general"], config, section)
for gene_of_interest in opts["genes_of_interest"]:
if gene_of_interest not in opts:
print("WARNING: gene %s is listed as a gene of interest but not listed in option file, default options will be used" % gene_of_interest)
opts[gene_of_interest] = parse_section(opts["general"], None, gene_of_interest)
opts["general"].process_notag = process_notag
opts["general"].forward_primers = default_fprimer
opts["general"].reverse_primers = default_rprimer
opts["general"].max_ns = max_ns
return opts
def parse_section(default_opts, config, section):
nucl_control = None
prot_control = None
ret = copy.deepcopy(default_opts)
ret.gene_name = section
if not config or not config.has_section(section):
pipeline.setup_controls(ret, nucl_control, prot_control)
return ret
if config.has_option(section, "nucl_control"):
nucl_control = config.get(section, "nucl_control")
config.remove_option(section, "nucl_control")
if config.has_option(section, "prot_control"):
prot_control = config.get(section, "prot_control")
config.remove_option(section, "prot_control")
pipeline.setup_controls(ret, nucl_control, prot_control)
if config.has_option(section, "fedit"):
ret.fedit = config.get(section, "fedit")
config.remove_option(section, "fedit")
if config.has_option(section, "redit"):
ret.redit = config.get(section, "redit")
config.remove_option(section, "redit")
if config.has_option(section, "min_length"):
ret.min_length = config.get(section, "min_length")
config.remove_option(section, "min_length")
if config.has_option(section, "min_qual"):
ret.min_qual = config.get(section, "min_qual")
config.remove_option(section, "min_qual")
if config.has_option(section, "max_ns"):
ret.max_ns = config.get(section, "max_ns")
config.remove_option(section, "max_ns")
if config.has_option(section, "keep_primers"):
ret.keep_primers = config.get(section, "keep_primers").lower() == "true"
config.remove_option(section, "keep_primers")
if config.has_option(section, "gene_name"):
ret.gene_name = config.get(section, "gene_name")
config.remove_option(section, "gene_name")
if config.has_option(section, "framebot_ref_ident"):
ret.framebot_ref_ident = config.get(section, "framebot_ref_ident")
config.remove_option(section, "framebot_ref_ident")
if config.has_option(section, "framebot_minlength"):
ret.framebot_minlength = config.get(section, "framebot_minlength")
config.remove_option(section, "framebot_minlength")
if config.has_option(section, "decontam_cutoff"):
ret.decontam_cutoff = config.get(section, "decontam_cutoff")
config.remove_option(section, "decontam_cutoff")
if config.has_option(section, "chop"):
ret.chop = config.get(section, "chop").lower() == "true"
config.remove_option(section, "chop")
if config.has_option(section, "model_chop_start"):
ret.model_chop_start = int(config.get(section, "model_chop_start"))
config.remove_option(section, "model_chop_start")
else:
ret.model_chop_start = 0
if config.has_option(section, "model_chop_end"):
ret.model_chop_end = int(config.get(section, "model_chop_end"))
config.remove_option(section, "model_chop_end")
else:
raise IOError("Must specify model chop end if you turn on chopping")
else:
ret.chop = False
if config.has_option(section, "use_reverse"):
ret.use_reverse = config.get(section, "use_reverse") == "true"
config.remove_option(section, "use_reverse")
if len(config.items(section)) != 0:
raise IOError("Unknown options %s in section %s" % (config.items(section), section))
return ret
class Opts:
def __init__(self, gene_name = "not_a_gene", fedit = 2, redit = 0, use_reverse_primer = True, keep_primers = False, max_ns = 0, min_length = 300, min_qual = 20, framebot_ref_ident = 0.3, framebot_minlength = 100, nucl_control = None, prot_control = None, decontam_cutoff = 0.1, process_notag=True):
self.gene_name = gene_name
self.fedit = str(fedit)
self.redit = str(redit)
self.min_length = str(min_length)
self.max_ns = str(max_ns)
self.min_qual = str(min_qual)
self.keep_primers = keep_primers
self.framebot_ref_ident = str(framebot_ref_ident)
self.framebot_minlength = str(framebot_minlength)
self.decontamination_cutoff = str(decontam_cutoff)
self.use_reverse = use_reverse_primer
self.process_notag = process_notag
pipeline.setup_controls(self, nucl_control, prot_control)
```
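For reference, `options_from_file` expects an INI file with a mandatory `general` section (carrying `genes_of_interest`, `seq_file_pattern` and `qual_file_pattern`) plus optional per-gene sections. The snippet below writes and re-reads a minimal example using `configparser` alone; the gene names are invented, and actually calling `options_from_file` on it would also require the `pipeline` module to be importable.
```python
import configparser

# Hypothetical minimal options file for options_from_file(); gene names are made up.
sample = """
[general]
genes_of_interest = nifH, amoA
seq_file_pattern = .TCA.454Reads.fna
qual_file_pattern = .TCA.454Reads.qual
default_fprimer = fake_primer

[nifH]
min_length = 250
framebot_ref_ident = 0.4
"""

with open("sample_options.ini", "w") as handle:
    handle.write(sample)

config = configparser.ConfigParser()
config.read("sample_options.ini")
print(config.get("general", "genes_of_interest"))   # nifH, amoA
print(dict(config.items("nifH")))                    # per-gene overrides
```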
#### File: python_3_scripts/FunGene_Pipeline/titanium_run_processor.py
```python
import sys
import os
import re
import copy
import threading
import shutil
from Bio import SeqIO
import configparser
sys.path.append("/work/fishjord/other_projects/new_fgp_scripts")
import pipeline
from pipeline_core import SequenceFile
import pipeline_core
import titanium_options
seq_file_pattern = ".TCA.454Reads.fna"
qual_file_pattern = ".TCA.454Reads.qual"
sample_clean_regex = re.compile(r"[^A-Za-z0-9_\-.]")
class PipelineThread ( threading.Thread ):
def __init__(self, options, in_seqfiles, trace_stream):
threading.Thread.__init__(self)
self.options = copy.copy(options)
self.in_seq_files = in_seqfiles
self.trace_stream = trace_stream
def run(self):
pipeline.run_pipeline(self.options, self.in_seq_files, False, self.trace_stream)
print(self.options.gene_name, "processing completed")
self.trace_stream.close()
class TitaniumDataParser:
def __init__(self, data_stream):
self.stream = data_stream
self.headers = list()
for header in self.stream.readline().strip().split("\t"):
self.headers.append(header.lower())
def read_next(self):
while True:
line = self.stream.readline()
if line == "":
return None
elif line.strip() != "":
break
lexemes = line.strip().split("\t")
if len(lexemes) > len(self.headers):
raise IOError("Line " + line + " has too many fields")
ret = dict()
for i in range(len(lexemes)):
lexeme = lexemes[i].lower()
if self.headers[i] == "bar code name":
ret[self.headers[i]] = lexeme.replace(" ", "")
else:
ret[self.headers[i]] = lexeme
ret["control"] = "control" in ret and ret["control"] == "yes"
return ret
def __iter__(self):
return self
def __next__(self):
ret = self.read_next()
if ret is None:
raise StopIteration
else:
return ret
def close(self):
self.stream.close()
def relative_to_abs(path):
if "PWD" in os.environ:
return os.path.join(os.environ["PWD"], path)
else:
# no PWD in the environment: fall back to resolving against the current directory
return os.path.abspath(path)
def do_init_process(region, lines, trace_stream, seq_file_dir, options):
workdir = "region_%s" % region
if not os.path.exists(workdir):
os.mkdir(workdir)
init_process_opts = options["general"]
tag_file = os.path.join(workdir, "tags.txt")
tag_stream = open(tag_file, "w")
for line in lines:
gene_name = line["gene"]
if gene_name not in options:
gene_opts = init_process_opts
print("INFO: gene %s has no custom options, using defaults" % gene_name)
else:
gene_opts = options[gene_name]
print("INFO: gene %s has custom options" % gene_name)
extended_opts = []
extended_opts.append("fprimer=" + line["target sequence"].upper())
if gene_opts.use_reverse:
extended_opts.append("rprimer=" + line["reverse primer sequence"].upper())
else:
extended_opts.append("rprimer=")
if init_process_opts.fedit != gene_opts.fedit:
extended_opts.append("fedit=" + gene_opts.fedit)
if init_process_opts.redit != gene_opts.redit:
extended_opts.append("redit=" + gene_opts.redit)
if init_process_opts.min_length != gene_opts.min_length:
extended_opts.append("min_length=" + gene_opts.min_length)
if init_process_opts.min_qual != gene_opts.min_qual:
extended_opts.append("min_qual=" + gene_opts.min_qual)
if init_process_opts.max_ns != gene_opts.max_ns:
extended_opts.append("max_ns=" + gene_opts.max_ns)
if gene_opts.gene_name == "16s":
extended_opts.append("gene=RRNA16S")
else:
extended_opts.append("gene=OTHER")
tag_stream.write("%s\t%s\t%s\n" % (line["bar code sequence"], line["bar code name"], ",".join(extended_opts)))
tag_stream.close()
init_process_opts.workdir = workdir
init_process_opts.tag_file = tag_file
seq_files = [SequenceFile(os.path.join(seq_file_dir, region + options["seq_file_pattern"]), os.path.join(seq_file_dir, region + options["qual_file_pattern"]))]
pipeline.run_init_process(init_process_opts, seq_files, trace_stream)
def split_to_samples(gene_name, lines):
sample_to_seqs = dict()
for line in lines:
sample_name = sample_clean_regex.sub("_", line["sample name"])
seqfile = "region_" + line["region"] + "/initial_process/" + line["bar code name"] + "/" + line["bar code name"] + "_trimmed.fasta"
qualfile = "region_" + line["region"] + "/initial_process/" + line["bar code name"] + "/" + line["bar code name"] + "_trimmed.qual"
#Since initial processing can delete empty files...we have to account for this
if not os.path.exists(seqfile):
continue
if sample_name == "":
sample_name = "no_sample_name"
if not sample_name in sample_to_seqs:
sample_to_seqs[sample_name] = dict()
sample_to_seqs[sample_name]["seqs"] = []
sample_to_seqs[sample_name]["qual"] = []
sample_to_seqs[sample_name]["seqs"].extend(SeqIO.parse(open(seqfile), "fasta"))
if os.path.exists(qualfile):
sample_to_seqs[sample_name]["qual"].append(qualfile)
ret = []
for sample_name in list(sample_to_seqs.keys()):
seqs = sample_to_seqs[sample_name]["seqs"]
qualfiles = sample_to_seqs[sample_name]["qual"]
if len(seqs) > 0:
seqfile = os.path.join(gene_name, sample_name + ".fasta")
qualfile = None
out = open(seqfile, "w")
SeqIO.write(seqs, out, "fasta")
out.close()
if len(qualfiles) > 0:
qualfile = relative_to_abs(os.path.join(gene_name, sample_name + ".qual"))
pipeline_core.cat_files(qualfiles, qualfile, False)
ret.append(SequenceFile(relative_to_abs(seqfile), qualfile))
return ret
def process_run(data_file, custom_opts_file, seq_file_dir):
parser = TitaniumDataParser(open(data_file))
options = titanium_options.options_from_file(custom_opts_file)
region_map = dict()
for line in parser:
if not line["region"] in region_map:
region_map[line["region"]] = list()
region_map[line["region"]].append(line)
gene_map = dict()
trace_stream = open("region_trace.txt", "w")
for region in region_map:
lines = region_map[region]
do_init_process(region, lines, trace_stream, seq_file_dir, options)
for line in lines:
gene_name = line["gene"]
if not gene_name in gene_map:
gene = dict()
gene["control"] = []
gene["experimental"] = []
gene_map[gene_name] = gene
if line["control"]:
gene_map[gene_name]["control"].append(line)
else:
gene_map[gene_name]["experimental"].append(line)
trace_stream.close()
for gene_name in list(gene_map.keys()):
if not gene_name in options["genes_of_interest"]:
print("Not interested in %s" % gene_name)
continue
if os.path.exists(gene_name):
print("Already a directory for %s, skipping (manually delete if you want to rerun)" % gene_name)
continue
print("Processing %s" % gene_name)
os.mkdir(gene_name)
control_files = split_to_samples(gene_name, gene_map[gene_name]["control"])
experimental_files = split_to_samples(gene_name, gene_map[gene_name]["experimental"])
if len(control_files) > 0 or len(experimental_files) > 0:
if len(control_files) > 0:
workdir = relative_to_abs(os.path.join(gene_name, "control"))
os.mkdir(workdir)
trace_stream = open(os.path.join(workdir, "trace.txt"), "w")
options[gene_name].is_control = True
options[gene_name].workdir = workdir
print("Spawning pipeline for %s control" % gene_name)
PipelineThread(options[gene_name], control_files, trace_stream).start()
if len(experimental_files) > 0:
workdir = relative_to_abs(os.path.join(gene_name, "experimental"))
os.mkdir(workdir)
trace_stream = open(os.path.join(workdir, "trace.txt"), "w")
options[gene_name].is_control = False
options[gene_name].workdir = workdir
print("Spawning pipeline for %s experimental" % gene_name)
PipelineThread(options[gene_name], experimental_files, trace_stream).start()
else:
shutil.rmtree(gene_name)
if __name__ == "__main__":
print(os.getcwd())
if len(sys.argv) != 4:
print("USAGE: titanium_run_processor.py <run data file> <custom_config_ini> <run seq file directory>")
else:
process_run(sys.argv[1], sys.argv[2], sys.argv[3])
# process_run("run_data.txt", "/scratch/wangqion/qiong_titanium/titanium_run_04222010/20100422_reads")
```
#### File: python_3_scripts/RDPTools/getUniqueStarts.py
```python
import random
import sys
import os
from operator import itemgetter, attrgetter
kmerset = dict()
def sortStartKmer(data):
starts = []
for line in data:
lexems = line.split()
tuple = (line, int(lexems[7]))
starts.append(tuple)
starts.sort(key=lambda tup: tup[1], reverse=True)
return starts
def getUnique(startsfile):
infile = open(startsfile, "r")
lines = infile.readlines()
infile.close()
for l in lines:
if l.startswith("#"):
continue
lexems = l.split()
kmer_pos = lexems[3] + "_" + lexems[7]
if kmer_pos not in kmerset:
kmerset[kmer_pos] = l.strip()
return list(kmerset.values())
if __name__ == "__main__":
usage = "Usage: starts.txt starts.txt ... > uniq_starts.txt"
if len(sys.argv) < 2:
sys.exit("need at least one input file. " + usage);
for infile in sys.argv[1:]:
getUnique(infile)
ret = list(kmerset.values())
sort_data = sortStartKmer(ret)
for s in sort_data:
print("%s" %(s[0]))
```
#### File: python_3_scripts/RDPTools/mcupgma.py
```python
import subprocess
import time
import sys
import os
def run_upgma(fasta_file, workdir=".", cluster_method="upgma", clust_file=None, id_mapping_file=None, sample_mapping_file=None, derep_file=None, matrix_file=None, mask_seq=None, jar_loc="Clustering.jar"):
ret = dict()
abs_start_time = time.time()
file_stem = os.path.split(fasta_file)[1].split(".")[0]
if id_mapping_file == None:
id_mapping_file = os.path.join(workdir, file_stem + ".id_mapping")
if sample_mapping_file == None:
sample_mapping_file = os.path.join(workdir, file_stem + ".sample_mapping")
if derep_file == None:
derep_file = os.path.join(workdir, file_stem + ".derep")
if matrix_file == None:
matrix_file = os.path.join(workdir, file_stem + ".matrix")
if clust_file == None:
clust_file = os.path.join(workdir, file_stem + ".clust")
start_time = time.time()
derep_stream = open(derep_file, "w")
if mask_seq == None:
derep_process = subprocess.Popen(['java', '-Xmx2g', '-jar', jar_loc, "derep", "--aligned", id_mapping_file, sample_mapping_file, fasta_file], stdout=derep_stream)
else:
derep_process = subprocess.Popen(['java', '-Xmx2g', '-jar', jar_loc, "derep", "--model-only=" + mask_seq, id_mapping_file, sample_mapping_file, fasta_file], stdout=derep_stream)
derep_process.wait()
derep_stream.close()
end_time = time.time()
ret["derep"] = end_time - start_time
print("Derep completed\t" + str(end_time - start_time))
start_time = time.time()
subprocess.check_call(['java', '-Xmx2g', '-jar', jar_loc, "dmatrix", "-i", id_mapping_file, "-o", matrix_file, "-in", derep_file, "-w", workdir])
end_time = time.time()
ret["matrix"] = end_time - start_time
print("Distance matrix computed in\t" + str(end_time - start_time))
start_time = time.time()
subprocess.check_call(['java', '-Xmx2g', '-jar', jar_loc, 'cluster', '-m', cluster_method, '-i', id_mapping_file, '-s', sample_mapping_file, '-d', matrix_file, '-o', clust_file])
end_time = time.time()
ret["cluster"] = end_time - start_time
print("Clustering completed in\t" + str(end_time - start_time))
abs_end_time = time.time()
ret["total"] = abs_end_time - abs_start_time
print("Completed in\t" + str(abs_end_time - abs_start_time))
if __name__ == "__main__":
if len(sys.argv) > 2:
method = sys.argv[2];
if not method in ['single', 'upgma', 'complete']:
print("Valid methods are single, upgma, or complete")
else:
workdir = "."
mask_seq = None
if len(sys.argv) > 3:
if "-mask=" in sys.argv[3]:
mask_seq = sys.argv[3].replace("-mask=", "")
if len(sys.argv) > 4:
workdir = sys.argv[4]
else:
workdir = sys.argv[3]
run_upgma(sys.argv[1], cluster_method=method, workdir=workdir, mask_seq=mask_seq)
else:
print("USAGE: mcupgma.py <fasta_file> <single,upgma,complete> [-mask=<maskseq>] [working_directory]")
```
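Driving `run_upgma` from another script instead of the command line is straightforward; the paths below are placeholders for your own aligned FASTA file, working directory, and `Clustering.jar` location.
```python
# Illustrative call; all paths are placeholders.
from mcupgma import run_upgma

timings = run_upgma('my_aligned_seqs.fasta',
                    workdir='clust_out',
                    cluster_method='upgma',
                    jar_loc='/path/to/Clustering.jar')
print(timings['total'])   # per-step timings collected in the dict above
```
The working directory should already exist, since the script only opens files inside it.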
|
{
"source": "jfqd/python-beaver",
"score": 2
}
|
#### File: beaver/transports/__init__.py
```python
import sys
def create_transport(beaver_config, logger):
"""Creates and returns a transport object"""
transport_str = beaver_config.get('transport')
if '.' not in transport_str:
# allow simple names like 'redis' to load a beaver built-in transport
module_path = 'beaver.transports.%s_transport' % transport_str.lower()
class_name = '%sTransport' % transport_str.title()
else:
# allow dotted path names to load a custom transport class
try:
module_path, class_name = transport_str.rsplit('.', 1)
except ValueError:
raise Exception('Invalid transport {0}'.format(beaver_config.get('transport')))
level = -1 if sys.version_info < (3, 3) else 0
_module = __import__(module_path, globals(), locals(), class_name, level)
transport_class = getattr(_module, class_name)
transport = transport_class(beaver_config=beaver_config, logger=logger)
return transport
```
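The dotted-path convention can be exercised on its own with `importlib`; the sketch below (using a standard-library class purely as a stand-in for a custom transport) shows how a `'package.module.ClassName'` string resolves to a class object.
```python
import importlib


def load_class(dotted_path):
    """Resolve a 'package.module.ClassName' string to the class object."""
    module_path, class_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, class_name)


# Stand-in example with a stdlib class; a custom transport such as
# 'mypkg.transports.MyTransport' would resolve the same way.
cls = load_class('collections.OrderedDict')
print(cls)   # <class 'collections.OrderedDict'>
```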
|
{
"source": "jfqueija/caspertools",
"score": 3
}
|
#### File: jfqueija/caspertools/test.py
```python
import argparse
from caspertools.hiddenav import TorIp
parser = argparse.ArgumentParser(description='caspertools TorIp test')
parser.add_argument('-ps','--password',help='Tor password')
parser.add_argument('-pt','--port',help='Tor Port. Sample: 9051')
parser.add_argument('-lp','--proxy',help='Local proxy. Sample: 127.0.0.1:8118')
parser.add_argument('-u','--url',help='Url for test library.')
parser = parser.parse_args()
def main():
if parser.password:
if parser.port:
if parser.proxy:
if parser.url:
testTorIp(parser.password,parser.port,parser.proxy,parser.url)
else:
print('Url test is required. Please, look help with parameter -h')
else:
print('Local Proxy is required. Please, look help with parameter -h')
else:
print('Tor port is required. Please, look help with parameter -h')
else:
print('Tor password is required. Please, look help with parameter -h')
def testTorIp(password,port,proxy,urlTest):
tor = TorIp(password,port,proxy)
print(tor.renew_ip())
print(tor.request_get(urlTest).text)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
```
|
{
"source": "jfrabetti/trie",
"score": 4
}
|
#### File: jfrabetti/trie/trie.py
```python
def strip_punctuation( token ):
""" Given a token remove any leading or trailing punctuation or white space """
puncStr = ",.()"
# TODO: use a regex to strip leading and trailing punctuation
while(1):
n = len(token)
token = token.strip(puncStr).strip()
if len(token) == n:
break;
return token
def tokenize( text ):
""" Given a block of text remove any punctuation and return a list of tokens """
# split the text into candidate tokens based on spaces
tokenL = text.split(" ")
# strip punctuation and white space and drop an resulting empty tokens
tokenL = [ strip_punctuation(token) for token in tokenL if strip_punctuation(token) ]
return tokenL
def new_node( value ):
# Create a new node.
# TODO: Use a class instead of a dictionary
return {
"children":{}, # Children nodes.
"value":value, # Complete prefix represented by this node
"term_fl":False # True if this node represents the end of a complete term.
}
def insert_node( node, tokenL ):
""" Insert a term into the trie."""
# for each token in the term
for i,token in enumerate(tokenL):
# If the prefix for this term has not yet been inserted ...
if token not in node['children']:
# ... then insert it.
node['children'][token] = new_node(" ".join(tokenL[0:i+1]))
# Traverse to the node assoc'd with 'token'.
node = node['children'][ token ]
node['value'] = " ".join(tokenL) # Store the complete term in the final node.
node['term_fl'] = True # Set 'term_fl' to indicate that this is the termination of a complete term.
def build_trie( termL ):
""" Given a list of terms (token lists) create a trie. """
trie = new_node(None)
for tokenL in termL:
insert_node( trie, tokenL )
return trie
def print_node( node, indent ):
""" Print the value assoc'd with this node and then the values assoc'd with the children of this node. """
indentStr = "".join([ " " for i in range(indent) ])
print(indentStr, node['term_fl'], " value:", node['value'] )
for k,v in node['children'].items():
print_node(v,indent+2)
def print_trie( trie ):
""" Recursively print the trie. """
print_node( trie, 0 )
def find_term(node, tokenL ):
""" Given a trie, rooted on 'node', locate a matching term beginning on the first token in 'tokenL'
Returns (value, term_fl) where 'value' is the prefix stored at the deepest matched node
and 'term_fl' is True if that node terminates a complete term; (None, False) if nothing matched.
"""
matchNode = None
for token in tokenL:
if token not in node['children']:
break
matchNode = node = node['children'][token]
return (matchNode['value'], matchNode['term_fl']) if matchNode else (None,False)
def find_terms_in_text( trie, text ):
""" Given a trie, and a string ('text') locate all the encoded in the trie in the string."""
# Tokenize the text and strip it of punctuation.
tokenL = tokenize( text )
print(len(tokenL),tokenL[-1])
for i in range(len(tokenL)):
match_term, term_fl = find_term( trie, tokenL[i:] )
if match_term:
print("Complete" if term_fl else "Partial", "term: ", match_term, "found at token index ", i )
if __name__ == "__main__":
dataText = """
The operations of each Borrower, and the activities of the officers and directors and, to the knowledge of each Borrower,
any Subsidiaries of the Borrowers, employees, agents and representatives of each Borrower, while acting on behalf of such
Borrower, and to the knowledge of each Borrower the operations of each Material Project Party in relation to the Project,
have been conducted at all times in compliance with all applicable Anti-Money Laundering Laws, Sanctions, and Anti-Corruption
Laws. Neither Borrower, nor any Subsidiaries of the Borrowers, nor any officer or director or, to the knowledge of any Borrower,
Affiliates, employee, agent or representative of either Borrower has engaged, directly or indirectly, in any activity or conduct
which would violate any Anti-Corruption Laws or Anti-Money Laundering Laws. Neither Borrower nor any Subsidiaries of the Borrowers,
nor any officer or director or, to the knowledge of any Borrower, Affiliates, employee, agent or representative of either Borrower
has engaged, directly or indirectly, in any dealings or transactions with, involving or for the benefit of a Sanctioned Person,
or in or involving a Sanctioned Country, where such dealings or transactions would violate Sanctions, in the five (5) year period
immediately preceding the date hereof.
"""
# Assume the list is already cleaned of punctuation.
termL = [
["Borrower"],
["Subsidiaries"],
["Material", "Project", "Party"],
["Project"],
["Project Manager"],
["Anti-Money", "Laundering", "Laws" ],
["Sanctions"],
["Anti-Corruption", "Laws"],
["Affiliates"],
["Sanctioned", "Person"],
["Sanctioned", "Country"],
["Person"],
["Officer"],
["Director"],
["Agents"]
]
trie = build_trie(termL)
print_trie(trie)
find_terms_in_text(trie,dataText)
```
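A small interactive check of the matcher, separate from the legal-text demo in `__main__`, shows the value/flag pair that `find_term` returns for complete terms versus bare prefixes. Run it in the same module (or after importing `build_trie` and `find_term`).
```python
# Quick check of the trie functions defined above.
trie = build_trie([["Material", "Project", "Party"], ["Project"]])

print(find_term(trie, ["Project", "budget"]))
# ('Project', True)                    -- complete single-token term

print(find_term(trie, ["Material", "Project", "Party", "obligations"]))
# ('Material Project Party', True)     -- complete multi-token term

print(find_term(trie, ["Material", "Project"]))
# ('Material Project', False)          -- only a prefix of a longer term
```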
|
{
"source": "jfraj/soundeval",
"score": 3
}
|
#### File: jfraj/soundeval/player_recording.py
```python
import os
import time
# This project
import recorder
import check_signal
def record_playing(**kwargs):
"""Record audio signal and save it."""
save_dir = kwargs.get('save_dir', 'test/')
show_audio = kwargs.get('show_audio', False)
countdown = kwargs.get('countdown', 3)
max_length = kwargs.get('max_length', 60)
basename = kwargs.get('basename', None)
if basename is None:
basename = raw_input('Type a name for the file? ')
save_name = '{}_{}.wav'.format(basename, time.strftime("%Y%m%d"))
save_name = os.path.join(save_dir, save_name)
wait4enter = kwargs.get('wait4enter', True)
if wait4enter:
raw_input('Press enter when ready to start...')
rec = recorder.AudioRecorder(max_length=max_length, countdown=countdown)
rec.start_record(savename=save_name)
if show_audio:
check_signal.audio_report(save_name)
return save_name
def multi_recording(**kwargs):
"""Record multiple audio signal and save them."""
file_list = []
while True:
if raw_input('Another one?') not in ('y', 'Y', 'yes', 'Yes', 'YES'):
break
file_list.append(record_playing())
check_signal.show_multiaudio(file_list)
if __name__ == "__main__":
#record_playing(show_audio=True)
multi_recording()
```
#### File: jfraj/soundeval/recorder.py
```python
import pyaudio
import wave
import time
from baseaudio import BaseAudio
class AudioRecorder(BaseAudio):
"""Tools to record audio data."""
def __init__(self, **kwargs):
"""Initializing for recording"""
super(AudioRecorder, self).__init__(**kwargs)
self.frames_perbuff = kwargs.get('chunk', 2048)
self.channels = kwargs.get('channels', 1)
self.format = pyaudio.paInt16 # paInt8
# if recording is longer than max_length, it stops
self.max_length = kwargs.get('max_length', 60) # in seconds
def start_record(self, **kwargs):
countdown = kwargs.get('countdown', 3)
savename = kwargs.get('savename', None)
# Countdown before recording
for isec_left in reversed(range(countdown)):
print(isec_left + 1)
time.sleep(0.8)
# Record
print('start recording')
audio_api = pyaudio.PyAudio()
stream = audio_api.open(format=self.format,
channels=self.channels,
rate=self.sampling_rate,
input=True,
frames_per_buffer=self.frames_perbuff)
frames = []
nchunks = int(self.max_length *
self.sampling_rate / self.frames_perbuff)
try:
for i in range(0, nchunks):
data = stream.read(self.frames_perbuff)
frames.append(data)
print('max length ({}sec) reached...stop!'.format(self.max_length))
except KeyboardInterrupt:
print('\nStopped by user')
print("* done recording")
stream.stop_stream()
stream.close()
audio_api.terminate()
if savename is not None:
print('saving as {}'.format(savename))
wf = wave.open(savename, 'wb')
wf.setnchannels(self.channels)
wf.setsampwidth(audio_api.get_sample_size(self.format))
wf.setframerate(self.sampling_rate)
wf.writeframes(b''.join(frames))
wf.close()
if __name__ == "__main__":
rec = AudioRecorder(max_length=20)
rec.start_record(savename='test.wav')
#print(rec)
```
|
{
"source": "jframos/fiware-sdc",
"score": 3
}
|
#### File: acceptance/commons/utils.py
```python
__author__ = 'arobres'
import xmldict
import string
import random
from constants import AUTH_TOKEN_HEADER, TENANT_ID_HEADER, CONTENT_TYPE, CONTENT_TYPE_XML, ACCEPT_HEADER, \
CONTENT_TYPE_JSON
def dict_to_xml(dict_to_convert):
return xmldict.dict_to_xml(dict_to_convert)
def xml_to_dict(xml_to_convert):
return xmldict.xml_to_dict(xml_to_convert)
def set_default_headers(token_id, tenant_id):
headers = dict()
headers[AUTH_TOKEN_HEADER] = token_id
headers[TENANT_ID_HEADER] = tenant_id
headers[CONTENT_TYPE] = CONTENT_TYPE_XML
headers[ACCEPT_HEADER] = CONTENT_TYPE_JSON
return headers
def id_generator(size=10, chars=string.ascii_letters + string.digits):
"""Method to create random ids
:param size: define the string size
:param chars: the characters to be use to create the string
return ''.join(random.choice(chars) for x in range(size))
"""
return ''.join(random.choice(chars) for x in range(size))
def delete_keys_from_dict(dict_del, key):
"""
Method to delete keys from python dict
:param dict_del: Python dictionary with all keys
:param key: key to be deleted in the Python dictionary
:returns: the dictionary with every occurrence of the key removed (the input dict is modified in place)
"""
if key in dict_del.keys():
del dict_del[key]
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, key)
return dict_del
```
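`delete_keys_from_dict` removes the key at the top level and then walks any nested dictionaries, mutating the input in place. A quick illustration with made-up data:
```python
# Made-up payload; any occurrence of 'createdAt' is stripped, however deep it nests.
payload = {
    'product': {
        'name': 'demo',
        'metadata': {'createdAt': '2014-01-01', 'owner': 'qa'},
    },
    'createdAt': '2014-01-01',
}

cleaned = delete_keys_from_dict(payload, 'createdAt')
print(cleaned)
# {'product': {'name': 'demo', 'metadata': {'owner': 'qa'}}}
```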
#### File: acceptance/tools/utils.py
```python
__author__ = '<EMAIL>'
from lettuce import world
from tools import body_message
import http
def errorLabel (value, error):
if error == "wrong":
return '1234567890'
elif error == "empty":
return ''
else:
return value
def errorUrl (value, error):
"""
:param value:
:param error:
:param label:
:return:
"""
REST = "rest/"
if error == "Not Found":
pos = value.find(REST)+len(REST) # add "error_" text after "rest/" resource
value = value[:pos] + "error_" + value[pos:] # ex: http://172.16.31.10:8082/sdc/rest/error_catalog/product/Product_test_0001
return value
def getPosition(operation, content):
"""
return the position where insert element
:param operation:
:param content:
"""
for ID in body_message.position:
if ID['operation'] == operation and ID['content'] == content:
return -ID['position']
def body_oneElement (request, values, operation, content):
"""
Add a element into a request
:param request:
:param value:
:param operation:
:param content:
"""
body="" #str(values['label'])+ " -- "+str(values['value'])
positionToInsert = getPosition(operation, content)
if content == 'xml':
body=body+'<'+str(values['label'])+'>'+str(values['value'])+'</'+str(values['label'])+'>'
elif content == 'json':
body=body+'\"'+str(values['label']) +'\": \"'+str(values['value'])+'\"'
if request != '{}': body = ','+body
return insert_text(request,positionToInsert, body)
def body_elements(request, values, elementName, operation, content):
"""
Add new multi elements into a request
:param request: Request before to insert new element
:param values: labels and values that will be inserted
:param elementName: element name
:param operation: operation in use, ex:
"installProduct"
:param content: specify media types which are acceptable for the response, ex:
"xml", "json"
:return: new request with new element added
"""
body = ""
positionToInsert = getPosition(operation, content)
if content == 'xml':
for ID in values:
if ID['value'] is not None:
body=body+"<"+ID['label']+">"+ID['value']+"</"+ID['label']+">"
if body != "": body = '<'+elementName+'>'+body+'</'+elementName+'>'
elif content == 'json':
for ID in values:
if ID['value'] is not None:
body=body+'"'+ID['label']+'": "'+ID['value']+'",'
if body != "": body = '"'+elementName+'":{'+body[:-1]+'}'
if request != '{}': body = ','+body
return insert_text(request,positionToInsert, body)
def request(method, url, headers, body, error):
headers['X-Auth-Token'] = errorLabel (headers['X-Auth-Token'], error)
url = errorUrl(url, error)
if error == "GET" or error == "PUT" or error == "POST" or error == "DELETE":
method = error
if method == "GET":
response = http.get(url, headers)
elif method == "POST":
response = http.post(url, headers, body)
elif method == "PUT":
response = http.put(url, headers, body)
elif method == "DELETE":
response = http.delete(url, headers)
printRequest(method,url,headers,body)
#printResponse(response)
return response
def insert_label (string, stringBeforeToInsert, newSubString):
pos = string.find(stringBeforeToInsert)
return string[:pos] + newSubString + string[pos:]
def insert_text (string, positionBeforeToInsert, newSubString):
return string[:positionBeforeToInsert] + newSubString + string[positionBeforeToInsert:]
def printRequest(method, url, headers, body):
print "------------------------------ Request ----------------------------------------------"
print "url: "+ str(method) + " "+str(url)
print "\nHeader: "+ str (headers)+"\n"
if body is not None:
print "\nBody: init("+str (body)+")end\n\n"
print "----------------------------------------------------------------------------------------\n\n\n\n"
def printResponse(response):
print "---------------------------------- Response ----------------------------------------------"
print "status code: "+str(response.status)
print "\nHeader: "+ str(response.msg)
print "\nBody: init("+str(response.read())+")end\n\n\n"
print "----------------------------------------------------------------------------------------"
def get_body_expected(response_type, operation):
for ID in body_message.Catalog_body:
if ID["operation"] == operation and ID["code"] == response_type:
return ID["body"]
def check_response_status(response, expected_status_code):
"""
Checks that the response status is the expected one.
:param response: Response to be checked.
:param expected_status_code: Expected status code of the response.
"""
assert response.status == expected_status_code, \
"Wrong status code received: %d. Expected: %d. \n\nBody content: %s" \
% (response.status, expected_status_code, response.read())
def check_response_body(response, expected_body):
"""
Checks that the response body is the expected one.
:param response: Response to be checked.
:param expected_body: Expected body of the response.
"""
resp = str(response.read())
#print "\n\n\n respuesta: "+ resp+ "\n\n\n"
#print "\n esperado: "+ expected_body + "\n\n\n"
#print "\n\n------------------------------------------------------------------------------------------------------------------------------------------------- "+str(resp.find(expected_body))+"\n\n"
assert resp.find(expected_body) >= 0, \
"Wrong body received: %s \n\n Expected: %s" \
% (resp, expected_body)
```
|
{
"source": "jframos/sdklib",
"score": 2
}
|
#### File: sdklib/behave/requests.py
```python
import json
from behave import given, when
from sdklib.http import HttpRequestContext, HttpSdk
from sdklib.http.authorization import BasicAuthentication, X11PathsAuthentication
from sdklib.http.renderers import FormRenderer, JSONRenderer
__all__ = ('set_default_host', 'set_default_proxy', 'set_url_path', 'set_url_path_with_params',
'set_authorization_basic', 'set_11path_authorization', 'set_headers', 'set_query_parameters',
'set_body_parameters', 'set_form_parameters', 'set_body_files', 'send_http_request',
'send_http_request_with_query_parameters', 'send_http_request_with_form_parameters',
'send_http_request_with_body_parameters')
def safe_add_http_request_context_to_behave_context(context):
if not hasattr(context, "http_request_context"):
context.http_request_context = HttpRequestContext()
@given('The API endpoint "{host}"')
def set_default_host(context, host):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.host = host
@given('The API proxy "{host}"')
def set_default_proxy(context, host):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.proxy = host
@given('The API resource "{url_path}"')
def set_url_path(context, url_path):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.url_path = url_path
@given('The parameterized API resource "{url_path_str_format}" with these parameter values')
def set_url_path_with_params(context, url_path_str_format):
"""
Parameters:
+------+--------+
| key | value |
+======+========+
| key1 | value1 |
+------+--------+
| key2 | value2 |
+------+--------+
"""
safe_add_http_request_context_to_behave_context(context)
table_as_json = dict(context.table)
url_path = url_path_str_format % table_as_json
context.http_request_context.url_path = url_path
@given('Authorization-Basic with username "{username}" and password "{password}"')
def set_authorization_basic(context, username, password):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.authentication_instances.append(BasicAuthentication(username=username, password=password))
@given('11Paths-Authorization with application id "{app_id}" and secret "{secret}"')
def set_11path_authorization(context, app_id, secret):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.authentication_instances.append(X11PathsAuthentication(app_id=app_id, secret=secret))
@given('The headers')
def set_headers(context):
"""
Parameters:
+--------------+---------------+
| header_name | header_value |
+==============+===============+
| header1 | value1 |
+--------------+---------------+
| header2 | value2 |
+--------------+---------------+
"""
safe_add_http_request_context_to_behave_context(context)
headers = dict()
for row in context.table:
headers[row["header_name"]] = row["header_value"]
context.http_request_context.headers = headers
@given('The query parameters')
def set_query_parameters(context):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.query_params = get_parameters(context)
@given('The body parameters')
def set_body_parameters(context):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.body_params = get_parameters(context)
@given('The form parameters')
def set_form_parameters(context):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.body_params = get_parameters(context)
context.http_request_context.renderer = FormRenderer()
def get_parameters(context):
"""
Reads parameters from context table
:param context: behave context
:return: dict with parameters names and values
"""
return {row['param_name']: row['param_value'] for row in context.table}
@given('The body files')
def set_body_files(context):
"""
Parameters:
+-------------+--------------+
| param_name | path_to_file |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
files = dict()
for row in context.table:
files[row["param_name"]] = row["path_to_file"]
context.http_request_context.files = files
@given('The default renderer')
def set_default_renderer(context):
"""
Set default renderer
:param context: behave context
"""
context.http_request_context.renderer = None
@when('I send a HTTP "{method}" request')
def send_http_request(context, method):
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.method = method
context.api_response = HttpSdk.http_request_from_context(context.http_request_context)
context.http_request_context.clear()
@when('I send a HTTP "{method}" request with query parameters')
def send_http_request_with_query_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_query_parameters(context)
send_http_request(context, method)
@when('I send a HTTP "{method}" request with body parameters')
def send_http_request_with_body_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_body_parameters(context)
send_http_request(context, method)
@when('I send a HTTP "{method}" request with form parameters')
def send_http_request_with_form_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_form_parameters(context)
send_http_request(context, method)
@when('I send a HTTP "{method}" request with body parameters encoded "{encoding_type}"')
def send_http_request_with_body_parameters_encoded(context, method, encoding_type):
pass
@when('I send a HTTP "{method}" request with this body "{resource_file}"')
def send_http_request_with_body_resource_file(context, method, resource_file):
pass
@when('I send a HTTP "{method}" request with this JSON')
def send_http_request_with_json(context, method):
"""
Parameters:
.. code-block:: json
{
"param1": "value1",
"param2": "value2",
"param3": {
"param31": "value31"
}
}
"""
safe_add_http_request_context_to_behave_context(context)
context.http_request_context.body_params = json.loads(context.text)
context.http_request_context.renderer = JSONRenderer()
send_http_request(context, method)
@when('I send a HTTP "{method}" request with this XML')
def send_http_request_with_xml(context, method):
pass
```
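The step implementations above all read their inputs from a behave table via `get_parameters`. As a minimal sketch (the `FakeContext` class and its rows are hypothetical stand-ins for behave's real `context`/`table` objects, not part of sdklib), this is the dictionary such a table produces:
```python
# Hypothetical stand-in for a behave context carrying the table from the step docstrings above.
class FakeContext:
    def __init__(self, rows):
        self.table = rows  # behave rows support row["column"] access; plain dicts mimic that here

context = FakeContext([
    {"param_name": "param1", "param_value": "value1"},
    {"param_name": "param2", "param_value": "value2"},
])

# Same comprehension used by get_parameters(context) above.
params = {row["param_name"]: row["param_value"] for row in context.table}
assert params == {"param1": "value1", "param2": "value2"}
```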
#### File: sdklib/http/har.py
```python
import json
import copy
from sdklib.compat import str, unquote_plus
from sdklib import http
from sdklib.http import HttpRequestContext
from sdklib.http.renderers import get_renderer
from sdklib.http.response import Response as HttpResponse
from sdklib.util.urls import urlsplit
class Cookie(object):
def __init__(self, j):
        self._dict = json.loads(j) if isinstance(j, str) else j
@property
def name(self):
return self._dict.get("name", None)
@property
def value(self):
return self._dict.get("value", None)
@property
def expires(self):
return self._dict.get("expires", None)
@property
def http_only(self):
return self._dict.get("httpOnly", None)
@property
def secure(self):
return self._dict.get("secure", None)
class Request(object):
def __init__(self, j):
        self._dict = json.loads(j) if isinstance(j, str) else j
@property
def method(self):
return self._dict.get("method", None)
@property
def url(self):
return self._dict.get("url", None)
@property
def http_version(self):
return self._dict.get("httpVersion", None)
@property
def headers(self):
headers = self._dict.get("headers", None)
return {h["name"]: h["value"] for h in headers}
@property
def query_string(self):
query_string = self._dict.get("queryString", None)
return {unquote_plus(h["name"]): unquote_plus(h["value"]) for h in query_string}
@property
def post_data(self):
post_data = self._dict.get("postData", {})
params = {unquote_plus(h["name"]): unquote_plus(h["value"]) for h in post_data.get("params", [])}
mime_type = post_data.get("mimeType", None)
return params, get_renderer(mime_type=mime_type)
@property
def cookies(self):
return [Cookie(c) for c in self._dict.get("cookies", [])]
@property
def headers_size(self):
return self._dict.get("headersSize", None)
@property
def body_size(self):
return self._dict.get("bodySize", None)
def as_http_request_context(self):
scheme, domain_or_ip, port, path, _ = urlsplit(self.url)
host = scheme + "://" + domain_or_ip
if port:
host += ":" + str(port)
body_params, renderer = self.post_data
return HttpRequestContext(
host=host, method=self.method, url_path=path, query_params=self.query_string, headers=self.headers,
renderer=renderer, body_params=body_params
)
class Content(object):
def __init__(self, j):
        self._dict = json.loads(j) if isinstance(j, str) else j
@property
def size(self):
return self._dict.get("size", None)
@property
def mime_type(self):
return self._dict.get("mimeType", None)
@property
def compression(self):
return self._dict.get("compression", None)
@property
def text(self):
return self._dict.get("text", None)
class Response(object):
def __init__(self, j):
        self._dict = json.loads(j) if isinstance(j, str) else j
@property
def status(self):
return self._dict.get("status", None)
@property
def status_text(self):
return self._dict.get("statusText", None)
@property
def http_version(self):
return self._dict.get("httpVersion", None)
@property
def headers(self):
headers = self._dict.get("headers", None)
return {h["name"]: h["value"] for h in headers}
@property
def cookies(self):
return [Cookie(c) for c in self._dict.get("cookies", [])]
@property
def redirect_url(self):
return self._dict.get("redirectURL", None)
@property
def headers_size(self):
return self._dict.get("headersSize", None)
@property
def body_size(self):
return self._dict.get("bodySize", None)
@property
def transfer_size(self):
return self._dict.get("_transferSize", None)
@property
def content(self):
c = self._dict.get("content", None)
return None if c is None else Content(c)
def as_http_response(self):
return HttpResponse(
headers=self.headers, status_text=self.status_text, status=self.status, http_version=self.http_version,
body=self.content.text
)
class Entry(object):
def __init__(self, j):
        self._dict = json.loads(j) if isinstance(j, str) else j
@property
def started_date_time(self):
return self._dict.get("startedDateTime", None)
@property
def time(self):
return self._dict.get("time", None)
@property
def request(self):
r = self._dict.get("request", None)
return None if r is None else Request(r)
@property
def response(self):
r = self._dict.get("response", None)
return None if r is None else Response(r)
@property
def cache(self):
return self._dict.get("cache", None)
@property
def timings(self):
return self._dict.get("timings", None)
@property
def server_ip_address(self):
return self._dict.get("serverIPAddress", None)
@property
def connection(self):
return self._dict.get("connection", None)
@property
def pageref(self):
return self._dict.get("pageref", None)
class Log(object):
def __init__(self, j):
        self._dict = json.loads(j) if isinstance(j, str) else j
@property
def version(self):
return self._dict.get("version", None)
@property
def creator(self):
return self._dict.get("creator", None)
@property
def pages(self):
return self._dict.get("pages", None)
@property
def entries(self):
return [Entry(e) for e in self._dict.get("entries", [])]
class HAR(object):
def __init__(self, j):
self._dict = json.loads(j)
@property
def log(self):
l = self._dict.get("log", None)
return None if l is None else Log(l)
def _find_value_in_new_response(value, prev_response_ctx, new_response_ctx):
try:
elem = prev_response_ctx.html.find_element_by_xpath("//*[@*='{}']".format(value))
name = elem.get("name")
new_elem = new_response_ctx.html.find_element_by_name(name)
return new_elem.get("value")
except:
pass
def _update_dynamic_elements(prev_response_ctx, response_ctx, request_ctx):
ctx = copy.deepcopy(request_ctx)
if response_ctx is not None and not response_ctx.cookie.is_empty():
ctx.headers["Cookie"] = response_ctx.cookie.as_cookie_header_value()
for k, v in request_ctx.body_params.items():
value = _find_value_in_new_response(v, prev_response_ctx, response_ctx)
if value is not None:
ctx.body_params[k] = value
return ctx
def sequential_requests(entries, update_dynamic_elements=False, **kwargs):
req_res = []
prev_response_context = None
response = None
for entry in entries:
context = entry.request.as_http_request_context()
if update_dynamic_elements:
context = _update_dynamic_elements(
prev_response_ctx=prev_response_context,
response_ctx=response,
request_ctx=context
)
for k, v in kwargs.items():
setattr(context, k, v)
response = http.request_from_context(context=context)
prev_response_context = entry.response.as_http_response()
req_res.append((context, response))
return req_res
```
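As a usage sketch for the classes above, a browser-exported HAR capture can be parsed and replayed with `sequential_requests`; the file name `capture.har` is hypothetical, and the attributes printed at the end follow sdklib's request context and response objects.
```python
# Minimal sketch: parse a HAR capture and replay it with the helpers defined above.
from sdklib.http.har import HAR, sequential_requests

with open("capture.har") as f:          # hypothetical HAR export from a browser's network tab
    har = HAR(f.read())

entries = har.log.entries
# update_dynamic_elements carries cookies and refreshed form values from one response
# into the next request, as implemented in _update_dynamic_elements above.
results = sequential_requests(entries, update_dynamic_elements=True)
for request_ctx, response in results:
    print(request_ctx.method, request_ctx.url_path, response.status)
```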
#### File: sdklib/http/response.py
```python
import json
from xml.etree import ElementTree
from sdklib.compat import convert_bytes_to_str
from sdklib.http.session import Cookie
from sdklib.util.structures import xml_string_to_dict, CaseInsensitiveDict
from sdklib.html import HTML
class JsonResponseMixin(object):
_body = ""
@property
def json(self):
try:
return json.loads(convert_bytes_to_str(self._body))
except:
return dict()
@property
def case_insensitive_dict(self):
return CaseInsensitiveDict(self.json)
class Response(JsonResponseMixin):
def __init__(self, headers=None, status=None, status_text=None, http_version=None, body=None):
self.headers = headers
self.status = status
self.status_text = status_text
self.http_version = http_version
self.body = body
self._cookie = None
@property
def headers(self):
"""
Returns a dictionary of the response headers.
"""
return self._headers
@headers.setter
def headers(self, value):
self._headers = value or {}
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def status_text(self):
return self._status_text
@status_text.setter
def status_text(self, value):
self._status_text = value
@property
def http_version(self):
return self._http_version
@http_version.setter
def http_version(self, value):
self._http_version = value
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = value
@property
def xml(self):
return ElementTree.fromstring(self.body)
@property
def raw(self):
"""
Returns urllib3 response data.
"""
return self.body
@property
def html(self):
"""
Returns HTML response data.
"""
return HTML(self.body)
@property
def data(self):
data = self.body
try:
data = data.decode()
except:
pass
try:
return json.loads(data)
except:
pass
try:
return xml_string_to_dict(data)
except:
return data
class AbstractBaseHttpResponse(object):
"""
Wrapper of Urllib3 HTTPResponse class needed to implement any HttpSdk response class.
See `Urllib3 <http://urllib3.readthedocs.io/en/latest/user-guide.html#response-content>`_.
"""
urllib3_response = None
_cookie = None
def __init__(self, resp):
self.urllib3_response = resp
@property
def cookie(self):
if not self._cookie:
self._cookie = Cookie(self.headers)
else:
self._cookie.load_from_headers(self.headers)
return self._cookie
@property
def headers(self):
"""
Returns a dictionary of the response headers.
"""
return self.urllib3_response.getheaders()
class HttpResponse(Response, AbstractBaseHttpResponse):
"""
Wrapper of Urllib3 HTTPResponse class compatible with AbstractBaseHttpResponse.
See `Urllib3 <http://urllib3.readthedocs.io/en/latest/user-guide.html#response-content>`_.
"""
def __init__(self, resp):
self.urllib3_response = resp
super(HttpResponse, self).__init__(
headers=self.urllib3_response.getheaders(),
status=self.urllib3_response.status,
status_text=self.urllib3_response.reason,
body=self.urllib3_response.data
)
@property
def reason(self):
return self.status_text
class Error(object):
def __init__(self, json_data):
self.json = json_data
self.case_insensitive_dict = CaseInsensitiveDict(self.json)
@property
def code(self):
return self.case_insensitive_dict['code'] if "code" in self.case_insensitive_dict else None
@property
def message(self):
return self.case_insensitive_dict['message'] if "message" in self.case_insensitive_dict else None
@property
def json(self):
return self._json
@json.setter
def json(self, value):
self._json = value if isinstance(value, dict) else dict()
def __repr__(self):
return json.dumps(self.json)
def __str__(self):
return self.__repr__()
class Api11PathsResponse(AbstractBaseHttpResponse, JsonResponseMixin):
"""
This class models a response from any of the endpoints in most of 11Paths APIs.
    It consists of "data" and "error" elements. Although normally only one of them is present, they are not
    mutually exclusive: errors can be non-fatal, so a response may carry valid information in the "data"
    field and at the same time report an error.
"""
def __init__(self, resp):
super(Api11PathsResponse, self).__init__(resp)
self._body = self.urllib3_response.data
@property
def data(self):
"""
:return: data part of the API response into a dictionary
"""
return self.case_insensitive_dict.get("data", None)
@property
def error(self):
"""
        :return: the error part of the API response as an Error object, consisting of an error code and an error message
"""
return Error(self.case_insensitive_dict["error"]) if self.case_insensitive_dict.get("error", None) is not None \
else None
```
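A small sketch of the Response helpers above: the body bytes are decoded and parsed on demand by the `json`, `data`, and `case_insensitive_dict` properties.
```python
# Minimal sketch of the Response conveniences defined above.
from sdklib.http.response import Response

resp = Response(
    headers={"Content-Type": "application/json"},
    status=200,
    status_text="OK",
    body=b'{"Data": {"id": 1}}',
)
print(resp.json)                            # {'Data': {'id': 1}}, parsed by JsonResponseMixin
print(resp.data)                            # same dict: decode, then try JSON, then XML
print(resp.case_insensitive_dict["data"])   # case-insensitive lookup: {'id': 1}
```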
#### File: sdklib/http/session.py
```python
from urllib3._collections import HTTPHeaderDict
from sdklib.compat import cookies
class Cookie(object):
"""
Wrapper of python Cookie class.
See https://docs.python.org/2/library/cookie.html
"""
def __init__(self, headers=None):
self._cookie = cookies.SimpleCookie()
self.load_from_headers(headers)
def load_from_headers(self, headers):
if not headers:
return
elif not isinstance(headers, HTTPHeaderDict):
headers = HTTPHeaderDict(headers)
set_cookie_headers = headers.getlist("Set-Cookie")
if set_cookie_headers:
self._cookie.load("; ".join(set_cookie_headers))
def as_cookie_header_value(self):
if self.is_empty():
return ""
items = list(self.items())
name, morsel = items[0]
output = "%s=%s" % (name, morsel.value)
for name, morsel in items[1:]:
output += "; "
output += "%s=%s" % (name, morsel.value)
return output
def is_empty(self):
return (self._cookie is None) or (len(self._cookie.items()) == 0)
def getcookie(self):
return self._cookie
def items(self):
return self._cookie.items()
def get(self, key, default=None):
return self._cookie.get(key, default)
def update(self, cookie):
for key, morsel in cookie.items():
self._cookie[key] = morsel.value
```
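A short sketch of the Cookie wrapper above: it loads `Set-Cookie` response headers and renders them back as a single `Cookie` request header value.
```python
# Minimal sketch of the Cookie wrapper defined above.
from sdklib.http.session import Cookie

cookie = Cookie({"Set-Cookie": "sessionid=abc123; Path=/"})
print(cookie.is_empty())                # False
print(cookie.get("sessionid").value)    # abc123
print(cookie.as_cookie_header_value())  # sessionid=abc123
```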
#### File: sdklib/util/files.py
```python
import ntpath
def guess_filename_stream(path_to_file):
f = open(path_to_file, 'rb')
buf = f.read()
f.close()
filename = ntpath.basename(path_to_file)
return filename, buf
```
#### File: sdklib/util/structures.py
```python
import collections
from sdklib.util.xmltodict import parse as parse_xml
def contains_subdict(d1, d2):
for elem in d1:
if elem not in d2 or d1[elem] != d2[elem]:
return False
return True
def get_dict_from_list(l, **kwargs):
for e in l:
if contains_subdict(kwargs, e):
return e
def to_key_val_list(value, sort=False, insensitive=False):
"""
Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list({'key': 'val'}, sort=True)
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
if sort and not insensitive:
values = sorted(value)
elif sort:
values = sorted(value, key=lambda t: t[0].lower())
else:
values = value
return list(values)
def to_key_val_dict(values):
"""
Take an object and test to see if it can be represented as a
dictionary. If it can be, return a dict, e.g.,
::
>>> to_key_val_dict([('key', 'val')])
{'key': 'val'}
>>> to_key_val_dict({'key': 'val'})
{'key': 'val'}
>>> to_key_val_dict('string')
    ValueError: cannot encode objects that are not 2-tuples.
"""
if values is None:
return {}
if isinstance(values, collections.Mapping):
values = values.items()
elif isinstance(values, (str, bytes, bool, int)) or \
not all([isinstance(value, (list, tuple)) and len(value) == 2 for value in values]):
raise ValueError('cannot encode objects that are not 2-tuples')
dict_to_return = dict()
for k, v in values:
if k in dict_to_return and isinstance(dict_to_return[k], list) and isinstance(v, list):
dict_to_return[k].extend(v)
elif k in dict_to_return and isinstance(dict_to_return[k], list):
dict_to_return[k].append(v)
elif k in dict_to_return:
dict_to_return[k] = [dict_to_return[k], v]
else:
dict_to_return[k] = v
return dict_to_return
def xml_string_to_dict(xml_to_parse):
return parse_xml(xml_to_parse)
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
This class is a copy of `requests <https://github.com/kennethreitz/requests/blob/master/requests/structures.py>`_.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
```
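One behaviour of `to_key_val_dict` that the docstring examples above do not show: repeated keys are merged into a list, as in this small sketch.
```python
# Minimal sketch: to_key_val_dict merges values that share a key into a list.
from sdklib.util.structures import to_key_val_dict

print(to_key_val_dict([("tag", "a"), ("tag", "b"), ("id", "1")]))
# {'tag': ['a', 'b'], 'id': '1'}
```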
#### File: sdklib/util/times.py
```python
import time
import datetime
from functools import wraps
from multiprocessing import TimeoutError
from multiprocessing.pool import ThreadPool
import threading
import weakref
from sdklib.compat import thread
thread_pool = None
def get_current_utc(time_format="%Y-%m-%d %H:%M:%S"):
"""
@return a string representation of the current time in UTC.
"""
return time.strftime(time_format, time.gmtime())
def today_strf(format="%d/%m/%Y"):
t = datetime.date.today()
return t.strftime(format)
def tomorrow_strf(format="%d/%m/%Y"):
t = datetime.date.today() + datetime.timedelta(days=1)
return t.strftime(format)
def yesterday_strf(format="%d/%m/%Y"):
t = datetime.date.today() - datetime.timedelta(days=1)
return t.strftime(format)
def seconds_to_milliseconds_timestamp(seconds_timestamp):
return int(round(seconds_timestamp * 1000))
def current_milliseconds_timestamp():
return seconds_to_milliseconds_timestamp(time.time())
def datetime_to_milliseconds_timestamp(datetime_obj):
seconds_timestamp = time.mktime(datetime_obj.timetuple())
return seconds_to_milliseconds_timestamp(seconds_timestamp)
def get_thread_pool():
global thread_pool
if thread_pool is None:
# fix for python <2.7.2
if not hasattr(threading.current_thread(), "_children"):
threading.current_thread()._children = weakref.WeakKeyDictionary()
thread_pool = ThreadPool(processes=1)
return thread_pool
def timeout(milliseconds=10000, silent=False):
def wrap_function(func):
@wraps(func)
def __wrapper(*args, **kwargs):
try:
async_result = get_thread_pool().apply_async(func, args=args, kwds=kwargs)
return async_result.get(float(milliseconds) / 1000)
except thread.error:
return func(*args, **kwargs)
except TimeoutError:
pass
return __wrapper
return wrap_function
```
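A usage sketch of the `timeout` decorator above: the wrapped call runs in a one-worker thread pool, and if it does not finish within the allotted milliseconds the TimeoutError branch swallows the result and the call returns None.
```python
# Minimal sketch of the timeout decorator defined above.
import time
from sdklib.util.times import timeout

@timeout(milliseconds=500)
def slow_call():
    time.sleep(2)
    return "done"

print(slow_call())  # None: the call exceeded 500 ms, so the result is dropped
```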
#### File: behave/steps/steps.py
```python
from behave import given, when, then
from sdklib.behave.steps import *
@given('The cleanId as query param')
def step_impl(context):
context.http_request_context.query_params = {"cleanId": context.clean_id}
@given('The analyzeId as query param')
def step_impl(context):
context.http_request_context.query_params = {"analyzeId": context.analyze_id}
@then('The response contains a cleanId')
def step_impl(context):
assert "Data" in context.api_response.json
assert "cleanId" in context.api_response.json["Data"]
context.clean_id = context.api_response.json["Data"]["cleanId"]
@then('The response contains an analyzeId')
def step_impl(context):
assert "Data" in context.api_response.json
assert "analyzeId" in context.api_response.json["Data"]
context.analyze_id = context.api_response.json["Data"]["analyzeId"]
@then('The response contains some data')
def step_impl(context):
assert "Data" in context.api_response.json
```
#### File: sdklib/tests/sample_sdk.py
```python
import time
from sdklib.http import HttpSdk
from sdklib.util.parser import parse_args, safe_add_end_slash
from sdklib.http.authorization import X11PathsAuthentication
from sdklib.shortcuts import cache
class SampleHttpSdk(HttpSdk):
"""
Sample Sdk for testing purposes.
"""
DEFAULT_HOST = "https://www.google.es"
LOGIN_URL_PATH = "/login/" # not exist
API_ITEMS_URL_PATH = "/items/"
API_FILE_URL_PATH = "/files/"
def get_items(self):
"""
Get all items.
:return: SdkResponse
"""
return self.get(self.API_ITEMS_URL_PATH)
@cache(maxsize=None)
def get_items_with_cache(self):
"""
Get all items.
:return: SdkResponse
"""
time.sleep(8)
return self.get(self.API_ITEMS_URL_PATH)
def get_items_with_empty_query_params_parameter(self):
"""
Get all items.
:return: SdkResponse
"""
params = {}
return self.get(self.API_ITEMS_URL_PATH, query_params=params)
def create_item(self, name, description=None, city=None):
"""
Create an item.
:return: SdkResponse
"""
params = parse_args(name=name, description=description)
return self.post(self.API_ITEMS_URL_PATH, body_params=params)
def update_item(self, item_id, name, description=None):
"""
Update an item.
:return: SdkResponse
"""
params = parse_args(name=name, description=description)
return self.put(self.API_ITEMS_URL_PATH + safe_add_end_slash(item_id), body_params=params)
def partial_update_item(self, item_id, name=None, description=None):
"""
        Partially update an item.
:return: SdkResponse
"""
params = parse_args(name=name, description=description)
return self.patch(self.API_ITEMS_URL_PATH + safe_add_end_slash(item_id), body_params=params)
def delete_item(self, item_id):
"""
Delete an item.
:return: SdkResponse
"""
return self.delete(self.API_ITEMS_URL_PATH + safe_add_end_slash(item_id))
def create_file_11paths_auth(self, filename, file_stream, app_id, secret, description=None, name=None):
"""
Create a file using 11paths authentication.
:return: SdkResponse
"""
auth = (X11PathsAuthentication(app_id, secret),)
params = parse_args(name=name, description=description)
return self.post(self.API_FILE_URL_PATH, body_params=params, files={"file": (filename, file_stream)},
authentication_instances=auth, host="https://latch.elevenpaths.com")
```
#### File: sdklib/tests/test_sdklib.py
```python
import unittest
import pytest
import time
from sdklib.compat import bytes, is_py2, is_py3, exceptions
from sdklib.util.files import guess_filename_stream
from tests.sample_sdk import SampleHttpSdk
class TestSampleSdk(unittest.TestCase):
@classmethod
def setUpClass(cls):
# SampleHttpSdk.set_default_proxy("http://localhost:8080")
cls.api = SampleHttpSdk()
@classmethod
def tearDownClass(cls):
pass
def test_get_items(self):
response = self.api.get_items()
#self.assertEqual(response.status, 200)
#self.assertTrue(isinstance(response.data, list))
@pytest.mark.skipif(is_py2, reason="Cache not available in python 2.")
def test_get_items_with_cache_py3(self):
begin_timestamp = time.time()
response = self.api.get_items_with_cache()
elapsed_time = time.time() - begin_timestamp
self.assertEqual(404, response.status)
#self.assertTrue(isinstance(response.data, list))
self.assertGreater(elapsed_time, 8)
begin_timestamp = time.time()
response = self.api.get_items_with_cache()
elapsed_time = time.time() - begin_timestamp
self.assertEqual(404, response.status)
#self.assertTrue(isinstance(response.data, list))
self.assertLess(elapsed_time, 8)
@pytest.mark.skipif(is_py3, reason="Cache decorator raises exception in python 2.")
def test_get_items_with_cache_py2(self):
with self.assertRaises(exceptions.NotImplementedError):
self.api.get_items_with_cache()
def test_get_items_with_empty_query_params_parameter(self):
response = self.api.get_items_with_empty_query_params_parameter()
#self.assertEqual(response.status, 200)
#self.assertTrue(isinstance(response.data, list))
def test_create_item(self):
response = self.api.create_item("mi nombre", "algo")
self.assertEqual(404, response.status)
def test_update_item(self):
response = self.api.update_item(1, "mi nombre", "algo")
self.assertEqual(404, response.status)
def test_partial_update_item(self):
response = self.api.partial_update_item(1, "mi nombre")
self.assertEqual(404, response.status)
def test_delete_item(self):
response = self.api.delete_item(1)
self.assertEqual(404, response.status)
def test_login(self):
response = self.api.login(username="user", password="password")
self.assertEqual(404, response.status)
def test_create_file(self):
fname, fstream = guess_filename_stream("tests/resources/file.pdf")
response = self.api.create_file_11paths_auth(fname, fstream, "235hWLEETQ46KWLnAg48",
"lBc4BSeqtGkidJZXictc3yiHbKBS87hjE078rswJ")
self.assertEqual(404, response.status)
def test_get_json_response(self):
response = self.api.get_items()
#self.assertEqual(response.status, 200)
#self.assertTrue(isinstance(response.json, list))
def test_get_raw_response(self):
response = self.api.get_items()
        self.assertEqual(404, response.status)
self.assertTrue(isinstance(response.raw, bytes))
```
|
{
"source": "jfrancis71/GenBrix",
"score": 2
}
|
#### File: GenBrix/Experimental/PixelVAE.py
```python
from GenBrix import VariationalAutoencoder as vae
from GenBrix import NBModel as nb
from GenBrix import PixelCNN as cnn
class PixelVAE(nb.Model):
def __init__( self, distribution, image_dims, vae_model=None ):
super(PixelVAE, self).__init__()
self.cnn = cnn.ConditionalPixelCNN( distribution, image_dims )
if vae_model is None:
self.vae = vae.VariationalAutoencoder( self.cnn, image_dims )
else:
self.vae = vae.VariationalAutoencoder( self.cnn, image_dims, vae_model )
def loss( self, samples, logging_context=None, epoch=None ):
return self.vae.loss( samples, logging_context, epoch )
def sample( self, test_z=None ):
return self.vae.sample( test_z )
def get_trainable_variables( self ):
return self.vae.get_trainable_variables() + self.cnn.get_trainable_variables()
```
#### File: GenBrix/Experimental/TFPNBModel.py
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions  # alias used by the distribution definitions below
negloglik = lambda x, rv_x: -rv_x.log_prob(x)
BernoulliDistribution = ( 1 , lambda t: tfd.Independent(
tfd.Bernoulli( logits=t[...,0] ),
reinterpreted_batch_ndims=3 ) )
NormalDistribution = ( 2 , lambda t: tfd.Independent(
tfd.Normal( loc=t[...,0], scale=.05 + tf.nn.softplus( t[...,1] ) ),
reinterpreted_batch_ndims=3 ) )
class GBNBModel:
def __init__( self, dims, distribution ):
self.model = tf.keras.Sequential([
tfp.layers.VariableLayer(shape=[ dims[0], dims[1], dims[2], distribution[0] ] ),
tfp.layers.DistributionLambda( distribution[1] )
])
self.model.compile( loss = negloglik, optimizer=tf.keras.optimizers.Adam(lr=0.03) )
def fit( self, examples ):
self.model.fit( examples, examples )
def log_prob( self, sample ):
return self.model(0).log_prob( sample )
def sample( self ):
return self.model(0).sample()
#mymodel = GBNBModel( [ 28, 28, 1 ], NormalDistribution )
#mymodel.fit( train_bin_images )
#mymodel.sample()[:,:,0]
```
#### File: GenBrix/Experimental/VariationalAutoencoder.py
```python
import tensorflow as tf
import numpy as np
from GenBrix import NBModel as nb
class VAEModel():
def generative_net( image_dims, no_of_parameters ):
return "unimplemented"
def inference_net():
return "unimplemented"
def sample_latent():
return "unimplemented"
class DefaultVAEModel( VAEModel ):
def __init__( self, latent=64 ):
super(VAEModel, self).__init__()
self.latent = latent
def generative_net( self, image_dims, no_distribution_parameters ):
return tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=500, kernel_size=1, padding='SAME', activation='relu' ),
tf.keras.layers.Conv2D(
filters=500, kernel_size=1, padding='SAME', activation='relu' ),
tf.keras.layers.Conv2D(
filters=image_dims[0]*image_dims[1]*image_dims[2]*no_distribution_parameters, kernel_size=1, padding='SAME' ),
tf.keras.layers.Reshape( target_shape=(image_dims[0],image_dims[1],image_dims[2]*no_distribution_parameters) )
])
def inference_net( self ):
return tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=500, kernel_size=1, padding='SAME', activation='relu' ),
tf.keras.layers.Conv2D(
filters=500, kernel_size=1, padding='SAME', activation='relu' ),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense( units=self.latent*2, activation=None),
tf.keras.layers.Reshape( target_shape=(1,1,self.latent*2))
])
def sample_latent( self ):
return np.random.normal( np.zeros( [ 1, 1, 1, self.latent ] ), np.ones( [ 1, 1, 1, self.latent ] ) ).astype( np.float32 )
# This is a convolutional latent-variable version of the TensorFlow demo example
class ConvVAEModel( VAEModel ):
def __init__( self ):
super(VAEModel, self).__init__()
def inference_net( self ):
return tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, padding='SAME',strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, padding='SAME',strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense( units=50*2, activation=None),
tf.keras.layers.Reshape( target_shape=(1,1,50*2))
])
def generative_net( self, image_dims, no_distribution_parameters ):
return tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=(image_dims[0]//4)*(image_dims[1]//4)*32, kernel_size=1, padding='SAME',strides=(1, 1), activation='relu'),
tf.keras.layers.Reshape( target_shape=(image_dims[0]//4,image_dims[1]//4,32) ),
tf.keras.layers.Conv2DTranspose(
filters=64, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
tf.keras.layers.Conv2DTranspose(
filters=32, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
tf.keras.layers.Conv2DTranspose(
filters=image_dims[2]*no_distribution_parameters, kernel_size=3, strides=(1, 1), padding="SAME", activation=None)
])
def sample_latent( self ):
return np.random.normal( np.zeros( [ 1, 1, 1, 50 ] ), np.ones( [ 1, 1, 1, 50 ] ) ).astype( np.float32 )
#Model taken from https://github.com/yzwxx/vae-celebA/blob/master/model_vae.py
class YZVAEModel( VAEModel ):
def __init__( self ):
super(VAEModel, self).__init__()
self.trunk_inference = \
tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=64, kernel_size=(5,5), padding='SAME',strides=(2, 2), activation=None),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2D(
filters=128, kernel_size=(5,5), padding='SAME',strides=(2, 2), activation=None),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2D(
filters=256, kernel_size=(5,5), padding='SAME',strides=(2, 2), activation=None),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2D(
filters=512, kernel_size=(5,5), padding='SAME',strides=(2, 2), activation=None),
tf.keras.layers.ReLU() ] )
inf_dense_mean = tf.keras.layers.Conv2D(
filters=512, kernel_size=(1,1), padding='SAME',strides=(1, 1), activation=None )
inf_dense_logvar = tf.keras.layers.Conv2D(
filters=512, kernel_size=(1,1), padding='SAME',strides=(1, 1), activation=None )
def inference_net( self ):
input = tf.keras.layers.Input( [ 64, 64, 3 ] )
net = self.trunk_inference( input )
reshaped = tf.keras.layers.Reshape( target_shape = ( 1, 1, 4*4*512 ) )( net )
mean = tf.keras.layers.Conv2D(
filters=128, kernel_size=(1,1), padding='SAME',strides=(1, 1), activation=None )( reshaped )
logvar = tf.keras.layers.Conv2D(
filters=128, kernel_size=(1,1), padding='SAME',strides=(1, 1), activation=None )( reshaped )
out = tf.stack( [ mean, logvar ], axis=4 )
reshaped1 = tf.keras.layers.Reshape( target_shape = ( 1, 1, 128*2 ) )( out )
model = tf.keras.Model( inputs = input, outputs = reshaped1 )
return model
def generative_net( self, image_dims, no_distribution_parameters ):
return tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=64*4*8*8, kernel_size=(1,1), padding='SAME',strides=(1, 1), activation=None),
tf.keras.layers.Reshape( [ 8, 8, 256 ] ),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2DTranspose(
filters=64*4, kernel_size=(5,5), strides=(2, 2), padding="SAME", activation=None),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2DTranspose(
filters=64*2, kernel_size=(5,5), strides=(2, 2), padding="SAME", activation=None),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2DTranspose(
filters=64, kernel_size=(5,5), strides=(2, 2), padding="SAME", activation=None),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2DTranspose(
filters=3*no_distribution_parameters, kernel_size=(5,5), strides=(1, 1), padding="SAME", activation=None) ] )
def sample_latent( self ):
return np.random.normal( np.zeros( [ 1, 1, 1, 128 ] ), np.ones( [ 1, 1, 1, 128 ] ) ).astype( np.float32 )
class VariationalAutoencoder(nb.Model):
def __init__( self, distribution, image_dims, vae_model=DefaultVAEModel() ):
super(VariationalAutoencoder, self).__init__()
self.xinference_net = vae_model.inference_net()
self.xgenerative_net = vae_model.generative_net( image_dims, distribution.no_of_parameters() )
self.distribution = distribution
self.vae_model = vae_model
self.latent_distribution = nb.RealGauss()
def kl_loss( self, sample_z, z_params ):
kl_loss = 0.5 * ( -z_params[:,:,:,:,1] + tf.exp( z_params[:,:,:,:,1] ) + z_params[:,:,:,:,0]*z_params[:,:,:,:,0] - 1 )
return tf.reduce_mean( tf.reduce_sum( kl_loss, axis = [ 1, 2, 3 ] ) )
def loss( self, samples, logging_context=None, epoch=None ):
inf = self.xinference_net( samples )
inf_params = nb.reshape_channel_to_parameters( inf, 2 )
sample_z = self.latent_distribution.sample( inf )
gen_params = self.xgenerative_net( sample_z )
reconstruction_loss = self.distribution.loss( gen_params, samples )
kl_loss = self.kl_loss( sample_z, inf_params )
loss = tf.reduce_mean( reconstruction_loss + kl_loss )
if logging_context is not None:
tf.summary.scalar( logging_context+"_kl_loss", kl_loss, step=epoch )
tf.summary.scalar( logging_context+"_reconstruction_loss", reconstruction_loss, step=epoch )
return loss
def sample( self, test_z=None ):
if test_z is None:
test_z = self.vae_model.sample_latent()
return self.distribution.sample( self.xgenerative_net( test_z ) )
def get_trainable_variables( self ):
return self.xinference_net.trainable_variables + self.xgenerative_net.trainable_variables
```
|
{
"source": "jfrancis71/ML-Sources",
"score": 3
}
|
#### File: jfrancis71/ML-Sources/jwfdump.py
```python
import os
import sys
import argparse
import numpy as np
from PIL import Image, ImageDraw
# Make sure that caffe is on the python path:
caffe_root = './'
os.chdir(caffe_root)
sys.path.insert(0, os.path.join(caffe_root, 'python'))
import caffe
import h5py
from google.protobuf import text_format
from caffe.proto import caffe_pb2
def get_labelname(labelmap, labels):
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in xrange(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found == True
return labelnames
class CaffeDetection:
def __init__(self, gpu_id, model_def, model_weights, image_resize, labelmap_file):
# caffe.set_device(gpu_id)
# caffe.set_mode_gpu()
self.image_resize = image_resize
# Load the net in the test phase for inference, and configure input preprocessing.
self.net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
self.transformer.set_transpose('data', (2, 0, 1))
self.transformer.set_mean('data', np.array([104, 117, 123])) # mean pixel
# the reference model operates on images in [0,255] range instead of [0,1]
self.transformer.set_raw_scale('data', 255)
# the reference model has channels in BGR order instead of RGB
self.transformer.set_channel_swap('data', (2, 1, 0))
# load PASCAL VOC labels
file = open(labelmap_file, 'r')
self.labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), self.labelmap)
print( self.net.params['conv1_1'][0].data )
file = h5py.File( '/Users/julian/CZModels/SSD300VGGReference20180920.hdf', 'w')
file.create_dataset("/conv1_1W", data = self.net.params['conv1_1'][0].data )
file.create_dataset("/conv1_2W", data = self.net.params['conv1_2'][0].data )
file.create_dataset("/conv1_1B", data = self.net.params['conv1_1'][1].data )
file.create_dataset("/conv1_2B", data = self.net.params['conv1_2'][1].data )
file.create_dataset("/conv2_1W", data = self.net.params['conv2_1'][0].data )
file.create_dataset("/conv2_2W", data = self.net.params['conv2_2'][0].data )
file.create_dataset("/conv2_1B", data = self.net.params['conv2_1'][1].data )
file.create_dataset("/conv2_2B", data = self.net.params['conv2_2'][1].data )
file.create_dataset("/conv3_1W", data = self.net.params['conv3_1'][0].data )
file.create_dataset("/conv3_2W", data = self.net.params['conv3_2'][0].data )
file.create_dataset("/conv3_3W", data = self.net.params['conv3_3'][0].data )
file.create_dataset("/conv3_1B", data = self.net.params['conv3_1'][1].data )
file.create_dataset("/conv3_2B", data = self.net.params['conv3_2'][1].data )
file.create_dataset("/conv3_3B", data = self.net.params['conv3_3'][1].data )
file.create_dataset("/conv4_1W", data = self.net.params['conv4_1'][0].data )
file.create_dataset("/conv4_2W", data = self.net.params['conv4_2'][0].data )
file.create_dataset("/conv4_3W", data = self.net.params['conv4_3'][0].data )
file.create_dataset("/conv4_1B", data = self.net.params['conv4_1'][1].data )
file.create_dataset("/conv4_2B", data = self.net.params['conv4_2'][1].data )
file.create_dataset("/conv4_3B", data = self.net.params['conv4_3'][1].data )
file.create_dataset("/conv5_1W", data = self.net.params['conv5_1'][0].data )
file.create_dataset("/conv5_2W", data = self.net.params['conv5_2'][0].data )
file.create_dataset("/conv5_3W", data = self.net.params['conv5_3'][0].data )
file.create_dataset("/conv5_1B", data = self.net.params['conv5_1'][1].data )
file.create_dataset("/conv5_2B", data = self.net.params['conv5_2'][1].data )
file.create_dataset("/conv5_3B", data = self.net.params['conv5_3'][1].data )
file.create_dataset("/conv6_W", data = self.net.params['fc6'][0].data )
file.create_dataset("/conv6_B", data = self.net.params['fc6'][1].data )
file.create_dataset("/conv7_W", data = self.net.params['fc7'][0].data )
file.create_dataset("/conv7_B", data = self.net.params['fc7'][1].data )
file.create_dataset("/conv8_1W", data = self.net.params['conv6_1'][0].data )
file.create_dataset("/conv8_2W", data = self.net.params['conv6_2'][0].data )
file.create_dataset("/conv8_1B", data = self.net.params['conv6_1'][1].data )
file.create_dataset("/conv8_2B", data = self.net.params['conv6_2'][1].data )
file.create_dataset("/conv9_1W", data = self.net.params['conv7_1'][0].data )
file.create_dataset("/conv9_2W", data = self.net.params['conv7_2'][0].data )
file.create_dataset("/conv9_1B", data = self.net.params['conv7_1'][1].data )
file.create_dataset("/conv9_2B", data = self.net.params['conv7_2'][1].data )
file.create_dataset("/conv10_1W", data = self.net.params['conv8_1'][0].data )
file.create_dataset("/conv10_2W", data = self.net.params['conv8_2'][0].data )
file.create_dataset("/conv10_1B", data = self.net.params['conv8_1'][1].data )
file.create_dataset("/conv10_2B", data = self.net.params['conv8_2'][1].data )
file.create_dataset("/conv11_1W", data = self.net.params['conv9_1'][0].data )
file.create_dataset("/conv11_2W", data = self.net.params['conv9_2'][0].data )
file.create_dataset("/conv11_1B", data = self.net.params['conv9_1'][1].data )
file.create_dataset("/conv11_2B", data = self.net.params['conv9_2'][1].data )
file.create_dataset("/block4_classes_W", data = self.net.params['conv4_3_norm_mbox_conf'][0].data )
file.create_dataset("/block4_classes_B", data = self.net.params['conv4_3_norm_mbox_conf'][1].data )
file.create_dataset("/block4_loc_W", data = self.net.params['conv4_3_norm_mbox_loc'][0].data )
file.create_dataset("/block4_loc_B", data = self.net.params['conv4_3_norm_mbox_loc'][1].data )
file.create_dataset("/block7_classes_W", data = self.net.params['fc7_mbox_conf'][0].data )
file.create_dataset("/block7_classes_B", data = self.net.params['fc7_mbox_conf'][1].data )
file.create_dataset("/block7_loc_W", data = self.net.params['fc7_mbox_loc'][0].data )
file.create_dataset("/block7_loc_B", data = self.net.params['fc7_mbox_loc'][1].data )
file.create_dataset("/block8_classes_W", data = self.net.params['conv6_2_mbox_conf'][0].data )
file.create_dataset("/block8_classes_B", data = self.net.params['conv6_2_mbox_conf'][1].data )
file.create_dataset("/block8_loc_W", data = self.net.params['conv6_2_mbox_loc'][0].data )
file.create_dataset("/block8_loc_B", data = self.net.params['conv6_2_mbox_loc'][1].data )
file.create_dataset("/block9_classes_W", data = self.net.params['conv7_2_mbox_conf'][0].data )
file.create_dataset("/block9_classes_B", data = self.net.params['conv7_2_mbox_conf'][1].data )
file.create_dataset("/block9_loc_W", data = self.net.params['conv7_2_mbox_loc'][0].data )
file.create_dataset("/block9_loc_B", data = self.net.params['conv7_2_mbox_loc'][1].data )
file.create_dataset("/block10_classes_W", data = self.net.params['conv8_2_mbox_conf'][0].data )
file.create_dataset("/block10_classes_B", data = self.net.params['conv8_2_mbox_conf'][1].data )
file.create_dataset("/block10_loc_W", data = self.net.params['conv8_2_mbox_loc'][0].data )
file.create_dataset("/block10_loc_B", data = self.net.params['conv8_2_mbox_loc'][1].data )
file.create_dataset("/block11_classes_W", data = self.net.params['conv9_2_mbox_conf'][0].data )
file.create_dataset("/block11_classes_B", data = self.net.params['conv9_2_mbox_conf'][1].data )
file.create_dataset("/block11_loc_W", data = self.net.params['conv9_2_mbox_loc'][0].data )
file.create_dataset("/block11_loc_B", data = self.net.params['conv9_2_mbox_loc'][1].data )
file.create_dataset("/conv4_3_norm", data = self.net.params['conv4_3_norm'][0].data )
def main(args):
'''main '''
detection = CaffeDetection(args.gpu_id,
args.model_def, args.model_weights,
args.image_resize, args.labelmap_file)
def parse_args():
'''parse args'''
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=int, default=0, help='gpu id')
parser.add_argument('--labelmap_file',
default='data/VOC0712/labelmap_voc.prototxt')
parser.add_argument('--model_def',
default='models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt')
parser.add_argument('--image_resize', default=300, type=int)
parser.add_argument('--model_weights',
default='models/VGGNet/VOC0712/SSD_300x300/'
'VGG_VOC0712_SSD_300x300_iter_120000.caffemodel')
return parser.parse_args()
if __name__ == '__main__':
main(parse_args())
```
|
{
"source": "jfrancis71/PixelCNN",
"score": 3
}
|
#### File: jfrancis71/PixelCNN/PixelCNN.py
```python
import torch
import torch.nn as nn
class PixelCNNMaskConv2d(nn.Conv2d):
def __init__(self, mask_type, kernel_size, num_input_channels, in_data_channel_width, out_data_channel_width):
in_channels = num_input_channels*in_data_channel_width
out_channels = num_input_channels*out_data_channel_width
middle = kernel_size//2
super(PixelCNNMaskConv2d, self).__init__(in_channels, out_channels, kernel_size=kernel_size, padding=middle)
self.mask = torch.nn.Parameter(torch.zeros((out_channels,in_channels,kernel_size,kernel_size)), requires_grad=False)
self.mask[:,:,:middle,:] = 1.0
self.mask[:,:,middle,:middle] = 1.0
if mask_type == "A":
for c in range(1,num_input_channels):
                # In the first ("A") layer the input data channel width is 1, so :c selects only the preceding data channels
self.mask[c*out_data_channel_width:(c+1)*out_data_channel_width,:c,middle,middle] = 1.0
else:
for c in range(num_input_channels):
self.mask[c*out_data_channel_width:(c+1)*out_data_channel_width,:(c+1)*in_data_channel_width,middle,middle] = 1.0
def forward(self, x):
self.weight.data *= self.mask
return super().forward(x)
class ResidualBlock(nn.Module):
def __init__(self, num_input_channels, in_data_channel_width, num_h=None):
super(ResidualBlock, self).__init__()
self.layer1 = PixelCNNMaskConv2d("B", kernel_size=1, num_input_channels=num_input_channels, in_data_channel_width=in_data_channel_width, out_data_channel_width=in_data_channel_width//2)
self.layer2 = PixelCNNMaskConv2d("B", kernel_size=3, num_input_channels=num_input_channels, in_data_channel_width=in_data_channel_width//2, out_data_channel_width=in_data_channel_width//2)
self.layer3 = PixelCNNMaskConv2d("B", kernel_size=1, num_input_channels=num_input_channels, in_data_channel_width=in_data_channel_width//2, out_data_channel_width=in_data_channel_width)
if num_h is not None:
self.conditional_prj = nn.Linear(num_h, num_input_channels*in_data_channel_width//2, bias=False)
else:
self.conditional_prj = None
def forward(self, x, h=None):
r = self.layer1(x)
r = nn.ReLU()(r)
r = self.layer2(r)
if h is not None:
prj = self.conditional_prj(h)
add_r = r.permute((2,3,0,1)) + prj
r = add_r.permute((2,3,0,1))
r = nn.ReLU()(r)
r = self.layer3(r)
r = nn.ReLU()(r)
return x+r
class PixelCNN(nn.Module):
def __init__(self, num_input_channels, num_distribution_params, num_h=None, num_blocks=15, data_channel_width=256):
super(PixelCNN, self).__init__()
self.input_layer = PixelCNNMaskConv2d("A", kernel_size=7, num_input_channels=num_input_channels, in_data_channel_width=1, out_data_channel_width=data_channel_width)
blocks = [ ResidualBlock(num_input_channels, data_channel_width, num_h) for _ in range(num_blocks) ]
self.blocks = nn.ModuleList(blocks)
self.layer1 = PixelCNNMaskConv2d("B", kernel_size=1, num_input_channels=num_input_channels, in_data_channel_width=data_channel_width, out_data_channel_width=data_channel_width//2)
self.layer2 = PixelCNNMaskConv2d("B", kernel_size=1, num_input_channels=num_input_channels, in_data_channel_width=data_channel_width//2, out_data_channel_width=num_distribution_params)
def forward(self, x, h=None):
x = self.input_layer(x)
for r in self.blocks:
x = r(x, h)
x = nn.ReLU()(x)
x = self.layer1(x)
x = nn.ReLU()(x)
x = self.layer2(x)
return x
```
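A quick shape check for the PixelCNN above, assuming the module is importable as `PixelCNN`: each of the `num_input_channels` data channels gets `num_distribution_params` output channels, so a 3-channel input with 10 parameters per channel yields 30 output channels.
```python
# Minimal sketch: shape check for the PixelCNN defined above (small configuration).
import torch
from PixelCNN import PixelCNN  # assumes the module above is importable under this name

net = PixelCNN(num_input_channels=3, num_distribution_params=10,
               num_blocks=2, data_channel_width=32)
x = torch.rand(4, 3, 16, 16)   # batch of 4 three-channel 16x16 images
out = net(x)
print(out.shape)               # torch.Size([4, 30, 16, 16]) = 3 channels * 10 params each
```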
|
{
"source": "jfrancis71/PyGenBrix",
"score": 2
}
|
#### File: PyGenBrix/dist_layers/glow.py
```python
import torch
import torch.nn as nn
from glow_pytorch import model as glow
import PyGenBrix.dist_layers.common_layers as dl
class _GlowDistribution(nn.Module):
def __init__(self, n_flow=32, n_block=4, num_conditional=None):
super(_GlowDistribution, self).__init__()
self.glow_net = glow.Glow(
3, n_flow, n_block, affine=False, conv_lu=True, num_conditional=num_conditional)
self.z_shapes = self.calc_z_shapes(3, 64, n_flow, n_block)
def log_prob(self, samples, conditional=None):
samples = samples - 0.5
log_p, log_dets, _ = self.glow_net(samples + torch.rand_like(samples)/32.0, conditional=conditional )
log_det = log_dets.mean()
log = log_p + log_det
nats_per_dim = log / ( 3 * 64 * 64 )
return {"log_prob": nats_per_dim}
def sample(self, conditional=None, temperature=0.7):
z_sample = []
for z in self.z_shapes:
z_new = torch.randn(1, z[0], z[1], z[2]) * temperature
z_sample.append(z_new.to(next(self.glow_net.parameters()).device))
return self.glow_net.reverse(z_sample, conditional=conditional) + 0.5
def calc_z_shapes(self, n_channel, input_size, n_flow, n_block):
z_shapes = [ (n_channel*2**block, input_size//2**block, input_size//2**block) for block in range(1,n_block) ]
z_shapes.append( (n_channel*2**(n_block+1), input_size//2**n_block, input_size//2**n_block) )
return z_shapes
class GlowDistribution(dl.Distribution):
def __init__(self):
super(GlowDistribution, self).__init__()
self.distribution = _GlowDistribution()
class GlowLayer(dl.Layer):
def __init__(self, num_conditional):
super(GlowLayer, self).__init__(_GlowDistribution(num_conditional=num_conditional))
```
#### File: PyGenBrix/models/binary_layer.py
```python
import torch.nn as nn
import torch.nn.functional as F
# Idea comes from Dreamer v2
# https://arxiv.org/pdf/2010.02193.pdf, page 3
# They used a stochastic neuron; the following code is deterministic.
class BinaryLayer(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
        # The first return value is non-differentiable; the second uses the straight-through gradient (stg) estimator
bin_latent_code = (x>0.0)*1.0
probs = F.sigmoid(x)
stg_latent_code = bin_latent_code + probs - probs.detach()
return bin_latent_code, stg_latent_code
```
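A small sketch of BinaryLayer above, assuming the import path shown in the comment: the hard binary code carries no gradient, while the straight-through output passes gradients back to the logits.
```python
# Minimal sketch of the straight-through behaviour of BinaryLayer defined above.
import torch
from PyGenBrix.models.binary_layer import BinaryLayer  # assumed import path for the module above

layer = BinaryLayer()
logits = torch.randn(2, 8, requires_grad=True)
hard_code, stg_code = layer(logits)

print(hard_code.requires_grad)  # False: thresholding is non-differentiable
stg_code.sum().backward()       # gradients flow through the sigmoid surrogate
print(logits.grad.shape)        # torch.Size([2, 8])
```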
#### File: image32/3bit/GenderPixelCNN.py
```python
import argparse
import torch
import torchvision
import pytorch_lightning as pl
import PyGenBrix.dist_layers.pixelcnn as pcnn
import PyGenBrix.Train as Train
import PyGenBrix.dist_layers.common_layers as dl
import PyGenBrix.dist_layers.spatial_independent as sp
ap = argparse.ArgumentParser(description="GenderPixelCNN")
ap.add_argument("--tensorboard_log")
ap.add_argument("--max_epochs", default=10, type=int)
ns = ap.parse_args()
celeba_dataset = torchvision.datasets.CelebA(
root="/home/julian/ImageDataSets",
transform = torchvision.transforms.Compose( [
torchvision.transforms.Pad((-15, -40,-15-1, -30-1)),
torchvision.transforms.Resize(32),
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: dl.quantize(x,8)) ] ) )
class GenderTrainer(Train.LightningTrainer):
def __init__(self, model, dataset, add_graph=False, learning_rate=.001, batch_size=32):
super(GenderTrainer, self).__init__( model, dataset, add_graph, learning_rate, batch_size)
self.attr_index = celeba_dataset.attr_names.index('Male')
def get_distribution(self, y):
conditional = y[:,self.attr_index:self.attr_index+1].type(torch.float)
conditional_tensor = torch.reshape(conditional, (y.shape[0], 1, 1, 1))
conditional_tensor = torch.nn.Upsample(size=(32,32))(conditional_tensor)
return self.model(conditional_tensor)
class GenderPixelCNNCallback(pl.Callback):
def __init__(self):
super(GenderPixelCNNCallback, self).__init__()
def on_validation_epoch_end(self, trainer, pl_module):
target = torch.arange(start=0,end=20,device=pl_module.device).type(torch.long)
conditional = (target < 10).float().unsqueeze(1)
conditional_tensor = torch.reshape(conditional, (20, 1, 1, 1))
conditional_tensor = torch.nn.Upsample(size=(32,32))(conditional_tensor)
imglist = [pl_module.model(conditional_tensor[c:c+1]).sample() for c in range(20)]
imglist = torch.clip(torch.cat(imglist, axis=0),0.0,1.0)
trainer.logger.experiment.add_image("epoch_image", torchvision.utils.make_grid(imglist, padding=10, nrow=5 ), trainer.current_epoch, dataformats="CHW")
mymodel = pcnn.PixelCNNLayer(
event_shape=[3,32,32],
output_distribution_layer=sp.SpatialIndependentDistributionLayer([3, 32, 32], dl.IndependentQuantizedLayer(num_buckets=8), num_params=30),
num_conditional=1)
pl.Trainer(fast_dev_run = False, gpus=1, accumulate_grad_batches = 16, max_epochs=ns.max_epochs, default_root_dir=ns.tensorboard_log, callbacks=[
GenderPixelCNNCallback()]).fit(GenderTrainer( mymodel, celeba_dataset, learning_rate = .001, batch_size = 4 ) )
```
#### File: PyGenBrix/models/vdvae_train.py
```python
import argparse
import numpy as np
import torch
import torch.nn as nn
import torchvision
import pytorch_lightning as pl
import vdvae.train as vdvae_train
import vdvae.hps as hps
import vdvae.vae as vdvae
import pygenbrix_layer as pygl
import PyGenBrix.dist_layers.common_layers as dl
import PyGenBrix.dist_layers.spatial_independent as sp
class VDVAEModel(nn.Module):
def __init__(self, vae, ema_vae):
super(VDVAEModel, self).__init__()
self.vae, self.ema_vae = vae, ema_vae
class VDVAETrainer(pl.LightningModule):
def __init__(self, model, dataset, batch_size=8, learning_rate=0.0002):
super(VDVAETrainer, self).__init__()
self.model = model
self.dataset = dataset
self.train_set, self.val_set = self.get_datasets()
self.automatic_optimization = False
self.batch_size = batch_size
self.learning_rate = learning_rate
def get_datasets(self):
dataset_size = len(self.dataset)
training_size = np.round(dataset_size*0.9).astype(int)
train_set, val_set = torch.utils.data.random_split(
self.dataset, [training_size, dataset_size-training_size],
generator=torch.Generator().manual_seed(42) )
return (train_set, val_set)
def training_step(self, batch, batch_indx):
x, y = batch
x = x.permute(0, 2, 3, 1)
stats = vdvae_train.training_step(h, (x-.5)*4, x, self.model.vae, self.model.ema_vae, self.optimizers(), -1)
self.log('log_prob', -stats["elbo"], on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log('kl', stats["rate"], on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log('recon_error', stats["distortion"], on_step=True, on_epoch=True, prog_bar=True, logger=True)
def validation_step(self, batch, batch_indx):
x, y = batch
x = x.permute(0, 2, 3, 1)
stats = vdvae_train.eval_step((x-.5)*4, x, self.model.ema_vae)
self.log("validation_log_prob", -stats["elbo"], on_step=False, on_epoch=True, prog_bar=True, logger=True)
self.log("validation_kl", stats["rate"], on_step=False, on_epoch=True, prog_bar=True, logger=True)
self.log("validation_recon_error", stats["distortion"], on_step=False, on_epoch=True, prog_bar=True, logger=True)
def configure_optimizers(self):
return torch.optim.Adam(self.model.parameters(), lr = self.learning_rate)
def train_dataloader(self):
return torch.utils.data.DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=4, drop_last=True, pin_memory=True)
def val_dataloader(self):
return torch.utils.data.DataLoader(self.val_set, batch_size = self.batch_size, num_workers=4, drop_last=True, pin_memory=True)
class LogSamplesVAECallback(pl.Callback):
def __init__(self, step_freq):
super(LogSamplesVAECallback, self).__init__()
self.step_freq = step_freq
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
if (pl_module.global_step % self.step_freq == 0) and (batch_idx % trainer.accumulate_grad_batches == 0):
samples = pl_module.model.ema_vae.forward_uncond_samples(8, t=1.0)
samples = torch.tensor(samples)
samples_grid = torchvision.utils.make_grid(samples, padding=10, nrow=4)
pl_module.logger.experiment.add_image("train_sample", samples_grid, pl_module.global_step, dataformats="CHW")
ap = argparse.ArgumentParser(description="VDVAE")
ap.add_argument("--tensorboard_log")
ap.add_argument("--max_epochs", default=10, type=int)
ap.add_argument("--lr", default=.0002, type=float)
ap.add_argument("--ema_rate", default=.9999, type=float)
ap.add_argument("--fast_dev_run", action="store_true")
ap.add_argument("--dataset")
ap.add_argument("--rv_distribution")
ns = ap.parse_args()
h = hps.Hyperparams()
h.width = 384
h.zdim = 16
h.dec_blocks = "1x1,4m1,4x2,8m4,8x5,16m8,16x10,32m16,32x21"
h.enc_blocks = "32x11,32d2,16x6,16d2,8x6,8d2,4x3,4d4,1x3"
h.image_size = 32
h.ema_rate = ns.ema_rate
h.custom_width_str = ""
h.no_bias_above = 64
h.bottleneck_multiple = 0.25
h.num_mixtures = 10
h.grad_clip = 200.0
h.skip_threshold = 400.0
if ns.dataset == "cifar10":
dataset = torchvision.datasets.CIFAR10(root='/home/julian/ImageDataSets/CIFAR10', train=True,
download=False, transform=torchvision.transforms.ToTensor())
h.image_channels = 3
elif ns.dataset == "celeba32":
dataset = torchvision.datasets.ImageFolder(root="/home/julian/ImageDataSets/celeba/",
transform = torchvision.transforms.Compose([
torchvision.transforms.Pad((-15, -40,-15-1, -30-1)),
torchvision.transforms.Resize(32), torchvision.transforms.ToTensor(),
]))
h.image_channels = 3
elif ns.dataset == "mnist32":
dataset = torchvision.datasets.MNIST('/home/julian/ImageDataSets/MNIST',
train=True, download=False,
transform=torchvision.transforms.Compose([
torchvision.transforms.Resize(32),
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: 1.0-((x<0.5)*1.0))]))
h.image_channels = 1
h.image_size = 32
else:
print("Dataset not recognized.")
exit(1)
if ns.rv_distribution == "bernoulli":
rv_distribution = dl.IndependentBernoulliLayer()
elif ns.rv_distribution == "q3":
rv_distribution = dl.IndependentQuantizedLayer(num_buckets = 8)
elif ns.rv_distribution == "spiq3":
    rv_distribution = sp.SpatialIndependentDistributionLayer([h.image_channels, h.image_size, h.image_size], dl.IndependentQuantizedLayer(num_buckets = 8), num_params=30)
elif ns.rv_distribution == "PixelCNNDiscMixDistribution":
rv_distribution = pixel_cnn.PixelCNNDiscreteMixLayer()
elif ns.rv_distribution == "VDVAEDiscMixDistribution":
rv_distribution = pygl.VDVAEDiscMixtureLayer(h)
else:
print("rv distribution not recognized")
quit()
vae = vdvae.VAE(h, rv_distribution)
ema_vae = vdvae.VAE(h, rv_distribution)
ema_vae.requires_grad = False
model = VDVAEModel(vae, ema_vae)
trainer = VDVAETrainer(model, dataset, batch_size=8, learning_rate=ns.lr)
pl.Trainer(fast_dev_run = ns.fast_dev_run, gpus=1, accumulate_grad_batches = 1, max_epochs=ns.max_epochs, default_root_dir=ns.tensorboard_log,
callbacks=[LogSamplesVAECallback(1000)]).fit(trainer)
```
|
{
"source": "jfrancis71/pytorch-lightning-bolts",
"score": 2
}
|
#### File: self_supervised/amdim/networks.py
```python
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class AMDIMEncoder(nn.Module):
def __init__(self, dummy_batch, num_channels=3, encoder_feature_dim=64, embedding_fx_dim=512,
conv_block_depth=3, encoder_size=32, use_bn=False):
super().__init__()
# NDF = encoder hidden feat size
# RKHS = output dim
n_depth = conv_block_depth
ndf = encoder_feature_dim
self.ndf = encoder_feature_dim
n_rkhs = embedding_fx_dim
self.n_rkhs = embedding_fx_dim
self.use_bn = use_bn
self.dim2layer = None
self.encoder_size = encoder_size
# encoding block for local features
print(f'Using a {encoder_size}x{encoder_size} encoder')
if encoder_size == 32:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 3, 1, 0, False),
ConvResNxN(ndf, ndf, 1, 1, 0, use_bn),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 4, True, use_bn),
ConvResBlock(ndf * 4, ndf * 4, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 4, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 4, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
elif encoder_size == 64:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 3, 1, 0, False),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 8, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 8, True, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 8, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
elif encoder_size == 128:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 5, 2, 2, False, pad_mode='reflect'),
Conv3x3(ndf, ndf, 3, 1, 0, False),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 8, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 8, True, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 8, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
else:
raise RuntimeError(f"Could not build encoder. Encoder size {encoder_size} is not supported")
self._config_modules(
dummy_batch,
output_widths=[1, 5, 7],
n_rkhs=n_rkhs,
use_bn=use_bn
)
def init_weights(self, init_scale=1.):
"""
Run custom weight init for modules...
"""
for layer in self.layer_list:
if isinstance(layer, (ConvResNxN, ConvResBlock)):
layer.init_weights(init_scale)
for layer in self.modules():
if isinstance(layer, (ConvResNxN, ConvResBlock)):
layer.init_weights(init_scale)
if isinstance(layer, FakeRKHSConvNet):
layer.init_weights(init_scale)
def _config_modules(self, x, output_widths, n_rkhs, use_bn):
"""
Configure the modules for extracting fake rkhs embeddings for infomax.
"""
# get activations from each block to see output dims
enc_acts = self._forward_acts(x)
# out dimension to layer index
# dim = number of output feature vectors
self.dim2layer = {}
# pull out layer indexes for the requested output_widths
for layer_i, conv_out in enumerate(enc_acts):
for output_width in output_widths:
b, c, w, h = conv_out.size()
if w == output_width:
self.dim2layer[w] = layer_i
# get projected activation sizes at different layers
# ndf_1 = enc_acts[self.dim2layer[1]].size(1)
ndf_5 = enc_acts[self.dim2layer[5]].size(1)
ndf_7 = enc_acts[self.dim2layer[7]].size(1)
# configure modules for fake rkhs embeddings
self.rkhs_block_5 = FakeRKHSConvNet(ndf_5, n_rkhs, use_bn)
self.rkhs_block_7 = FakeRKHSConvNet(ndf_7, n_rkhs, use_bn)
def _forward_acts(self, x):
"""
Return activations from all layers.
"""
# run forward pass through all layers
layer_acts = [x]
for _, layer in enumerate(self.layer_list):
layer_in = layer_acts[-1]
layer_out = layer(layer_in)
layer_acts.append(layer_out)
# remove input from the returned list of activations
return_acts = layer_acts[1:]
return return_acts
def forward(self, x):
# compute activations in all layers for x
activations = self._forward_acts(x)
# gather rkhs embeddings from certain layers
# last feature map with (b, d, 1, 1) (ie: last network out)
r1 = activations[self.dim2layer[1]]
# last feature map with (b, d, 5, 5)
r5 = activations[self.dim2layer[5]]
r5 = self.rkhs_block_5(r5)
# last feature map with (b, d, 7, 7)
r7 = activations[self.dim2layer[7]]
r7 = self.rkhs_block_7(r7)
return r1, r5, r7
class Conv3x3(nn.Module):
def __init__(self, n_in, n_out, n_kern, n_stride, n_pad,
use_bn=True, pad_mode='constant'):
super(Conv3x3, self).__init__()
assert (pad_mode in ['constant', 'reflect'])
self.n_pad = (n_pad, n_pad, n_pad, n_pad)
self.pad_mode = pad_mode
self.conv = nn.Conv2d(n_in, n_out, n_kern, n_stride, 0,
bias=(not use_bn))
self.relu = nn.ReLU(inplace=True)
self.bn = MaybeBatchNorm2d(n_out, True, use_bn)
def forward(self, x):
if self.n_pad[0] > 0:
# maybe pad the input
x = F.pad(x, self.n_pad, mode=self.pad_mode)
# always apply conv
x = self.conv(x)
# maybe apply batchnorm
x = self.bn(x)
# always apply relu
out = self.relu(x)
return out
class ConvResBlock(nn.Module):
def __init__(self, n_in, n_out, width, stride, pad, depth, use_bn):
super(ConvResBlock, self).__init__()
layer_list = [ConvResNxN(n_in, n_out, width, stride, pad, use_bn)]
for i in range(depth - 1):
layer_list.append(ConvResNxN(n_out, n_out, 1, 1, 0, use_bn))
self.layer_list = nn.Sequential(*layer_list)
def init_weights(self, init_scale=1.):
"""
Do a fixup-ish init for each ConvResNxN in this block.
"""
for m in self.layer_list:
m.init_weights(init_scale)
def forward(self, x):
# run forward pass through the list of ConvResNxN layers
x_out = self.layer_list(x)
return x_out
class ConvResNxN(nn.Module):
def __init__(self, n_in, n_out, width, stride, pad, use_bn=False):
super(ConvResNxN, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.width = width
self.stride = stride
self.pad = pad
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(n_in, n_out, width, stride, pad, bias=False)
self.conv2 = nn.Conv2d(n_out, n_out, 1, 1, 0, bias=False)
self.n_grow = n_out - n_in
if self.n_grow < 0:
# use self.conv3 to downsample feature dim
self.conv3 = nn.Conv2d(n_in, n_out, width, stride, pad, bias=True)
else:
# self.conv3 is not used when n_out >= n_in
self.conv3 = None
self.bn1 = MaybeBatchNorm2d(n_out, True, use_bn)
def init_weights(self, init_scale=1.):
# initialize first conv in res branch
# -- rescale the default init for nn.Conv2d layers
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
# initialize second conv in res branch
# -- set to 0, like fixup/zero init
nn.init.constant_(self.conv2.weight, 0.)
def forward(self, x):
h1 = self.bn1(self.conv1(x))
h2 = self.conv2(self.relu2(h1))
if self.n_out < self.n_in:
h3 = self.conv3(x)
elif self.n_in == self.n_out:
h3 = F.avg_pool2d(x, self.width, self.stride, self.pad)
else:
h3_pool = F.avg_pool2d(x, self.width, self.stride, self.pad)
h3 = F.pad(h3_pool, (0, 0, 0, 0, 0, self.n_grow))
h23 = h2 + h3
return h23
class MaybeBatchNorm2d(nn.Module):
def __init__(self, n_ftr, affine, use_bn):
super(MaybeBatchNorm2d, self).__init__()
self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
self.use_bn = use_bn
def forward(self, x):
if self.use_bn:
x = self.bn(x)
return x
class NopNet(nn.Module):
def __init__(self, norm_dim=None):
super(NopNet, self).__init__()
self.norm_dim = norm_dim
def forward(self, x):
if self.norm_dim is not None:
x_norms = torch.sum(x ** 2., dim=self.norm_dim, keepdim=True)
x_norms = torch.sqrt(x_norms + 1e-6)
x = x / x_norms
return x
class FakeRKHSConvNet(nn.Module):
def __init__(self, n_input, n_output, use_bn=False):
super(FakeRKHSConvNet, self).__init__()
self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn_out = MaybeBatchNorm2d(n_output, True, True)
self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1,
stride=1, padding=0, bias=True)
# when possible, initialize shortcut to be like identity
if n_output >= n_input:
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
for i in range(n_input):
eye_mask[i, i, 0, 0] = 1
self.shortcut.weight.data.uniform_(-0.01, 0.01)
self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.)
def init_weights(self, init_scale=1.):
# initialize first conv in res branch
# -- rescale the default init for nn.Conv2d layers
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
# initialize second conv in res branch
# -- set to 0, like fixup/zero init
nn.init.constant_(self.conv2.weight, 0.)
def forward(self, x):
h_res = self.conv2(self.relu1(self.bn1(self.conv1(x))))
h = self.bn_out(h_res + self.shortcut(x))
return h
```
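A minimal smoke-test sketch for the encoder above, assuming the companion modules from this same file (`Conv3x3`, `ConvResNxN`, `ConvResBlock`, `MaybeBatchNorm2d`, `FakeRKHSConvNet`) are in scope; the dummy batch only needs the right spatial size so `_config_modules` can locate the 1x1, 5x5 and 7x7 feature maps:

```python
import torch

# hypothetical smoke test, not part of the original repo
dummy = torch.zeros(2, 3, 32, 32)            # (batch, channels, H, W)
encoder = AMDIMEncoder(dummy, num_channels=3, encoder_size=32)
encoder.init_weights(init_scale=1.0)

r1, r5, r7 = encoder(dummy)                  # rkhs embeddings at 1x1, 5x5, 7x7
print(r1.shape, r5.shape, r7.shape)          # each has embedding_fx_dim channels
```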
#### File: models/rl/test_scripts.py
```python
from unittest import mock
import pytest
@pytest.mark.parametrize('cli_args', ['--env PongNoFrameskip-v4'
' --max_steps 10'
' --fast_dev_run 1'
' --warm_start_size 10'
' --n_steps 2'
' --batch_size 10'])
def test_cli_run_rl_dqn(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.rl.dqn_model import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', ['--env PongNoFrameskip-v4'
' --max_steps 10'
' --fast_dev_run 1'
' --warm_start_size 10'
' --n_steps 2'
' --batch_size 10'])
def test_cli_run_rl_double_dqn(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.rl.double_dqn_model import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', ['--env PongNoFrameskip-v4'
' --max_steps 10'
' --fast_dev_run 1'
' --warm_start_size 10'
' --n_steps 2'
' --batch_size 10'])
def test_cli_run_rl_dueling_dqn(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.rl.dueling_dqn_model import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', ['--env PongNoFrameskip-v4'
' --max_steps 10'
' --fast_dev_run 1'
' --warm_start_size 10'
' --n_steps 2'
' --batch_size 10'])
def test_cli_run_rl_noisy_dqn(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.rl.noisy_dqn_model import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', ['--env PongNoFrameskip-v4'
' --max_steps 10'
' --fast_dev_run 1'
' --warm_start_size 10'
' --n_steps 2'
' --batch_size 10'])
def test_cli_run_rl_per_dqn(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.rl.per_dqn_model import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', ['--env CartPole-v0'
' --max_steps 10'
' --fast_dev_run 1'
' --batch_size 10'])
def test_cli_run_rl_reinforce(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.rl.reinforce_model import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', ['--env CartPole-v0'
' --max_steps 10'
' --fast_dev_run 1'
' --batch_size 10'])
def test_cli_run_rl_vanilla_policy_gradient(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.rl.vanilla_policy_gradient_model import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
```
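These tests all fake the command line the same way: `mock.patch("argparse._sys.argv", ...)` swaps out the `sys.argv` that `argparse` reads, so `parse_args()` inside each `cli_main` picks up the test arguments. A stripped-down sketch of the pattern (the parser below is a stand-in, not one of the bolts CLIs):

```python
import argparse
from unittest import mock


def cli_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--max_steps", type=int)
    args = parser.parse_args()          # falls back to sys.argv[1:]
    assert args.max_steps == 10


with mock.patch("argparse._sys.argv", ["any.py", "--max_steps", "10"]):
    cli_main()
```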
#### File: rl/unit/test_reinforce.py
```python
import argparse
from unittest import TestCase
import gym
import numpy as np
import torch
from pl_bolts.datamodules.experience_source import DiscountedExperienceSource
from pl_bolts.models.rl.common.agents import Agent
from pl_bolts.models.rl.common.gym_wrappers import ToTensor
from pl_bolts.models.rl.common.networks import MLP
from pl_bolts.models.rl.reinforce_model import Reinforce
class TestReinforce(TestCase):
def setUp(self) -> None:
self.env = ToTensor(gym.make("CartPole-v0"))
self.obs_shape = self.env.observation_space.shape
self.n_actions = self.env.action_space.n
self.net = MLP(self.obs_shape, self.n_actions)
self.agent = Agent(self.net)
self.exp_source = DiscountedExperienceSource(self.env, self.agent)
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser = Reinforce.add_model_specific_args(parent_parser)
args_list = [
"--env", "CartPole-v0",
"--batch_size", "32",
"--gamma", "0.99"
]
self.hparams = parent_parser.parse_args(args_list)
self.model = Reinforce(**vars(self.hparams))
self.rl_dataloader = self.model.train_dataloader()
def test_loss(self):
"""Test the reinforce loss function"""
batch_states = torch.rand(16, 4)
batch_actions = torch.rand(16).long()
batch_qvals = torch.rand(16)
loss = self.model.loss(batch_states, batch_actions, batch_qvals)
self.assertIsInstance(loss, torch.Tensor)
def test_get_qvals(self):
"""Test that given an batch of episodes that it will return a list of qvals for each episode"""
batch_qvals = []
rewards = np.ones(32)
out = self.model.calc_qvals(rewards)
batch_qvals.append(out)
self.assertIsInstance(batch_qvals[0][0], float)
self.assertEqual(batch_qvals[0][0], (batch_qvals[0][1] * self.hparams.gamma) + 1.0)
def test_calc_q_vals(self):
rewards = np.ones(4)
gt_qvals = [3.9403989999999998, 2.9701, 1.99, 1.0]
qvals = self.model.calc_qvals(rewards)
self.assertEqual(gt_qvals, qvals)
```
|
{
"source": "jfrausto7/genclass3D",
"score": 3
}
|
#### File: jfrausto7/genclass3D/hed.py
```python
import cv2
import os
class CropLayer(object):
def __init__(self, params, blobs):
# initialize our starting and ending (x, y)-coordinates of the crop
self.startX = 0
self.startY = 0
self.endX = 0
self.endY = 0
def getMemoryShapes(self, inputs):
""" the crop layer will receive two inputs -- we need to crop the first input blob to match the shape of the second one,
keeping the batch size and number of channels """
(inputShape, targetShape) = (inputs[0], inputs[1])
(batchSize, numChannels) = (inputShape[0], inputShape[1])
(H, W) = (targetShape[2], targetShape[3])
# compute the starting and ending crop coordinates
self.startX = int((inputShape[3] - targetShape[3]) / 2)
self.startY = int((inputShape[2] - targetShape[2]) / 2)
self.endX = self.startX + W
self.endY = self.startY + H
# return the shape of the volume (we perform the actual crop during the forward pass)
return [[batchSize, numChannels, H, W]]
def forward(self, inputs):
# use the derived (x, y)-coordinates to perform the crop
return [inputs[0][:, :, self.startY:self.endY,
self.startX:self.endX]]
def detect_edges(img):
# load serialized edge detector from disk
protoPath = "./models/hed_model/deploy.prototxt"
modelPath = "./models/hed_model/hed_pretrained_bsds.caffemodel"
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath) # model
image = img
(H, W) = image.shape[:2]
# construct a blob out of the input image for the Holistically-Nested Edge Detector
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(W, H),
mean=(104.00698794, 116.66876762, 122.67891434),
swapRB=False, crop=False)
# set the blob as the input to the network and perform a forward pass to compute edges
net.setInput(blob)
hed = net.forward()
hed = cv2.resize(hed[0, 0], (W, H))
hed = (255 * hed).astype("uint8")
return hed
```
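The HED Caffe model contains `Crop` layers that OpenCV's dnn module cannot handle on its own, so the `CropLayer` above normally has to be registered before `detect_edges` loads the network. A minimal sketch, assuming an OpenCV build that exposes `cv2.dnn_registerLayer` and that the model files exist at the hard-coded paths:

```python
import cv2

# Register the custom crop layer once, before the Caffe model is loaded.
cv2.dnn_registerLayer("Crop", CropLayer)

image = cv2.imread("example.jpg")        # hypothetical input image
edges = detect_edges(image)              # single-channel uint8 edge map
cv2.imwrite("example_edges.png", edges)
```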
|
{
"source": "jfrausto7/rareaf",
"score": 2
}
|
#### File: contracts/python/config.py
```python
from pyteal import Bytes, Tmpl, Int
import json
import os
def get_config():
config = None
with open('../../../config.json', 'r') as f:
config = json.load(f)
for k,v in os.environ.items():
if k[:5] == "TMPL_":
config['application'][k[5:].lower()] = v
return config
configuration = get_config()
listing_key = Bytes("listing")
tag_key = Bytes("tag:")
platform_fee = Tmpl.Int("TMPL_FEE_AMT")
platform_addr = Tmpl.Bytes("TMPL_OWNER_ADDR")
platform_admin = Tmpl.Bytes("TMPL_ADMIN_ADDR")
app_id = Tmpl.Int("TMPL_APP_ID")
price_token = Tmpl.Int("TMPL_PRICE_ID")
seed_amt = Int(int(configuration['application']['seed_amt']))
max_price = Int(int(configuration['application']['max_price']))
action_create = Bytes("create")
action_tag = Bytes("tag")
action_untag = Bytes("untag")
action_iprice = Bytes("price_increase")
action_dprice = Bytes("price_decrease")
action_delete = Bytes("delete")
action_purchase= Bytes("purchase")
action_safety = Bytes("safety")
```
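A small sketch of how the `TMPL_` override in `get_config` behaves, assuming the relative `../../../config.json` path resolves from the current working directory; `seed_amt` is a key the module itself reads:

```python
import os

# Environment variables named TMPL_<KEY> overwrite config['application'][<key>]
# (lower-cased); note the override value stays a string.
os.environ["TMPL_SEED_AMT"] = "1000000"

config = get_config()
print(config["application"]["seed_amt"])  # -> "1000000"
```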
|
{
"source": "jfraxanet/nbdev_tutorial_JF",
"score": 3
}
|
#### File: nbdev_tutorial_JF/nbdev_tutorial_JF/core.py
```python
__all__ = ['say_hello', 'hiSayer']
# Cell
def say_hello(to):
"Say hello to someone"
return(f"hello {to}")
# Cell
class hiSayer:
"Dumb class that spams hi to someone"
def __init__(self,to):
self.to = to
def say(self):
"Calls function say_hello to say hello"
return say_hello(self.to)
```
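A quick usage sketch of the two exports above (the name passed in is arbitrary):

```python
print(say_hello("Jeremy"))        # -> 'hello Jeremy'
print(hiSayer("Jeremy").say())    # -> 'hello Jeremy'
```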
|
{
"source": "jfreddypuentes/name-dataset",
"score": 3
}
|
#### File: jfreddypuentes/name-dataset/evaluate.py
```python
from names_dataset import NameDataset
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html
# Precision: The precision is intuitively the ability of the classifier not to label as positive a sample that is negative.
# Recall: The recall is intuitively the ability of the classifier to find all the positive samples.
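# Worked example with made-up counts: if 8 names are correctly flagged (TP),
# 2 non-names are wrongly flagged (FP) and 2 names are missed (FN), then
#   precision = TP / (TP + FP) = 8 / 10 = 0.8
#   recall    = TP / (TP + FN) = 8 / 10 = 0.8
#   f1        = 2 * 0.8 * 0.8 / (0.8 + 0.8) = 0.8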
def read_dict_file(filename):
with open(filename, 'r', encoding='utf8') as r:
lines = r.read().strip().split('\n')
return lines
def main():
m = NameDataset()
names = read_dict_file('eng_dictionary/names-from-forbes-wp_users.txt')
not_names = read_dict_file('eng_dictionary/google-10000-english-no-names.txt')
not_names.extend(read_dict_file('eng_dictionary/1000-no-names.txt'))
names = sorted(set(names))
not_names = sorted(set(not_names))
# 0 => not a name
# 1 => name
targets = []
predictions = []
for q in names:
predictions.append(m.search_first_name(q))
targets.append(True)
for q in not_names:
predictions.append(m.search_first_name(q))
targets.append(False)
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
print('P', precision_score(y_true=targets, y_pred=predictions))
print('R', recall_score(y_true=targets, y_pred=predictions))
print('F', f1_score(y_true=targets, y_pred=predictions))
print('A', accuracy_score(y_true=targets, y_pred=predictions))
if __name__ == '__main__':
main()
```
#### File: name-dataset/names_dataset/query.py
```python
import os
def _query(search_set, key, use_upper_case):
if use_upper_case and key.title() != key:
return False
return key.strip().lower() in search_set
class NameDataset:
FIRST_NAME_SEARCH = 0
LAST_NAME_SEARCH = 1
def __init__(self):
first_names_filename = os.path.join(os.path.dirname(__file__), 'first_names.all.txt')
with open(first_names_filename, 'r', errors='ignore', encoding='utf8') as r:
self.first_names = set(r.read().strip().split('\n'))
last_names_filename = os.path.join(os.path.dirname(__file__), 'last_names.all.txt')
with open(last_names_filename, 'r', errors='ignore', encoding='utf8') as r:
self.last_names = set(r.read().strip().split('\n'))
def search_first_name(self, first_name, use_upper_case=False):
return _query(self.first_names, first_name, use_upper_case)
def search_last_name(self, last_name, use_upper_case=False):
return _query(self.last_names, last_name, use_upper_case)
if __name__ == '__main__':
import sys
if sys.version_info < (3, 0):
print('Please use Python3+')
exit(1)
if len(sys.argv) < 2:
print('Give names separated by a comma as input.')
sys.exit(1)
m = NameDataset()
names_list = sys.argv[1].split(',')
print('----- First names ----')
print('Name'.ljust(30), 'Present?')
for name in names_list:
# ljust just for aesthetic reasons ;)
print(str(name).ljust(30), m.search_first_name(name))
print('----- Last names ----')
print('Name'.ljust(30), 'Present?')
for name in names_list:
# ljust just for aesthetic reasons ;)
print(str(name).ljust(30), m.search_last_name(name))
sys.exit(0)
```
|
{
"source": "j-freddy/ultimate-tictactoe-2",
"score": 3
}
|
#### File: j-freddy/ultimate-tictactoe-2/main.py
```python
import sys, os, math, pygame
from data import *
from cell.cell_value import CellValue
from game import Game
SCREEN_WIDTH = gui_data["screen_width"]
SCREEN_HEIGHT = gui_data["screen_height"]
SCREEN_DIM = min(SCREEN_WIDTH, SCREEN_HEIGHT)
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
def load_image(path):
return pygame.image.load(path).convert_alpha()
# load_image is called once per entry when this dict is built;
# the returned surfaces are cached here for reuse when drawing
images = {
"o" : load_image(os.path.join("img", "o.png")),
"x" : load_image(os.path.join("img", "x.png")),
"board-frame": load_image(os.path.join("img", "board-frame.png"))
}
### GUI
def draw_cell(cell, x, y, dim):
img = images["o"] if cell.get_value() == CellValue.O else images["x"]
img_scaled = pygame.transform.smoothscale(img, (dim, dim))
screen.blit(img_scaled, (x, y))
def draw_local_board(board, x, y, board_dim):
d = board_dim
# Highlight board if active
if board.active:
pygame.draw.rect(screen, gui_data["highlight_colour"], (x, y, d, d))
# Draw frame
img_frame = pygame.transform.smoothscale(images["board-frame"], (d, d))
screen.blit(img_frame, (x, y))
# Draw cells
cell_dim = int(board_dim / board.num_rows)
for (i, cell) in enumerate(board.cells):
if cell.get_value() != CellValue.Empty:
row = math.floor(i / board.num_cols)
col = i % board.num_cols
draw_cell(
cell,
col * cell_dim + x,
row * cell_dim + y,
cell_dim
)
# Draw winner
if board.get_value() != CellValue.Empty:
draw_cell(board, x, y, d)
def draw_global_board(board):
global_board = board
cell_dim = int(SCREEN_DIM / global_board.num_rows)
for (i, local_board) in enumerate(global_board.cells):
row = math.floor(i / global_board.num_cols)
col = i % global_board.num_cols
draw_local_board(
local_board,
col * cell_dim,
row * cell_dim,
cell_dim
)
def draw(game):
screen.fill((255, 255, 255))
# Draw winner
if not game.in_progress():
screen.fill(gui_data["winner_colour"])
draw_global_board(game.global_board)
pygame.display.flip()
def on_click(board):
# Get local board clicked
global_cell_dim = int(SCREEN_DIM / board.num_rows)
(mouse_x, mouse_y) = pygame.mouse.get_pos()
row = math.floor(mouse_y / global_cell_dim)
col = math.floor(mouse_x / global_cell_dim)
local_board = board.get_cell(row, col)
# Get cell clicked on local board
local_cell_dim = int(global_cell_dim / local_board.num_rows)
local_x = mouse_x % global_cell_dim
local_y = mouse_y % global_cell_dim
row = math.floor(local_y / local_cell_dim)
col = math.floor(local_x / local_cell_dim)
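    # Example (illustrative numbers): with a 900x900 window and a 3x3 global
    # board, a click at (500, 200) gives global_cell_dim = 300, so the local
    # board is at row 0, col 1; the local coords are (200, 200) with
    # local_cell_dim = 100, which maps to cell row 2, col 2.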
# Update cell
game.make_move(local_board, row, col)
# Create game
game = Game()
draw(game)
### Main loop
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
if game.current_player.is_human() and game.in_progress():
on_click(game.global_board)
draw(game)
# Handle AIs
if game.current_player.is_ai() and game.in_progress():
move_made = game.handle_ai_iter()
if move_made:
draw(game)
```
#### File: ultimate-tictactoe-2/player/player_human.py
```python
from player.player_type import PlayerType
from player.player import Player
class PlayerHuman(Player):
def __init__(self, id):
super().__init__(id)
def get_type(self):
return PlayerType.Human
```
|
{
"source": "jfreeman812/Project_ZF",
"score": 4
}
|
#### File: Project_ZF/Final_Project_Capstone/TomeRater.py
```python
DEBUG = True
class User(object):
def __init__(self, name, email):
self.name = name
self.email = email
self.books = {}
self.rating = None
if DEBUG:
print("USER: {0}".format(self.__repr__()))
def get_email(self):
return self.email
def change_email(self, updated_address):
self.email = updated_address
print('{user}\'s email has been updated to: {email}'.format(user=self.name, email=self.email))
def __repr__(self):
return 'Customer Name: {name}, Customer Email: {email}, Customer Total Books Read: {books}.'.format(
name=self.name, email=self.email, books=len(self.books))
def __eq__(self, other_user):
return self.name.lower() == other_user.name.lower() and self.email.lower() == other_user.email.lower()
def read_book(self, book, rating=None):
self.rating = rating
self.books[book] = rating
def get_average_rating(self):
average = -1
total = 0
if len(self.books) > 0:
for rating in self.books.values():
if rating is not None:
total += rating
average = total/len(self.books)
return average
class Book(object):
MAX_RATING = 5
def __init__(self, title, isbn):
self.title = title
self.isbn = isbn
self.ratings = []
def get_title(self):
return self.title
def get_isbn(self):
return self.isbn
def set_isbn(self, new_isbn):
self.isbn = new_isbn
print('The book "{title}" ISBN has been updated : {isbn}'.format(isbn=self.isbn, title=self.title))
def add_rating(self, rating):
        if rating in range(self.MAX_RATING + 1):
self.ratings.append(rating)
else:
print('This is an Invalid Rating! (rating = {rating})'.format(rating=rating))
def __eq__(self, other_user):
return self.title.lower() == other_user.title.lower() and self.isbn.lower() == other_user.isbn.lower()
    def get_average_rating(self):
        if not self.ratings:
            return 0
        return sum(self.ratings) / len(self.ratings)
def __hash__(self):
return hash((self.title, self.isbn))
class Fiction(Book):
def __init__(self, title, author, isbn):
self.author = author
super(Fiction, self).__init__(title=title, isbn=isbn)
def get_author(self):
return self.author
def __repr__(self):
return'{title} by {author}'.format(title=self.title, author=self.author)
class Non_Fiction(Book):
def __init__(self, title, subject, level, isbn):
self.subject = subject
self.level = level
super(Non_Fiction, self).__init__(title, isbn)
def get_subject(self):
return self.subject
def get_level(self):
return self.level
def __repr__(self):
return "{title}, a {level} manual on {subject}".format\
(title=self.title, level=self.level, subject=self.subject)
class TomeRater(object):
def __init__(self):
self.users = {}
self.books = {}
def create_book(self, title, isbn):
return Book(title=title, isbn=isbn)
def create_novel(self, title, author, isbn):
return Fiction(title=title, author=author, isbn=isbn)
def create_non_fiction(self, title, subject, level, isbn):
return Non_Fiction(title=title, subject=subject, level=level, isbn=isbn)
def add_book_to_user(self, book, email, rating=None):
if email in self.users.keys():
self.users[email].read_book(book, rating)
            if rating is not None:  # only record a rating when one was given
book.add_rating(rating)
if book not in self.books.keys():
self.books[book] = 0
self.books[book] += 1
else:
print("No user with email {}".format(email))
def add_user(self, name, email, books=None):
self.users[email] = User(name, email)
if books is not None:
for book in books:
self.add_book_to_user(book, email)
def print_catalog(self):
for key in self.books.keys():
print(key)
def print_users(self):
for value in self.users.values():
print(value)
def get_most_read_book(self):
return max(self.books, key=self.books.get)
def highest_rated_book(self):
average_ratings_books = {}
for book in self.books:
average_ratings_books[book.title] = book.get_average_rating()
return max(average_ratings_books, key=average_ratings_books.get)
def most_positive_user(self):
average_ratings_user = {}
for user in self.users.values():
average_ratings_user[user.name] = user.get_average_rating()
return max(average_ratings_user, key=average_ratings_user.get)
    # Getting Creative!
    # To take your project to the next level, choose one of the
    # following extension ideas to implement:
def get_n_most_read_books(self, n):
"""
Returns the n books which have been read the most in descending order.
"""
if type(n) == int:
books_sorted = [k for k in sorted(self.books, key=self.books.get, reverse=True)]
return books_sorted[:n]
else:
print("The argument n = {n} is not of type int. Please pass an int.".format(n=n))
def get_n_most_prolific_readers(self, n):
"""
Returns the n readers which have read the most books in descending order.
"""
if type(n) == int:
            readers = [(reader, len(reader.books)) for reader in self.users.values()]
readers_sorted = [k[0] for k in sorted(readers, key=lambda reader: reader[1], reverse=True)]
return readers_sorted[:n]
else:
print("The argument n = {n} is not of type int. Please pass an int.".format(n=n))
def get_n_most_expensive_books(self, n):
"""
Returns the n books which have the highest price in descending order.
"""
if type(n) == int:
books = {book: book.get_price() for book in self.books.keys()}
books_sorted = [k for k in sorted(books, key=books.get, reverse=True)]
return books_sorted[:n]
else:
print("The argument n = {n} is not of type int. Please pass an int.".format(n=n))
def get_worth_of_user(self, user_email):
"""
Determines the total price of all books read by the user associated
with the user_email argument.
"""
if user_email.find("@") == -1 or (user_email.find(".com") == -1 and user_email.find(".edu") == -1 and user_email.find(".org") == -1):
print("The {email} is invalid, please ensure it has an @ and a .com, .edu, or .org domain.".format(email=user_email))
else:
user = self.users.get(user_email)
if not user:
print("User does not currently exist with email {email}. Please use an email for a valid user.".format(email=user_email))
else:
price = 0
for book in user.books.keys():
price += book.get_price()
return price
```
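A short usage sketch for the classes above; the user, titles and ISBNs are made up, and only methods defined in this file are called:

```python
tome_rater = TomeRater()

book1 = tome_rater.create_book("Society of Mind", "0671657135")
novel1 = tome_rater.create_novel("Neuromancer", "William Gibson", "0441569595")

tome_rater.add_user("Alan Turing", "[email protected]")
tome_rater.add_book_to_user(book1, "[email protected]", rating=3)
tome_rater.add_book_to_user(novel1, "[email protected]", rating=4)

tome_rater.print_catalog()
tome_rater.print_users()
print(tome_rater.get_most_read_book().get_title())
print(tome_rater.highest_rated_book())
print(tome_rater.most_positive_user())
```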
|
{
"source": "jfreeman812/pyrax",
"score": 2
}
|
#### File: pyrax/pyrax/autoscale.py
```python
from __future__ import absolute_import, unicode_literals
import base64
import pyrax
from pyrax.client import BaseClient
from pyrax.cloudloadbalancers import CloudLoadBalancer
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
class ScalingGroup(BaseResource):
def __init__(self, *args, **kwargs):
super(ScalingGroup, self).__init__(*args, **kwargs)
self._non_display = ["active", "launchConfiguration", "links",
"groupConfiguration", "policies", "scalingPolicies"]
self._repr_properties = ["name", "cooldown", "metadata",
"min_entities", "max_entities"]
self._make_policies()
def _make_policies(self):
"""
Convert the 'scalingPolicies' dictionary into AutoScalePolicy objects.
"""
self.policies = [AutoScalePolicy(self.manager, dct, self)
for dct in self.scalingPolicies]
def get_state(self):
"""
Returns the current state of this scaling group.
"""
return self.manager.get_state(self)
def pause(self):
"""
Pauses all execution of the policies for this scaling group.
"""
return self.manager.pause(self)
def resume(self):
"""
Resumes execution of the policies for this scaling group.
"""
return self.manager.resume(self)
def update(self, name=None, cooldown=None, min_entities=None,
max_entities=None, metadata=None):
"""
Updates this ScalingGroup. One or more of the attributes can be
specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_metadata() method.
"""
return self.manager.update(self, name=name,
cooldown=cooldown, min_entities=min_entities,
max_entities=max_entities, metadata=metadata)
def update_metadata(self, metadata):
"""
Adds the given metadata dict to the existing metadata for this scaling
group.
"""
return self.manager.update_metadata(self, metadata=metadata)
def get_configuration(self):
"""
Returns the scaling group configuration in a dictionary.
"""
return self.manager.get_configuration(self)
def get_launch_config(self):
"""
Returns the launch configuration for this scaling group.
"""
return self.manager.get_launch_config(self)
def update_launch_config(self, server_name=None, image=None, flavor=None,
disk_config=None, metadata=None, personality=None, networks=None,
load_balancers=None, key_name=None, config_drive=False, user_data=None):
"""
Updates the server launch configuration for this scaling group.
One or more of the available attributes can be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_launch_metadata() method.
"""
return self.manager.update_launch_config(self, server_name=server_name,
image=image, flavor=flavor, disk_config=disk_config,
metadata=metadata, personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
def update_launch_metadata(self, metadata):
"""
Adds the given metadata dict to the existing metadata for this scaling
group's launch configuration.
"""
return self.manager.update_launch_metadata(self, metadata)
def add_policy(self, name, policy_type, cooldown, change=None,
is_percent=False, desired_capacity=None, args=None):
"""
Adds a policy with the given values to this scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
"""
return self.manager.add_policy(self, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def list_policies(self):
"""
Returns a list of all policies defined for this scaling group.
"""
return self.manager.list_policies(self)
def get_policy(self, policy):
"""
Gets the detail for the specified policy.
"""
return self.manager.get_policy(self, policy)
def update_policy(self, policy, name=None, policy_type=None, cooldown=None,
change=None, is_percent=False, desired_capacity=None, args=None):
"""
Updates the specified policy. One or more of the parameters may be
specified.
"""
return self.manager.update_policy(scaling_group=self, policy=policy,
name=name, policy_type=policy_type, cooldown=cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def execute_policy(self, policy):
"""
Executes the specified policy for this scaling group.
"""
return self.manager.execute_policy(scaling_group=self, policy=policy)
def delete_policy(self, policy):
"""
Deletes the specified policy from this scaling group.
"""
return self.manager.delete_policy(scaling_group=self, policy=policy)
def add_webhook(self, policy, name, metadata=None):
"""
Adds a webhook to the specified policy.
"""
return self.manager.add_webhook(self, policy, name, metadata=metadata)
def list_webhooks(self, policy):
"""
Returns a list of all webhooks for the specified policy.
"""
return self.manager.list_webhooks(self, policy)
def update_webhook(self, policy, webhook, name=None, metadata=None):
"""
Updates the specified webhook. One or more of the parameters may be
specified.
"""
return self.manager.update_webhook(scaling_group=self, policy=policy,
webhook=webhook, name=name, metadata=metadata)
def update_webhook_metadata(self, policy, webhook, metadata):
"""
Adds the given metadata dict to the existing metadata for the specified
webhook.
"""
return self.manager.update_webhook_metadata(self, policy, webhook,
metadata)
def delete_webhook(self, policy, webhook):
"""
Deletes the specified webhook from the specified policy.
"""
return self.manager.delete_webhook(self, policy, webhook)
@property
def policy_count(self):
return len(self.policies)
##################################################################
# The following property declarations allow access to the base attributes
# of the ScalingGroup held in the 'groupConfiguration' dict as if they
# were native attributes.
##################################################################
@property
def name(self):
return self.groupConfiguration.get("name")
@name.setter
def name(self, val):
self.groupConfiguration["name"] = val
@property
def cooldown(self):
return self.groupConfiguration.get("cooldown")
@cooldown.setter
def cooldown(self, val):
self.groupConfiguration["cooldown"] = val
@property
def metadata(self):
return self.groupConfiguration.get("metadata")
@metadata.setter
def metadata(self, val):
self.groupConfiguration["metadata"] = val
@property
def min_entities(self):
return self.groupConfiguration.get("minEntities")
@min_entities.setter
def min_entities(self, val):
self.groupConfiguration["minEntities"] = val
@property
def max_entities(self):
return self.groupConfiguration.get("maxEntities")
@max_entities.setter
def max_entities(self, val):
self.groupConfiguration["maxEntities"] = val
##################################################################
class ScalingGroupManager(BaseManager):
def __init__(self, api, resource_class=None, response_key=None,
plural_response_key=None, uri_base=None):
super(ScalingGroupManager, self).__init__(api,
resource_class=resource_class, response_key=response_key,
plural_response_key=plural_response_key, uri_base=uri_base)
def get_state(self, scaling_group):
"""
Returns the current state of the specified scaling group as a
dictionary.
"""
uri = "/%s/%s/state" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_get(uri)
data = resp_body["group"]
ret = {}
ret["active"] = [itm["id"] for itm in data["active"]]
ret["active_capacity"] = data["activeCapacity"]
ret["desired_capacity"] = data["desiredCapacity"]
ret["pending_capacity"] = data["pendingCapacity"]
ret["paused"] = data["paused"]
return ret
def pause(self, scaling_group):
"""
Pauses all execution of the policies for the specified scaling group.
"""
uri = "/%s/%s/pause" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_post(uri)
return None
def resume(self, scaling_group):
"""
Resumes execution of the policies for the specified scaling group.
"""
uri = "/%s/%s/resume" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_post(uri)
return None
def get_configuration(self, scaling_group):
"""
Returns the scaling group's configuration in a dictionary.
"""
uri = "/%s/%s/config" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_get(uri)
return resp_body.get("groupConfiguration")
def replace(self, scaling_group, name, cooldown, min_entities,
max_entities, metadata=None):
"""
Replace an existing ScalingGroup configuration. All of the attributes
        must be specified. If you wish to delete any of the optional attributes,
pass them in as None.
"""
body = self._create_group_config_body(name, cooldown, min_entities,
max_entities, metadata=metadata)
group_id = utils.get_id(scaling_group)
uri = "/%s/%s/config" % (self.uri_base, group_id)
resp, resp_body = self.api.method_put(uri, body=body)
def update(self, scaling_group, name=None, cooldown=None,
min_entities=None, max_entities=None, metadata=None):
"""
Updates an existing ScalingGroup. One or more of the attributes can
be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_metadata() method.
"""
if not isinstance(scaling_group, ScalingGroup):
scaling_group = self.get(scaling_group)
uri = "/%s/%s/config" % (self.uri_base, scaling_group.id)
if cooldown is None:
cooldown = scaling_group.cooldown
if min_entities is None:
min_entities = scaling_group.min_entities
if max_entities is None:
max_entities = scaling_group.max_entities
body = {"name": name or scaling_group.name,
"cooldown": cooldown,
"minEntities": min_entities,
"maxEntities": max_entities,
"metadata": metadata or scaling_group.metadata,
}
resp, resp_body = self.api.method_put(uri, body=body)
return None
def update_metadata(self, scaling_group, metadata):
"""
Adds the given metadata dict to the existing metadata for the scaling
group.
"""
if not isinstance(scaling_group, ScalingGroup):
scaling_group = self.get(scaling_group)
curr_meta = scaling_group.metadata
curr_meta.update(metadata)
return self.update(scaling_group, metadata=curr_meta)
def get_launch_config(self, scaling_group):
"""
Returns the launch configuration for the specified scaling group.
"""
key_map = {
"OS-DCF:diskConfig": "disk_config",
"flavorRef": "flavor",
"imageRef": "image",
}
uri = "/%s/%s/launch" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_get(uri)
ret = {}
data = resp_body.get("launchConfiguration")
ret["type"] = data.get("type")
args = data.get("args", {})
ret["load_balancers"] = args.get("loadBalancers")
for key, value in args.get("server", {}).items():
norm_key = key_map.get(key, key)
ret[norm_key] = value
return ret
def replace_launch_config(self, scaling_group, launch_config_type,
server_name, image, flavor, disk_config=None, metadata=None,
personality=None, networks=None, load_balancers=None,
key_name=None, config_drive=False, user_data=None):
"""
Replace an existing launch configuration. All of the attributes must be
specified. If you wish to delete any of the optional attributes, pass
them in as None.
"""
group_id = utils.get_id(scaling_group)
uri = "/%s/%s/launch" % (self.uri_base, group_id)
body = self._create_launch_config_body(
launch_config_type=launch_config_type, server_name=server_name,
image=image, flavor=flavor, disk_config=disk_config,
metadata=metadata, personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
resp, resp_body = self.api.method_put(uri, body=body)
def update_launch_config(self, scaling_group, server_name=None, image=None,
flavor=None, disk_config=None, metadata=None, personality=None,
networks=None, load_balancers=None, key_name=None, config_drive=False,
user_data=None):
"""
Updates the server launch configuration for an existing scaling group.
One or more of the available attributes can be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_launch_metadata() method.
"""
if not isinstance(scaling_group, ScalingGroup):
scaling_group = self.get(scaling_group)
uri = "/%s/%s/launch" % (self.uri_base, scaling_group.id)
largs = scaling_group.launchConfiguration.get("args", {})
srv_args = largs.get("server", {})
lb_args = largs.get("loadBalancers", {})
flav = flavor or srv_args.get("flavorRef")
dconf = disk_config or srv_args.get("OS-DCF:diskConfig", "AUTO")
if personality is None:
personality = srv_args.get("personality", [])
cfg_drv = config_drive or srv_args.get("config_drive")
if user_data:
user_data = base64.b64encode(user_data)
usr_data = user_data or srv_args.get("user_data")
update_metadata = metadata or srv_args.get("metadata")
body = {"type": "launch_server",
"args": {
"server": {
"name": server_name or srv_args.get("name"),
"imageRef": image or srv_args.get("imageRef"),
"flavorRef": flav,
"OS-DCF:diskConfig": dconf,
"networks": networks or srv_args.get("networks"),
},
"loadBalancers": load_balancers or lb_args,
},
}
bas = body["args"]["server"]
if cfg_drv:
bas["config_drive"] = cfg_drv
if usr_data:
bas["user_data"] = usr_data
if personality:
bas["personality"] = self._encode_personality(personality)
if update_metadata:
bas["metadata"] = update_metadata
key_name = key_name or srv_args.get("key_name")
if key_name:
bas["key_name"] = key_name
resp, resp_body = self.api.method_put(uri, body=body)
return None
def update_launch_metadata(self, scaling_group, metadata):
"""
Adds the given metadata dict to the existing metadata for the scaling
group's launch configuration.
"""
if not isinstance(scaling_group, ScalingGroup):
scaling_group = self.get(scaling_group)
curr_meta = scaling_group.launchConfiguration.get("args", {}).get(
"server", {}).get("metadata", {})
curr_meta.update(metadata)
return self.update_launch_config(scaling_group, metadata=curr_meta)
def add_policy(self, scaling_group, name, policy_type, cooldown,
change=None, is_percent=False, desired_capacity=None, args=None):
"""
Adds a policy with the given values to the specified scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
"""
uri = "/%s/%s/policies" % (self.uri_base, utils.get_id(scaling_group))
body = self._create_policy_body(name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
# "body" needs to be a list
body = [body]
resp, resp_body = self.api.method_post(uri, body=body)
pol_info = resp_body.get("policies")[0]
return AutoScalePolicy(self, pol_info, scaling_group)
def _create_policy_body(self, name, policy_type, cooldown, change=None,
is_percent=None, desired_capacity=None, args=None):
body = {"name": name, "cooldown": cooldown, "type": policy_type}
if change is not None:
if is_percent:
body["changePercent"] = change
else:
body["change"] = change
if desired_capacity is not None:
body["desiredCapacity"] = desired_capacity
if args is not None:
body["args"] = args
return body
def list_policies(self, scaling_group):
"""
Returns a list of all policies defined for the specified scaling group.
"""
uri = "/%s/%s/policies" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_get(uri)
return [AutoScalePolicy(self, data, scaling_group)
for data in resp_body.get("policies", [])]
def get_policy(self, scaling_group, policy):
"""
Gets the detail for the specified policy.
"""
uri = "/%s/%s/policies/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
resp, resp_body = self.api.method_get(uri)
data = resp_body.get("policy")
return AutoScalePolicy(self, data, scaling_group)
def replace_policy(self, scaling_group, policy, name,
policy_type, cooldown, change=None, is_percent=False,
desired_capacity=None, args=None):
"""
Replace an existing policy. All of the attributes must be specified. If
you wish to delete any of the optional attributes, pass them in as
None.
"""
policy_id = utils.get_id(policy)
group_id = utils.get_id(scaling_group)
uri = "/%s/%s/policies/%s" % (self.uri_base, group_id, policy_id)
body = self._create_policy_body(name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
resp, resp_body = self.api.method_put(uri, body=body)
def update_policy(self, scaling_group, policy, name=None, policy_type=None,
cooldown=None, change=None, is_percent=False,
desired_capacity=None, args=None):
"""
Updates the specified policy. One or more of the parameters may be
specified.
"""
uri = "/%s/%s/policies/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
if not isinstance(policy, AutoScalePolicy):
# Received an ID
policy = self.get_policy(scaling_group, policy)
body = {"name": name or policy.name,
"type": policy_type or policy.type,
"cooldown": cooldown or policy.cooldown,
}
if desired_capacity is not None:
body["desiredCapacity"] = desired_capacity
elif change is not None:
if is_percent:
body["changePercent"] = change
else:
body["change"] = change
else:
if getattr(policy, "changePercent", None) is not None:
body["changePercent"] = policy.changePercent
elif getattr(policy, "change", None) is not None:
body["change"] = policy.change
elif getattr(policy, "desiredCapacity", None) is not None:
body["desiredCapacity"] = policy.desiredCapacity
args = args or getattr(policy, "args", None)
if args is not None:
body["args"] = args
resp, resp_body = self.api.method_put(uri, body=body)
return None
def execute_policy(self, scaling_group, policy):
"""
Executes the specified policy for this scaling group.
"""
uri = "/%s/%s/policies/%s/execute" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
resp, resp_body = self.api.method_post(uri)
return None
def delete_policy(self, scaling_group, policy):
"""
Deletes the specified policy from the scaling group.
"""
uri = "/%s/%s/policies/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
resp, resp_body = self.api.method_delete(uri)
def _create_webhook_body(self, name, metadata=None):
if metadata is None:
# If updating a group with existing metadata, metadata MUST be
# passed. Leaving it out causes Otter to return 400.
metadata = {}
body = {"name": name, "metadata": metadata}
return body
def add_webhook(self, scaling_group, policy, name, metadata=None):
"""
Adds a webhook to the specified policy.
"""
uri = "/%s/%s/policies/%s/webhooks" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
body = self._create_webhook_body(name, metadata=metadata)
# "body" needs to be a list
body = [body]
resp, resp_body = self.api.method_post(uri, body=body)
data = resp_body.get("webhooks")[0]
return AutoScaleWebhook(self, data, policy, scaling_group)
def list_webhooks(self, scaling_group, policy):
"""
Returns a list of all webhooks for the specified policy.
"""
uri = "/%s/%s/policies/%s/webhooks" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
resp, resp_body = self.api.method_get(uri)
return [AutoScaleWebhook(self, data, policy, scaling_group)
for data in resp_body.get("webhooks", [])]
def get_webhook(self, scaling_group, policy, webhook):
"""
Gets the detail for the specified webhook.
"""
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
resp, resp_body = self.api.method_get(uri)
data = resp_body.get("webhook")
return AutoScaleWebhook(self, data, policy, scaling_group)
def replace_webhook(self, scaling_group, policy, webhook, name,
metadata=None):
"""
Replace an existing webhook. All of the attributes must be specified.
If you wish to delete any of the optional attributes, pass them in as
None.
"""
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
group_id = utils.get_id(scaling_group)
policy_id = utils.get_id(policy)
webhook_id = utils.get_id(webhook)
body = self._create_webhook_body(name, metadata=metadata)
resp, resp_body = self.api.method_put(uri, body=body)
def update_webhook(self, scaling_group, policy, webhook, name=None,
metadata=None):
"""
Updates the specified webhook. One or more of the parameters may be
specified.
"""
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
if not isinstance(webhook, AutoScaleWebhook):
# Received an ID
webhook = self.get_webhook(scaling_group, policy, webhook)
body = {"name": name or webhook.name,
"metadata": metadata or webhook.metadata,
}
resp, resp_body = self.api.method_put(uri, body=body)
webhook.reload()
return webhook
def update_webhook_metadata(self, scaling_group, policy, webhook, metadata):
"""
Adds the given metadata dict to the existing metadata for the specified
webhook.
"""
if not isinstance(webhook, AutoScaleWebhook):
webhook = self.get_webhook(scaling_group, policy, webhook)
curr_meta = webhook.metadata or {}
curr_meta.update(metadata)
return self.update_webhook(scaling_group, policy, webhook,
metadata=curr_meta)
def delete_webhook(self, scaling_group, policy, webhook):
"""
Deletes the specified webhook from the specified policy.
"""
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
resp, resp_body = self.api.method_delete(uri)
return None
@staticmethod
def _resolve_lbs(load_balancers):
"""
Takes either a single LB reference or a list of references and returns
the dictionary required for creating a Scaling Group.
References can be either a dict that matches the structure required by
the autoscale API, a CloudLoadBalancer instance, or the ID of the load
balancer.
"""
lb_args = []
if not isinstance(load_balancers, list):
lbs = [load_balancers]
else:
lbs = load_balancers
for lb in lbs:
if isinstance(lb, dict):
lb_args.append(lb)
elif isinstance(lb, CloudLoadBalancer):
lb_args.append({
"loadBalancerId": lb.id,
"port": lb.port,
})
elif isinstance(lb, tuple):
lb_args.append({"loadBalancerId": lb[0],
"port": lb[1]})
else:
# See if it's an ID for a Load Balancer
try:
instance = pyrax.cloud_loadbalancers.get(lb)
except Exception:
raise exc.InvalidLoadBalancer("Received an invalid "
"specification for a Load Balancer: '%s'" % lb)
lb_args.append({
"loadBalancerId": instance.id,
"port": instance.port,
})
return lb_args
def _encode_personality(self, personality):
"""
Personality files must be base64-encoded before transmitting.
"""
if personality is None:
personality = []
else:
personality = utils.coerce_to_list(personality)
for pfile in personality:
if "contents" in pfile:
pfile["contents"] = base64.b64encode(pfile["contents"])
return personality
def _create_body(self, name, cooldown, min_entities, max_entities,
launch_config_type, server_name, image, flavor, disk_config=None,
metadata=None, personality=None, networks=None,
load_balancers=None, scaling_policies=None, group_metadata=None,
key_name=None, config_drive=False, user_data=None):
"""
        Used to create the dict required to create a Scaling Group.
"""
if metadata is None:
metadata = {}
if scaling_policies is None:
scaling_policies = []
group_config = self._create_group_config_body(name, cooldown,
min_entities, max_entities, metadata=group_metadata)
launch_config = self._create_launch_config_body(launch_config_type,
server_name, image, flavor, disk_config=disk_config,
metadata=metadata, personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
body = {
"groupConfiguration": group_config,
"launchConfiguration": launch_config,
"scalingPolicies": scaling_policies,
}
return body
def _create_group_config_body(self, name, cooldown, min_entities,
max_entities, metadata=None):
if metadata is None:
# If updating a group with existing metadata, metadata MUST be
# passed. Leaving it out causes Otter to return 400.
metadata = {}
body = {
"name": name,
"cooldown": cooldown,
"minEntities": min_entities,
"maxEntities": max_entities,
"metadata": metadata,
}
return body
def _create_launch_config_body(self, launch_config_type,
server_name, image, flavor, disk_config=None, metadata=None,
personality=None, networks=None, load_balancers=None,
key_name=None, config_drive=False, user_data=None):
server_args = {
"flavorRef": "%s" % flavor,
"name": server_name,
"imageRef": utils.get_id(image),
}
if metadata is not None:
server_args["metadata"] = metadata
if personality is not None:
server_args["personality"] = self._encode_personality(personality)
if networks is not None:
server_args["networks"] = networks
if disk_config is not None:
server_args["OS-DCF:diskConfig"] = disk_config
if key_name is not None:
server_args["key_name"] = key_name
if config_drive is not False:
server_args['config_drive'] = config_drive
if user_data is not None:
server_args['user_data'] = base64.b64encode(user_data)
if load_balancers is None:
load_balancers = []
load_balancer_args = self._resolve_lbs(load_balancers)
return {"type": launch_config_type,
"args": {"server": server_args,
"loadBalancers": load_balancer_args}}
class AutoScalePolicy(BaseResource):
def __init__(self, manager, info, scaling_group, *args, **kwargs):
super(AutoScalePolicy, self).__init__(manager, info, *args, **kwargs)
if not isinstance(scaling_group, ScalingGroup):
scaling_group = manager.get(scaling_group)
self.scaling_group = scaling_group
self._non_display = ["links", "scaling_group"]
def get(self):
"""
Gets the details for this policy.
"""
return self.manager.get_policy(self.scaling_group, self)
reload = get
def delete(self):
"""
Deletes this policy.
"""
return self.manager.delete_policy(self.scaling_group, self)
def update(self, name=None, policy_type=None, cooldown=None, change=None,
is_percent=False, desired_capacity=None, args=None):
"""
Updates this policy. One or more of the parameters may be
specified.
"""
return self.manager.update_policy(scaling_group=self.scaling_group,
policy=self, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def execute(self):
"""
Executes this policy.
"""
return self.manager.execute_policy(self.scaling_group, self)
def add_webhook(self, name, metadata=None):
"""
Adds a webhook to this policy.
"""
return self.manager.add_webhook(self.scaling_group, self, name,
metadata=metadata)
def list_webhooks(self):
"""
Returns a list of all webhooks for this policy.
"""
return self.manager.list_webhooks(self.scaling_group, self)
def get_webhook(self, webhook):
"""
Gets the detail for the specified webhook.
"""
return self.manager.get_webhook(self.scaling_group, self, webhook)
def update_webhook(self, webhook, name=None, metadata=None):
"""
Updates the specified webhook. One or more of the parameters may be
specified.
"""
return self.manager.update_webhook(self.scaling_group, policy=self,
webhook=webhook, name=name, metadata=metadata)
def update_webhook_metadata(self, webhook, metadata):
"""
Adds the given metadata dict to the existing metadata for the specified
webhook.
"""
return self.manager.update_webhook_metadata(self.scaling_group, self,
webhook, metadata)
def delete_webhook(self, webhook):
"""
Deletes the specified webhook from this policy.
"""
return self.manager.delete_webhook(self.scaling_group, self, webhook)
class AutoScaleWebhook(BaseResource):
def __init__(self, manager, info, policy, scaling_group, *args, **kwargs):
super(AutoScaleWebhook, self).__init__(manager, info, *args, **kwargs)
if not isinstance(policy, AutoScalePolicy):
policy = manager.get_policy(scaling_group, policy)
self.policy = policy
self._non_display = ["links", "policy"]
def get(self):
return self.policy.get_webhook(self)
reload = get
def update(self, name=None, metadata=None):
"""
Updates this webhook. One or more of the parameters may be specified.
"""
return self.policy.update_webhook(self, name=name, metadata=metadata)
def update_metadata(self, metadata):
"""
Adds the given metadata dict to the existing metadata for this webhook.
"""
return self.policy.update_webhook_metadata(self, metadata)
def delete(self):
"""
Deletes this webhook.
"""
return self.policy.delete_webhook(self)
class AutoScaleClient(BaseClient):
"""
This is the primary class for interacting with AutoScale.
"""
name = "Autoscale"
def _configure_manager(self):
"""
Creates a manager to handle autoscale operations.
"""
self._manager = ScalingGroupManager(self,
resource_class=ScalingGroup, response_key="group",
uri_base="groups")
def get_state(self, scaling_group):
"""
Returns the current state of the specified scaling group.
"""
return self._manager.get_state(scaling_group)
def pause(self, scaling_group):
"""
Pauses all execution of the policies for the specified scaling group.
"""
# NOTE: This is not yet implemented. The code is based on the docs,
# so it should either work or be pretty close.
return self._manager.pause(scaling_group)
def resume(self, scaling_group):
"""
Resumes execution of the policies for the specified scaling group.
"""
# NOTE: This is not yet implemented. The code is based on the docs,
# so it should either work or be pretty close.
return self._manager.resume(scaling_group)
def replace(self, scaling_group, name, cooldown, min_entities,
max_entities, metadata=None):
"""
Replace an existing ScalingGroup configuration. All of the attributes
must be specified. If you wish to delete any of the optional
attributes, pass them in as None.
"""
return self._manager.replace(scaling_group, name, cooldown,
min_entities, max_entities, metadata=metadata)
def update(self, scaling_group, name=None, cooldown=None, min_entities=None,
max_entities=None, metadata=None):
"""
Updates an existing ScalingGroup. One or more of the attributes can be
specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_metadata() method.
"""
return self._manager.update(scaling_group, name=name, cooldown=cooldown,
min_entities=min_entities, max_entities=max_entities,
metadata=metadata)
def update_metadata(self, scaling_group, metadata):
"""
Adds the given metadata dict to the existing metadata for the scaling
group.
"""
return self._manager.update_metadata(scaling_group, metadata)
def get_configuration(self, scaling_group):
"""
Returns the scaling group's configuration in a dictionary.
"""
return self._manager.get_configuration(scaling_group)
def get_launch_config(self, scaling_group):
"""
Returns the launch configuration for the specified scaling group.
"""
return self._manager.get_launch_config(scaling_group)
def replace_launch_config(self, scaling_group, launch_config_type,
server_name, image, flavor, disk_config=None, metadata=None,
personality=None, networks=None, load_balancers=None,
key_name=None):
"""
Replace an existing launch configuration. All of the attributes must be
specified. If you wish to delete any of the optional attributes, pass
them in as None.
"""
return self._manager.replace_launch_config(scaling_group,
launch_config_type, server_name, image, flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name)
def update_launch_config(self, scaling_group, server_name=None, image=None,
flavor=None, disk_config=None, metadata=None, personality=None,
networks=None, load_balancers=None, key_name=None, config_drive=False,
user_data=None):
"""
Updates the server launch configuration for an existing scaling group.
One or more of the available attributes can be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_launch_metadata() method.
"""
return self._manager.update_launch_config(scaling_group,
server_name=server_name, image=image, flavor=flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
def update_launch_metadata(self, scaling_group, metadata):
"""
Adds the given metadata dict to the existing metadata for the scaling
group's launch configuration.
"""
return self._manager.update_launch_metadata(scaling_group, metadata)
def add_policy(self, scaling_group, name, policy_type, cooldown,
change=None, is_percent=False, desired_capacity=None, args=None):
"""
Adds a policy with the given values to the specified scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage.
"""
return self._manager.add_policy(scaling_group, name, policy_type,
cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def list_policies(self, scaling_group):
"""
Returns a list of all policies defined for the specified scaling group.
"""
return self._manager.list_policies(scaling_group)
def get_policy(self, scaling_group, policy):
"""
Gets the detail for the specified policy.
"""
return self._manager.get_policy(scaling_group, policy)
def replace_policy(self, scaling_group, policy, name,
policy_type, cooldown, change=None, is_percent=False,
desired_capacity=None, args=None):
"""
Replace an existing policy. All of the attributes must be specified. If
you wish to delete any of the optional attributes, pass them in as
None.
"""
return self._manager.replace_policy(scaling_group, policy, name,
policy_type, cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def update_policy(self, scaling_group, policy, name=None, policy_type=None,
cooldown=None, change=None, is_percent=False,
desired_capacity=None, args=None):
"""
Updates the specified policy. One or more of the parameters may be
specified.
"""
return self._manager.update_policy(scaling_group, policy, name=name,
policy_type=policy_type, cooldown=cooldown, change=change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
def execute_policy(self, scaling_group, policy):
"""
Executes the specified policy for the scaling group.
"""
return self._manager.execute_policy(scaling_group=scaling_group,
policy=policy)
def delete_policy(self, scaling_group, policy):
"""
Deletes the specified policy from the scaling group.
"""
return self._manager.delete_policy(scaling_group=scaling_group,
policy=policy)
def add_webhook(self, scaling_group, policy, name, metadata=None):
"""
Adds a webhook to the specified policy.
"""
return self._manager.add_webhook(scaling_group, policy, name,
metadata=metadata)
def list_webhooks(self, scaling_group, policy):
"""
Returns a list of all webhooks defined for the specified policy.
"""
return self._manager.list_webhooks(scaling_group, policy)
def get_webhook(self, scaling_group, policy, webhook):
"""
Gets the detail for the specified webhook.
"""
return self._manager.get_webhook(scaling_group, policy, webhook)
def replace_webhook(self, scaling_group, policy, webhook, name,
metadata=None):
"""
Replace an existing webhook. All of the attributes must be specified.
If you wish to delete any of the optional attributes, pass them in as
None.
"""
return self._manager.replace_webhook(scaling_group, policy, webhook,
name, metadata=metadata)
def update_webhook(self, scaling_group, policy, webhook, name=None,
metadata=None):
"""
Updates the specified webhook. One or more of the parameters may be
specified.
"""
return self._manager.update_webhook(scaling_group=scaling_group,
policy=policy, webhook=webhook, name=name, metadata=metadata)
def update_webhook_metadata(self, scaling_group, policy, webhook, metadata):
"""
Adds the given metadata dict to the existing metadata for the specified
webhook.
"""
return self._manager.update_webhook_metadata(scaling_group, policy,
webhook, metadata)
def delete_webhook(self, scaling_group, policy, webhook):
"""
Deletes the specified webhook from the policy.
"""
return self._manager.delete_webhook(scaling_group, policy, webhook)
```
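A typical policy-and-webhook workflow with the client above stays short. The following is a minimal, illustrative sketch only: it assumes pyrax credentials are already set and that `pyrax.autoscale` is the configured `AutoScaleClient`; the scaling group ID, names, and metadata are hypothetical.
```python
import pyrax

# Assumes pyrax.set_credential_file(...) or equivalent has already run.
au = pyrax.autoscale

# Hypothetical scaling group ID; in practice take one from au.list().
group = "0123abcd-0000-0000-0000-000000000000"

# Add a webhook-type policy that scales up by two servers with a 5-minute
# cooldown ('change' is absolute here because is_percent defaults to False).
policy = au.add_policy(group, name="scale-up-2", policy_type="webhook",
        cooldown=300, change=2)

# Attach a webhook so the policy can be triggered via its capability URL.
hook = au.add_webhook(group, policy, name="scale-up-hook",
        metadata={"owner": "ops"})

# The policy can also be executed directly.
au.execute_policy(group, policy)
```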
#### File: pyrax/pyrax/cloudcdn.py
```python
from functools import wraps
import re
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
class CloudCDNFlavor(BaseResource):
pass
class CloudCDNFlavorManager(BaseManager):
def list(self):
resp, resp_body = self.api.method_get("/%s" % self.uri_base)
return [CloudCDNFlavor(self, info)
for info in resp_body[self.plural_response_key]]
def get(self, flavor_id):
resp, resp_body = self.api.method_get(
"/%s/%s" % (self.uri_base, flavor_id))
return CloudCDNFlavor(self, resp_body)
class CloudCDNService(BaseResource):
def patch(self, changes):
self.manager.patch(self.id, changes)
def delete(self):
self.manager.delete(self)
def delete_assets(self, url=None, all=False):
self.manager.delete_assets(self.id, url, all)
class CloudCDNServiceManager(BaseManager):
def create(self, name, flavor_id, domains, origins,
restrictions=None, caching=None):
body = {"name": name,
"flavor_id": flavor_id,
"domains": domains,
"origins": origins,
"restrictions": restrictions or [],
"caching": caching or []}
resp, resp_body = self.api.method_post("/%s" % self.uri_base,
body=body)
body["id"] = resp.headers.get("location").split("/")[-1]
return CloudCDNService(self, body)
def patch(self, service_id, changes):
resp, resp_body = self.api.method_patch(
"/%s/%s" % (self.uri_base, service_id), body=changes)
return None
def delete_assets(self, service_id, url=None, all=False):
uri = "/%s/%s/assets" % (self.uri_base, service_id)
queries = {}
if all:
queries["all"] = "true"
if url is not None:
queries["url"] = url
qs = utils.dict_to_qs(queries)
if qs:
uri = "%s?%s" % (uri, qs)
self.api.method_delete(uri)
return None
def list(self, limit=None, marker=None):
uri = "/%s" % self.uri_base
qs = utils.dict_to_qs(dict(limit=limit, marker=marker))
if qs:
uri = "%s?%s" % (uri, qs)
return self._list(uri)
class CloudCDNClient(BaseClient):
"""
This is the base client for creating and managing Cloud CDN.
"""
def __init__(self, *args, **kwargs):
super(CloudCDNClient, self).__init__(*args, **kwargs)
self.name = "Cloud CDN"
def _configure_manager(self):
"""
Creates the Manager instances to handle monitoring.
"""
self._flavor_manager = CloudCDNFlavorManager(self,
uri_base="flavors", resource_class=CloudCDNFlavor,
response_key=None, plural_response_key="flavors")
self._services_manager = CloudCDNServiceManager(self,
uri_base="services", resource_class=CloudCDNService,
response_key=None, plural_response_key="services")
def ping(self):
"""Ping the server
Returns None if successful, or raises some exception...TODO
"""
self.method_get("/ping")
def list_flavors(self):
"""List CDN flavors."""
return self._flavor_manager.list()
def get_flavor(self, flavor_id):
"""Get one CDN flavor."""
return self._flavor_manager.get(flavor_id)
def list_services(self, limit=None, marker=None):
"""List CDN services."""
return self._services_manager.list(limit=limit, marker=marker)
def get_service(self, service_id):
"""Get one CDN service."""
return self._services_manager.get(service_id)
def create_service(self, name, flavor_id, domains, origins,
restrictions=None, caching=None):
"""Create a new CDN service.
Arguments:
name: The name of the service.
flavor_id: The ID of the flavor to use for this service.
domains: A list of dictionaries, each of which has a required
key "domain" and optional key "protocol" (the default
protocol is http).
origins: A list of dictionaries, each of which has a required
key "origin" which is the URL or IP address to pull
origin content from. Optional keys include "port" to
use a port other than the default of 80, and "ssl"
to enable SSL, which is disabled by default.
            caching: An optional list of dictionaries describing the caching
                     rules to apply to the service.
"""
return self._services_manager.create(name, flavor_id, domains, origins,
restrictions, caching)
def patch_service(self, service_id, changes):
"""Update a CDN service with a patch
Arguments:
service_id: The ID of the service to update.
changes: A list of dictionaries containing the following keys:
op, path, and value. The "op" key can be any of the
following actions: add, replace, or remove. Path
is the path to update. A value must be specified for
add or replace ops, but can be omitted for remove.
"""
self._services_manager.patch(service_id, changes)
def delete_service(self, service):
"""Delete a CDN service."""
self._services_manager.delete(service)
def delete_assets(self, service_id, url=None, all=False):
"""Delete CDN assets
Arguments:
service_id: The ID of the service to delete from.
            url: The URL at which to delete assets.
            all: When True, delete all assets associated with the service_id.
                You cannot specify both url and all.
"""
self._services_manager.delete_assets(service_id, url, all)
#################################################################
# The following methods are defined in the generic client class,
# but don't have meaning in cdn, as there is not a single
# resource that defines this module.
#################################################################
def list(self, limit=None, marker=None):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def get(self, item):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def create(self, *args, **kwargs):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def delete(self, item):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def find(self, **kwargs):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def findall(self, **kwargs):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
#################################################################
```
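The service manager wires the create/patch/purge calls together; the sketch below is illustrative only. It assumes `cdn` is an already-configured `CloudCDNClient` instance, and the flavor ID, domain, and origin values are hypothetical placeholders.
```python
# List the available CDN flavors, then create a service against one of them.
flavors = cdn.list_flavors()

service = cdn.create_service(
        name="my-site",                       # hypothetical service name
        flavor_id="cdn",                      # hypothetical; pick from flavors
        domains=[{"domain": "www.example.com"}],
        origins=[{"origin": "203.0.113.10", "port": 80, "ssl": False}])

# Purge one cached asset, or everything for the service (never both at once).
cdn.delete_assets(service.id, url="/images/logo.png")
cdn.delete_assets(service.id, all=True)
```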
#### File: pyrax/pyrax/clouddns.py
```python
from __future__ import absolute_import, unicode_literals
from functools import wraps
import json
import re
import time
import six
import pyrax
from pyrax.client import BaseClient
from pyrax.cloudloadbalancers import CloudLoadBalancer
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
# How long (in seconds) to wait for a response from async operations
DEFAULT_TIMEOUT = 5
# How long (in seconds) to wait in between checks for async completion
DEFAULT_DELAY = 0.5
# How many times to retry a GET before raising an error
DEFAULT_RETRY = 3
def assure_domain(fnc):
@wraps(fnc)
def _wrapped(self, domain, *args, **kwargs):
if not isinstance(domain, CloudDNSDomain):
# Must be the ID or name. Try ID first:
try:
domain = self._manager.get(domain)
except exc.NotFound:
domain = self._manager.find(name=domain)
return fnc(self, domain, *args, **kwargs)
return _wrapped
class CloudDNSRecord(BaseResource):
"""
This class represents a domain record.
"""
GET_DETAILS = False
# Initialize the supported attributes.
type = None
name = None
data = None
priority = None
ttl = None
comment = None
def update(self, data=None, priority=None, ttl=None, comment=None):
"""
Modifies this record.
"""
return self.manager.update_record(self.domain_id, self, data=data,
priority=priority, ttl=ttl, comment=comment)
def get(self):
"""
Gets the full information for an existing record for this domain.
"""
return self.manager.get_record(self.domain_id, self)
def delete(self):
"""
Deletes an existing record for this domain.
"""
return self.manager.delete_record(self.domain_id, self)
class CloudDNSDomain(BaseResource):
"""
This class represents a DNS domain.
"""
def delete(self, delete_subdomains=False):
"""
Deletes this domain and all of its resource records. If this domain has
subdomains, each subdomain will now become a root domain. If you wish to
also delete any subdomains, pass True to 'delete_subdomains'.
"""
self.manager.delete(self, delete_subdomains=delete_subdomains)
def changes_since(self, date_or_datetime):
"""
Gets the changes for this domain since the specified date/datetime.
The date can be one of:
- a Python datetime object
- a Python date object
        - a string in the format 'YYYY-MM-DD HH:MM:SS'
        - a string in the format 'YYYY-MM-DD'
It returns a list of dicts, whose keys depend on the specific change
that was made. A simple example of such a change dict:
{u'accountId': 000000,
u'action': u'update',
u'changeDetails': [{u'field': u'serial_number',
u'newValue': u'1354038941',
u'originalValue': u'1354038940'},
{u'field': u'updated_at',
u'newValue': u'Tue Nov 27 17:55:41 UTC 2012',
u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}],
u'domain': u'example.com',
u'targetId': 00000000,
u'targetType': u'Domain'}
"""
return self.manager.changes_since(self, date_or_datetime)
def export(self):
"""
Provides the BIND (Berkeley Internet Name Domain) 9 formatted contents
of the requested domain. This call is for a single domain only, and as such,
does not provide subdomain information.
Sample export:
{u'accountId': 000000,
u'contentType': u'BIND_9',
u'contents': u'example.com.\t3600\tIN\tSOA\tns.rackspace.com. '
'<EMAIL>. 1354202974 21600 3600 1814400 500'
'example.com.\t3600\tIN\tNS\tdns1.stabletransit.com.'
'example.com.\t3600\tIN\tNS\tdns2.stabletransit.com.',
u'id': 1111111}
"""
return self.manager.export_domain(self)
def update(self, emailAddress=None, ttl=None, comment=None):
"""
Provides a way to modify the following attributes of a domain
entry:
- email address
- ttl setting
- comment
"""
return self.manager.update_domain(self, emailAddress=emailAddress,
ttl=ttl, comment=comment)
def list_subdomains(self, limit=None, offset=None):
"""
Returns a list of all subdomains for this domain.
"""
return self.manager.list_subdomains(self, limit=limit, offset=offset)
def list_records(self, limit=None, offset=None):
"""
Returns a list of all records configured for this domain.
"""
return self.manager.list_records(self, limit=limit, offset=offset)
def search_records(self, record_type, name=None, data=None):
"""
Returns a list of all records configured for this domain that match
the supplied search criteria.
"""
return self.manager.search_records(self, record_type=record_type,
name=name, data=data)
def find_record(self, record_type, name=None, data=None):
"""
Returns a single record for this domain that matches the supplied
search criteria.
If no record matches, a DomainRecordNotFound exception will be raised.
If more than one matches, a DomainRecordNotUnique exception will
be raised.
"""
matches = self.manager.search_records(self, record_type=record_type,
name=name, data=data)
if not matches:
raise exc.DomainRecordNotFound
elif len(matches) > 1:
raise exc.DomainRecordNotUnique
return matches[0]
def add_records(self, records):
"""
Adds the records to this domain. Each record should be a dict with the
following keys:
- type (required)
- name (required)
- data (required)
- ttl (optional)
- comment (optional)
- priority (required for MX and SRV records; forbidden otherwise)
"""
return self.manager.add_records(self, records)
# Create an alias, so that adding a single record is more intuitive
add_record = add_records
def get_record(self, record):
"""
Gets the full information for an existing record for this domain.
"""
return self.manager.get_record(self, record)
def update_record(self, record, data=None, priority=None,
ttl=None, comment=None):
"""
Modifies an existing record for this domain.
"""
return self.manager.update_record(self, record, data=data,
priority=priority, ttl=ttl, comment=comment)
def update_records(self, records):
"""
Modifies multiple existing records for a domain. Each record to be
        updated should be a dict with the following required keys:
- id
- name
Each record must also contain one or more of the following optional
keys:
- data
- ttl
- comment
- priority (optional for MX and SRV records; forbidden otherwise)
"""
return self.manager.update_records(self, records)
def delete_record(self, record):
"""
Deletes an existing record for this domain.
"""
return self.manager.delete_record(self, record)
class CloudDNSPTRRecord(object):
"""
This represents a Cloud DNS PTR record (reverse DNS).
"""
def __init__(self, data=None, device=None):
self.type = self.id = self.data = self.name = None
self.ttl = self.comment = None
if data:
for key, val in data.items():
setattr(self, key, val)
self.device = device
def delete(self):
"""
Deletes this PTR record from its device.
"""
return pyrax.cloud_dns.delete_ptr_records(self.device, self.data)
def __repr__(self):
reprkeys = ("id", "data", "name", "ttl")
info = ", ".join("%s=%s" % (key, getattr(self, key)) for key in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
class CloudDNSManager(BaseManager):
def __init__(self, api, resource_class=None, response_key=None,
plural_response_key=None, uri_base=None):
super(CloudDNSManager, self).__init__(api, resource_class=resource_class,
response_key=response_key, plural_response_key=plural_response_key,
uri_base=uri_base)
self._paging = {"domain": {}, "subdomain": {}, "record": {}}
self._reset_paging(service="all")
self._timeout = DEFAULT_TIMEOUT
self._delay = DEFAULT_DELAY
def _create_body(self, name, emailAddress, ttl=3600, comment=None,
subdomains=None, records=None):
"""
Creates the appropriate dict for creating a new domain.
"""
if subdomains is None:
subdomains = []
if records is None:
records = []
body = {"domains": [{
"name": name,
"emailAddress": emailAddress,
"ttl": ttl,
"comment": comment,
"subdomains": {
"domains": subdomains
},
"recordsList": {
"records": records
},
}]}
return body
def _set_timeout(self, timeout):
"""
Changes the duration for which the program will wait for a response from
        the DNS system. Setting the timeout to zero will make the program wait
        indefinitely.
"""
self._timeout = timeout
def _set_delay(self, delay):
"""
Changes the interval that the program will pause in between attempts to
see if a request has completed.
"""
self._delay = delay
def _reset_paging(self, service, body=None):
"""
Resets the internal attributes when there is no current paging request.
"""
if service == "all":
for svc in self._paging.keys():
svc_dct = self._paging[svc]
svc_dct["next_uri"] = svc_dct["prev_uri"] = None
svc_dct["total_entries"] = None
return
svc_dct = self._paging[service]
svc_dct["next_uri"] = svc_dct["prev_uri"] = None
svc_dct["total_entries"] = None
if not body:
return
svc_dct["total_entries"] = body.get("totalEntries")
links = body.get("links")
uri_base = self.uri_base
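        # The API returns absolute hrefs for the next/previous pages; keep the
        # portion starting one character before uri_base (i.e. including the
        # leading slash) so it can be handed straight back to the paging calls.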
if links:
for link in links:
href = link["href"]
pos = href.index(uri_base)
page_uri = href[pos - 1:]
if link["rel"] == "next":
svc_dct["next_uri"] = page_uri
elif link["rel"] == "previous":
svc_dct["prev_uri"] = page_uri
def _get_pagination_qs(self, limit, offset):
pagination_items = []
if limit is not None:
pagination_items.append("limit=%s" % limit)
if offset is not None:
pagination_items.append("offset=%s" % offset)
qs = "&".join(pagination_items)
qs = "?%s" % qs if qs else ""
return qs
def list(self, limit=None, offset=None):
"""Gets a list of all domains, or optionally a page of domains."""
uri = "/%s%s" % (self.uri_base, self._get_pagination_qs(limit, offset))
return self._list(uri)
def _list(self, uri, obj_class=None, list_all=False):
"""
Handles the communication with the API when getting
a full listing of the resources managed by this class.
"""
resp, resp_body = self._retry_get(uri)
if obj_class is None:
obj_class = self.resource_class
data = resp_body[self.plural_response_key]
ret = [obj_class(self, res, loaded=False)
for res in data if res]
self._reset_paging("domain", resp_body)
if list_all:
dom_paging = self._paging.get("domain", {})
while dom_paging.get("next_uri"):
next_uri = dom_paging.get("next_uri")
ret.extend(self._list(uri=next_uri, obj_class=obj_class,
list_all=False))
return ret
def list_previous_page(self):
"""
When paging through results, this will return the previous page, using
the same limit. If there are no more results, a NoMoreResults exception
will be raised.
"""
uri = self._paging.get("domain", {}).get("prev_uri")
if uri is None:
raise exc.NoMoreResults("There are no previous pages of domains "
"to list.")
return self._list(uri)
def list_next_page(self):
"""
When paging through results, this will return the next page, using the
same limit. If there are no more results, a NoMoreResults exception
will be raised.
"""
uri = self._paging.get("domain", {}).get("next_uri")
if uri is None:
raise exc.NoMoreResults("There are no more pages of domains to "
"list.")
return self._list(uri)
def _get(self, uri):
"""
Handles the communication with the API when getting
a specific resource managed by this class.
Because DNS returns a different format for the body,
the BaseManager method must be overridden here.
"""
uri = "%s?showRecords=false&showSubdomains=false" % uri
resp, body = self._retry_get(uri)
body["records"] = []
return self.resource_class(self, body, loaded=True)
def _retry_get(self, uri):
"""
Handles GET calls to the Cloud DNS API in order to retry on empty
body responses.
"""
for i in six.moves.range(DEFAULT_RETRY):
resp, body = self.api.method_get(uri)
if body:
return resp, body
# Tried too many times
raise exc.ServiceResponseFailure("The Cloud DNS service failed to "
"respond to the request.")
def _async_call(self, uri, body=None, method="GET", error_class=None,
has_response=True, *args, **kwargs):
"""
Handles asynchronous call/responses for the DNS API.
Returns the response headers and body if the call was successful.
If an error status is returned, and the 'error_class' parameter is
specified, that class of error will be raised with the details from
the response. If no error class is specified, the response headers
and body will be returned to the calling method, which will have
to handle the result.
"""
api_methods = {
"GET": self._retry_get,
"POST": self.api.method_post,
"PUT": self.api.method_put,
"DELETE": self.api.method_delete,
}
api_method = api_methods[method]
try:
if body is None:
resp, resp_body = api_method(uri, *args, **kwargs)
else:
resp, resp_body = api_method(uri, body=body, *args, **kwargs)
except Exception as e:
if error_class:
raise error_class(e)
else:
raise
callbackURL = resp_body["callbackUrl"].split("/status/")[-1]
massagedURL = "/status/%s?showDetails=true" % callbackURL
start = time.time()
timed_out = False
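        # Poll the status URL until the job leaves the RUNNING state, sleeping
        # self._delay seconds between checks and giving up after self._timeout
        # seconds (a timeout of zero disables the deadline entirely).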
while (resp_body["status"] == "RUNNING") and not timed_out:
resp_body = None
while resp_body is None and not timed_out:
resp, resp_body = self._retry_get(massagedURL)
if self._timeout:
timed_out = ((time.time() - start) > self._timeout)
time.sleep(self._delay)
if timed_out:
raise exc.DNSCallTimedOut("The API call to '%s' did not complete "
"after %s seconds." % (uri, self._timeout))
if error_class and (resp_body["status"] == "ERROR"):
# This call will handle raising the error.
self._process_async_error(resp_body, error_class)
if has_response:
ret = resp, resp_body["response"]
else:
ret = resp, resp_body
try:
resp_body = json.loads(resp_body)
except Exception:
pass
return ret
def _process_async_error(self, resp_body, error_class):
"""
The DNS API does not return a consistent format for their error
messages. This abstracts out the differences in order to present
a single unified message in the exception to be raised.
"""
def _fmt_error(err):
# Remove the cumbersome Java-esque message
details = err.get("details", "").replace("\n", " ")
if not details:
details = err.get("message", "")
return "%s (%s)" % (details, err.get("code", ""))
error = resp_body.get("error", "")
if "failedItems" in error:
# Multi-error response
faults = error.get("failedItems", {}).get("faults", [])
msgs = [_fmt_error(fault) for fault in faults]
msg = "\n".join(msgs)
else:
msg = _fmt_error(error)
raise error_class(msg)
def _create(self, uri, body, records=None, subdomains=None,
return_none=False, return_raw=False, **kwargs):
"""
Handles the communication with the API when creating a new
resource managed by this class.
Since DNS works completely differently for create() than the other
APIs, this method overrides the default BaseManager behavior.
If 'records' are supplied, they should be a list of dicts. Each
record dict should have the following format:
{"name": "example.com",
"type": "A",
"data": "192.0.2.17",
"ttl": 86400}
If 'subdomains' are supplied, they should be a list of dicts. Each
subdomain dict should have the following format:
{"name": "sub1.example.com",
"comment": "1st sample subdomain",
"emailAddress": "<EMAIL>"}
"""
self.run_hooks("modify_body_for_create", body, **kwargs)
resp, resp_body = self._async_call(uri, body=body, method="POST",
error_class=exc.DomainCreationFailed)
response_body = resp_body[self.response_key][0]
return self.resource_class(self, response_body)
def delete(self, domain, delete_subdomains=False):
"""
Deletes the specified domain and all of its resource records. If the
domain has subdomains, each subdomain will now become a root domain. If
you wish to also delete any subdomains, pass True to 'delete_subdomains'.
"""
uri = "/%s/%s" % (self.uri_base, utils.get_id(domain))
if delete_subdomains:
uri = "%s?deleteSubdomains=true" % uri
resp, resp_body = self._async_call(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False)
def findall(self, **kwargs):
"""
Finds all items with attributes matching ``**kwargs``.
Normally this isn't very efficient, since the default action is to
load the entire list and then filter on the Python side, but the DNS
API provides a more efficient search option when filtering on name.
So if the filter is on name, use that; otherwise, use the default.
"""
if (len(kwargs) == 1) and ("name" in kwargs):
# Filtering on name; use the more efficient method.
nm = kwargs["name"].lower()
uri = "/%s?name=%s" % (self.uri_base, nm)
matches = self._list(uri, list_all=True)
return [match for match in matches
if match.name.lower() == nm]
else:
return super(CloudDNSManager, self).findall(**kwargs)
def changes_since(self, domain, date_or_datetime):
"""
Gets the changes for a domain since the specified date/datetime.
The date can be one of:
- a Python datetime object
- a Python date object
        - a string in the format 'YYYY-MM-DD HH:MM:SS'
        - a string in the format 'YYYY-MM-DD'
It returns a list of dicts, whose keys depend on the specific change
that was made. A simple example of such a change dict:
{u'accountId': 000000,
u'action': u'update',
u'changeDetails': [{u'field': u'serial_number',
u'newValue': u'1354038941',
u'originalValue': u'1354038940'},
{u'field': u'updated_at',
u'newValue': u'Tue Nov 27 17:55:41 UTC 2012',
u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}],
u'domain': u'example.com',
u'targetId': 00000000,
u'targetType': u'Domain'}
"""
domain_id = utils.get_id(domain)
dt = utils.iso_time_string(date_or_datetime, show_tzinfo=True)
uri = "/domains/%s/changes?since=%s" % (domain_id, dt)
resp, body = self._retry_get(uri)
return body.get("changes", [])
def export_domain(self, domain):
"""
Provides the BIND (Berkeley Internet Name Domain) 9 formatted contents
of the requested domain. This call is for a single domain only, and as
such, does not provide subdomain information.
Sample export:
{u'accountId': 000000,
u'contentType': u'BIND_9',
u'contents': u'example.com.\t3600\tIN\tSOA\tns.rackspace.com. '
'<EMAIL>. 1354202974 21600 3600 1814400 500'
'example.com.\t3600\tIN\tNS\tdns1.stabletransit.com.'
'example.com.\t3600\tIN\tNS\tdns2.stabletransit.com.',
u'id': 1111111}
"""
uri = "/domains/%s/export" % utils.get_id(domain)
resp, resp_body = self._async_call(uri, method="GET",
error_class=exc.NotFound)
return resp_body.get("contents", "")
def import_domain(self, domain_data):
"""
Takes a string in the BIND 9 format and creates a new domain. See the
'export_domain()' method for a description of the format.
"""
uri = "/domains/import"
body = {"domains": [{
"contentType": "BIND_9",
"contents": domain_data,
}]}
resp, resp_body = self._async_call(uri, method="POST", body=body,
error_class=exc.DomainCreationFailed)
return resp_body
def update_domain(self, domain, emailAddress=None, ttl=None, comment=None):
"""
Provides a way to modify the following attributes of a domain
record:
- email address
- ttl setting
- comment
"""
if not any((emailAddress, ttl, comment)):
raise exc.MissingDNSSettings(
"No settings provided to update_domain().")
uri = "/domains/%s" % utils.get_id(domain)
body = {"comment": comment,
"ttl": ttl,
"emailAddress": emailAddress,
}
none_keys = [key for key, val in body.items()
if val is None]
for none_key in none_keys:
body.pop(none_key)
resp, resp_body = self._async_call(uri, method="PUT", body=body,
error_class=exc.DomainUpdateFailed, has_response=False)
return resp_body
def list_subdomains(self, domain, limit=None, offset=None):
"""
Returns a list of all subdomains of the specified domain.
"""
# The commented-out uri is the official API, but it is
# horribly slow.
# uri = "/domains/%s/subdomains" % utils.get_id(domain)
uri = "/domains?name=%s" % domain.name
page_qs = self._get_pagination_qs(limit, offset)
if page_qs:
uri = "%s&%s" % (uri, page_qs[1:])
return self._list_subdomains(uri, domain.id)
def _list_subdomains(self, uri, domain_id):
resp, body = self._retry_get(uri)
self._reset_paging("subdomain", body)
subdomains = body.get("domains", [])
return [CloudDNSDomain(self, subdomain, loaded=False)
for subdomain in subdomains
if subdomain["id"] != domain_id]
def list_subdomains_previous_page(self):
"""
When paging through subdomain results, this will return the previous
page, using the same limit. If there are no more results, a
NoMoreResults exception will be raised.
"""
uri = self._paging.get("subdomain", {}).get("prev_uri")
if uri is None:
raise exc.NoMoreResults("There are no previous pages of subdomains "
"to list.")
return self._list_subdomains(uri)
def list_subdomains_next_page(self):
"""
When paging through subdomain results, this will return the next page,
using the same limit. If there are no more results, a NoMoreResults
exception will be raised.
"""
uri = self._paging.get("subdomain", {}).get("next_uri")
if uri is None:
raise exc.NoMoreResults("There are no more pages of subdomains "
"to list.")
return self._list_subdomains(uri)
def list_records(self, domain, limit=None, offset=None):
"""
Returns a list of all records configured for the specified domain.
"""
uri = "/domains/%s/records%s" % (utils.get_id(domain),
self._get_pagination_qs(limit, offset))
return self._list_records(uri)
def _list_records(self, uri):
resp, body = self._retry_get(uri)
self._reset_paging("record", body)
# The domain ID will be in the URL
pat = "domains/([^/]+)/records"
mtch = re.search(pat, uri)
dom_id = mtch.groups()[0]
records = body.get("records", [])
for record in records:
record["domain_id"] = dom_id
return [CloudDNSRecord(self, record, loaded=False)
for record in records if record]
def list_records_previous_page(self):
"""
When paging through record results, this will return the previous page,
using the same limit. If there are no more results, a NoMoreResults
exception will be raised.
"""
uri = self._paging.get("record", {}).get("prev_uri")
if uri is None:
raise exc.NoMoreResults("There are no previous pages of records "
"to list.")
return self._list_records(uri)
def list_records_next_page(self):
"""
When paging through record results, this will return the next page,
using the same limit. If there are no more results, a NoMoreResults
exception will be raised.
"""
uri = self._paging.get("record", {}).get("next_uri")
if uri is None:
raise exc.NoMoreResults("There are no more pages of records to list.")
return self._list_records(uri)
def search_records(self, domain, record_type, name=None, data=None):
"""
Returns a list of all records configured for the specified domain that
match the supplied search criteria.
"""
search_params = []
if name:
search_params.append("name=%s" % name)
if data:
search_params.append("data=%s" % data)
query_string = "&".join(search_params)
dom_id = utils.get_id(domain)
uri = "/domains/%s/records?type=%s" % (dom_id, record_type)
if query_string:
uri = "%s&%s" % (uri, query_string)
resp, body = self._retry_get(uri)
records = body.get("records", [])
self._reset_paging("record", body)
rec_paging = self._paging.get("record", {})
while rec_paging.get("next_uri"):
resp, body = self._retry_get(rec_paging.get("next_uri"))
self._reset_paging("record", body)
records.extend(body.get("records", []))
for record in records:
record["domain_id"] = dom_id
return [CloudDNSRecord(self, record, loaded=False)
for record in records if record]
def add_records(self, domain, records):
"""
Adds the records to this domain. Each record should be a dict with the
following keys:
- type (required)
- name (required)
- data (required)
- ttl (optional)
- comment (optional)
- priority (required for MX and SRV records; forbidden otherwise)
"""
if isinstance(records, dict):
# Single record passed
records = [records]
dom_id = utils.get_id(domain)
uri = "/domains/%s/records" % dom_id
body = {"records": records}
resp, resp_body = self._async_call(uri, method="POST", body=body,
error_class=exc.DomainRecordAdditionFailed, has_response=False)
records = resp_body.get("response", {}).get("records", [])
for record in records:
record["domain_id"] = dom_id
return [CloudDNSRecord(self, record, loaded=False)
for record in records if record]
def get_record(self, domain, record):
"""
Gets the full information for an existing record for this domain.
"""
rec_id = utils.get_id(record)
domain_id = utils.get_id(domain)
uri = "/domains/%s/records/%s" % (domain_id, rec_id)
resp, resp_body = self._retry_get(uri)
resp_body["domain_id"] = domain_id
return CloudDNSRecord(self, resp_body, loaded=False)
def update_record(self, domain, record, data=None, priority=None,
ttl=None, comment=None):
"""
Modifies an existing record for a domain.
"""
rdict = {"id": record.id,
"name": record.name,
}
pdict = {"data": data,
"priority": priority,
"ttl": ttl,
"comment": comment,
}
utils.params_to_dict(pdict, rdict)
return self.update_records(domain, [rdict])
def update_records(self, domain, records):
"""
        Modifies multiple existing records for a domain.
"""
if not isinstance(records, list):
raise TypeError("Expected records of type list")
uri = "/domains/%s/records" % utils.get_id(domain)
resp, resp_body = self._async_call(uri, method="PUT",
body={"records": records},
error_class=exc.DomainRecordUpdateFailed, has_response=False)
return resp_body
def delete_record(self, domain, record):
"""
Deletes an existing record for a domain.
"""
uri = "/domains/%s/records/%s" % (utils.get_id(domain),
utils.get_id(record))
resp, resp_body = self._async_call(uri, method="DELETE",
error_class=exc.DomainRecordDeletionFailed, has_response=False)
return resp_body
def _get_ptr_details(self, device, device_type):
"""
Takes a device and device type and returns the corresponding HREF link
and service name for use with PTR record management.
"""
context = self.api.identity
region = self.api.region_name
if device_type.lower().startswith("load"):
ep = pyrax._get_service_endpoint(context, "load_balancer", region)
svc = "loadbalancers"
svc_name = "cloudLoadBalancers"
else:
ep = pyrax._get_service_endpoint(context, "compute", region)
svc = "servers"
svc_name = "cloudServersOpenStack"
href = "%s/%s/%s" % (ep, svc, utils.get_id(device))
return (href, svc_name)
def _resolve_device_type(self, device):
"""
Given a device, determines if it is a CloudServer, a CloudLoadBalancer,
or an invalid device.
"""
try:
from tests.unit import fakes
server_types = (pyrax.CloudServer, fakes.FakeServer)
lb_types = (CloudLoadBalancer, fakes.FakeLoadBalancer,
fakes.FakeDNSDevice)
except ImportError:
# Not running with tests
server_types = (pyrax.CloudServer, )
lb_types = (CloudLoadBalancer, )
if isinstance(device, server_types):
device_type = "server"
elif isinstance(device, lb_types):
device_type = "loadbalancer"
else:
raise exc.InvalidDeviceType("The device '%s' must be a CloudServer "
"or a CloudLoadBalancer." % device)
return device_type
def list_ptr_records(self, device):
"""
Returns a list of all PTR records configured for this device.
"""
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
uri = "/rdns/%s?href=%s" % (svc_name, href)
try:
resp, resp_body = self._retry_get(uri)
except exc.NotFound:
return []
records = [CloudDNSPTRRecord(rec, device)
for rec in resp_body.get("records", [])]
return records
def add_ptr_records(self, device, records):
"""
Adds one or more PTR records to the specified device.
"""
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
if not isinstance(records, (list, tuple)):
records = [records]
body = {"recordsList": {
"records": records},
"link": {
"content": "",
"href": href,
"rel": svc_name,
}}
uri = "/rdns"
# This is a necessary hack, so here's why: if you attempt to add
        # PTR records to a device, and you don't have rights to either the device
# or the IP address, the DNS API will return a 401 - Unauthorized.
# Unfortunately, the pyrax client interprets this as a bad auth token,
# and there is no way to distinguish this from an actual authentication
# failure. The client will attempt to re-authenticate as a result, and
# will fail, due to the DNS API not having regional endpoints. The net
# result is that an EndpointNotFound exception will be raised, which
# we catch here and then raise a more meaningful exception.
# The Rackspace DNS team is working on changing this to return a 403
# instead; when that happens this kludge can go away.
try:
resp, resp_body = self._async_call(uri, body=body, method="POST",
error_class=exc.PTRRecordCreationFailed)
except exc.EndpointNotFound:
raise exc.InvalidPTRRecord("The domain/IP address information is not "
"valid for this device.")
        records = [CloudDNSPTRRecord(rec, device)
                for rec in resp_body.get("records", [])]
        return records
def update_ptr_record(self, device, record, domain_name, data=None,
ttl=None, comment=None):
"""
Updates a PTR record with the supplied values.
"""
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
try:
rec_id = record.id
except AttributeError:
rec_id = record
rec = {"name": domain_name,
"id": rec_id,
"type": "PTR",
"data": data,
}
if ttl is not None:
# Minimum TTL is 300 seconds
rec["ttl"] = max(300, ttl)
if comment is not None:
# Maximum comment length is 160 chars
rec["comment"] = comment[:160]
body = {"recordsList": {
"records": [rec]},
"link": {
"content": "",
"href": href,
"rel": svc_name,
}}
uri = "/rdns"
try:
resp, resp_body = self._async_call(uri, body=body, method="PUT",
has_response=False, error_class=exc.PTRRecordUpdateFailed)
except exc.EndpointNotFound as e:
raise exc.InvalidPTRRecord("The record domain/IP address "
"information is not valid for this device.")
return resp_body.get("status") == "COMPLETED"
def delete_ptr_records(self, device, ip_address=None):
"""
Deletes the PTR records for the specified device. If 'ip_address' is
supplied, only the PTR records with that IP address will be deleted.
"""
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
uri = "/rdns/%s?href=%s" % (svc_name, href)
if ip_address:
uri = "%s&ip=%s" % (uri, ip_address)
resp, resp_body = self._async_call(uri, method="DELETE",
has_response=False,
error_class=exc.PTRRecordDeletionFailed)
return resp_body.get("status") == "COMPLETED"
class CloudDNSClient(BaseClient):
"""
This is the primary class for interacting with Cloud DNS.
"""
name = "Cloud DNS"
def _configure_manager(self):
"""
Creates a manager to handle the instances, and another
to handle flavors.
"""
self._manager = CloudDNSManager(self, resource_class=CloudDNSDomain,
response_key="domains", plural_response_key="domains",
uri_base="domains")
def method_get(self, uri, **kwargs):
"""
        Overloads the method_get function in order to retry on empty body
        responses from the Cloud DNS API.
"""
for i in six.moves.range(3):
resp, body = super(CloudDNSClient, self).method_get(uri, **kwargs)
if body:
return resp, body
raise exc.ServiceResponseFailure("The Cloud DNS service failed to "
"respond to the request.")
def set_timeout(self, timeout):
"""
Sets the amount of time that calls will wait for a response from
the DNS system before timing out. Setting the timeout to zero will
cause execution to wait indefinitely until the call completes.
"""
self._manager._set_timeout(timeout)
def set_delay(self, delay):
"""
Changes the interval that the program will pause in between attempts to
see if a request has completed.
"""
self._manager._set_delay(delay)
def list(self, limit=None, offset=None):
"""Returns a list of all resources."""
return self._manager.list(limit=limit, offset=offset)
def list_previous_page(self):
"""Returns the previous page of results."""
return self._manager.list_previous_page()
def list_next_page(self):
"""Returns the next page of results."""
return self._manager.list_next_page()
def get_domain_iterator(self):
"""
Returns an iterator that will return each available domain. If there are
more than the limit of 100 domains, the iterator will continue to fetch
domains from the API until all domains have been returned.
"""
return DomainResultsIterator(self._manager)
@assure_domain
def changes_since(self, domain, date_or_datetime):
"""
Gets the changes for a domain since the specified date/datetime.
The date can be one of:
- a Python datetime object
- a Python date object
        - a string in the format 'YYYY-MM-DD HH:MM:SS'
        - a string in the format 'YYYY-MM-DD'
It returns a list of dicts, whose keys depend on the specific change
that was made. A simple example of such a change dict:
{u'accountId': 000000,
u'action': u'update',
u'changeDetails': [{u'field': u'serial_number',
u'newValue': u'1354038941',
u'originalValue': u'1354038940'},
{u'field': u'updated_at',
u'newValue': u'Tue Nov 27 17:55:41 UTC 2012',
u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}],
u'domain': u'example.com',
u'targetId': 00000000,
u'targetType': u'Domain'}
"""
return domain.changes_since(date_or_datetime)
@assure_domain
def export_domain(self, domain):
"""
Provides the BIND (Berkeley Internet Name Domain) 9 formatted contents
of the requested domain. This call is for a single domain only, and as
such, does not provide subdomain information.
Sample export:
{u'accountId': 000000,
u'contentType': u'BIND_9',
u'contents': u'example.com.\t3600\tIN\tSOA\tns.rackspace.com. '
'<EMAIL>. 1354202974 21600 3600 1814400 500'
'example.com.\t3600\tIN\tNS\tdns1.stabletransit.com.'
'example.com.\t3600\tIN\tNS\tdns2.stabletransit.com.',
u'id': 1111111}
"""
return domain.export()
def import_domain(self, domain_data):
"""
Takes a string in the BIND 9 format and creates a new domain. See the
'export_domain()' method for a description of the format.
"""
return self._manager.import_domain(domain_data)
@assure_domain
def update_domain(self, domain, emailAddress=None, ttl=None, comment=None):
"""
Provides a way to modify the following attributes of a domain
record:
- email address
- ttl setting
- comment
"""
return domain.update(emailAddress=emailAddress,
ttl=ttl, comment=comment)
@assure_domain
def delete(self, domain, delete_subdomains=False):
"""
Deletes the specified domain and all of its resource records. If the
domain has subdomains, each subdomain will now become a root domain. If
you wish to also delete any subdomains, pass True to 'delete_subdomains'.
"""
domain.delete(delete_subdomains=delete_subdomains)
@assure_domain
def list_subdomains(self, domain, limit=None, offset=None):
"""
Returns a list of all subdomains for the specified domain.
"""
return domain.list_subdomains(limit=limit, offset=offset)
def get_subdomain_iterator(self, domain, limit=None, offset=None):
"""
Returns an iterator that will return each available subdomain for the
specified domain. If there are more than the limit of 100 subdomains,
the iterator will continue to fetch subdomains from the API until all
subdomains have been returned.
"""
return SubdomainResultsIterator(self._manager, domain=domain)
def list_subdomains_previous_page(self):
"""Returns the previous page of subdomain results."""
return self._manager.list_subdomains_previous_page()
def list_subdomains_next_page(self):
"""Returns the next page of subdomain results."""
return self._manager.list_subdomains_next_page()
@assure_domain
def list_records(self, domain, limit=None, offset=None):
"""
Returns a list of all records configured for the specified domain.
"""
return domain.list_records(limit=limit, offset=offset)
def get_record_iterator(self, domain):
"""
Returns an iterator that will return each available DNS record for the
specified domain. If there are more than the limit of 100 records, the
iterator will continue to fetch records from the API until all records
have been returned.
"""
return RecordResultsIterator(self._manager, domain=domain)
def list_records_previous_page(self):
"""Returns the previous page of record results."""
return self._manager.list_records_previous_page()
def list_records_next_page(self):
"""Returns the next page of record results."""
return self._manager.list_records_next_page()
@assure_domain
def search_records(self, domain, record_type, name=None, data=None):
"""
Returns a list of all records configured for the specified domain
that match the supplied search criteria.
"""
return domain.search_records(record_type=record_type,
name=name, data=data)
@assure_domain
def find_record(self, domain, record_type, name=None, data=None):
"""
Returns a single record for this domain that matches the supplied
search criteria.
If no record matches, a DomainRecordNotFound exception will be raised.
If more than one matches, a DomainRecordNotUnique exception will
be raised.
"""
return domain.find_record(record_type=record_type,
name=name, data=data)
@assure_domain
def add_records(self, domain, records):
"""
Adds the records to this domain. Each record should be a dict with the
following keys:
- type (required)
- name (required)
- data (required)
- ttl (optional)
- comment (optional)
- priority (required for MX and SRV records; forbidden otherwise)
"""
return domain.add_records(records)
# Create an alias, so that adding a single record is more intuitive
add_record = add_records
@assure_domain
def get_record(self, domain, record):
"""
Gets the full information for an existing record or record ID for the
specified domain.
"""
return domain.get_record(record)
@assure_domain
def update_record(self, domain, record, data=None, priority=None, ttl=None,
comment=None):
"""
Modifies an existing record for a domain.
"""
return domain.update_record(record, data=data, priority=priority,
ttl=ttl, comment=comment)
@assure_domain
def update_records(self, domain, records):
"""
Modifies multiple existing records for a domain. Each record to be
        updated should be a dict with the following required keys:
- id
- name
Each record must also contain one or more of the following optional
keys:
- data
- ttl
- comment
- priority (optional for MX and SRV records; forbidden otherwise)
"""
return domain.update_records(records)
@assure_domain
def delete_record(self, domain, record):
"""
Deletes an existing record for this domain.
"""
return domain.delete_record(record)
def list_ptr_records(self, device):
"""
Returns a list of all PTR records configured for this device.
"""
return self._manager.list_ptr_records(device)
def add_ptr_records(self, device, records):
"""
Adds one or more PTR records to the specified device.
"""
return self._manager.add_ptr_records(device, records)
def update_ptr_record(self, device, record, domain_name, data=None,
ttl=None, comment=None):
"""
Updates a PTR record with the supplied values.
"""
return self._manager.update_ptr_record(device, record, domain_name,
data=data, ttl=ttl, comment=comment)
def delete_ptr_records(self, device, ip_address=None):
"""
Deletes the PTR records for the specified device. If 'ip_address'
is supplied, only the PTR records with that IP address will be deleted.
"""
return self._manager.delete_ptr_records(device, ip_address=ip_address)
def get_absolute_limits(self):
"""
Returns a dict with the absolute limits for the current account.
"""
resp, body = self.method_get("/limits")
absolute_limits = body.get("limits", {}).get("absolute")
return absolute_limits
def get_rate_limits(self):
"""
Returns a dict with the current rate limit information for domain
and status requests.
"""
resp, body = self.method_get("/limits")
rate_limits = body.get("limits", {}).get("rate")
ret = []
for rate_limit in rate_limits:
limits = rate_limit["limit"]
uri_limits = {"uri": rate_limit["uri"],
"limits": limits}
ret.append(uri_limits)
return ret
class ResultsIterator(object):
"""
This object will iterate over all the results for a given
type of listing, no matter how many items exist.
This is an abstract class; subclasses must define the
_init_methods() method.
"""
def __init__(self, manager, domain=None):
self.manager = manager
self.domain = domain
self.domain_id = utils.get_id(domain) if domain else None
self.results = []
self.next_uri = ""
self.extra_args = tuple()
self._init_methods()
def _init_methods(self):
"""
Must be implemented in subclasses.
"""
raise NotImplementedError()
def __iter__(self):
return self
def next(self):
"""
Return the next available item. If there are no more items in the
local 'results' list, check if there is a 'next_uri' value. If so,
use that to get the next page of results from the API, and return
the first item from that query.
"""
try:
return self.results.pop(0)
except IndexError:
if self.next_uri is None:
raise StopIteration()
else:
if not self.next_uri:
if self.domain:
self.results = self.list_method(self.domain)
else:
self.results = self.list_method()
else:
args = self.extra_args
self.results = self._list_method(self.next_uri, *args)
self.next_uri = self.manager._paging.get(
self.paging_service, {}).get("next_uri")
# We should have more results.
try:
return self.results.pop(0)
except IndexError:
raise StopIteration()
class DomainResultsIterator(ResultsIterator):
"""
ResultsIterator subclass for iterating over all domains.
"""
def _init_methods(self):
self.list_method = self.manager.list
self._list_method = self.manager._list
self.paging_service = "domain"
class SubdomainResultsIterator(ResultsIterator):
"""
ResultsIterator subclass for iterating over all subdomains.
"""
def _init_methods(self):
self.list_method = self.manager.list_subdomains
self._list_method = self.manager._list_subdomains
self.extra_args = (self.domain_id, )
self.paging_service = "subdomain"
class RecordResultsIterator(ResultsIterator):
"""
ResultsIterator subclass for iterating over all domain records.
"""
def _init_methods(self):
self.list_method = self.manager.list_records
self._list_method = self.manager._list_records
self.paging_service = "record"
```
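Because most `CloudDNSClient` entry points are wrapped with `@assure_domain`, they accept a `CloudDNSDomain` object, a domain ID, or a plain domain name. A minimal sketch follows; it assumes pyrax credentials are already set (`pyrax.cloud_dns` is the same handle the module itself uses for PTR deletion), and the domain name and record values are hypothetical.
```python
import datetime
import pyrax

dns = pyrax.cloud_dns

# Add an A record; @assure_domain resolves the plain name to a domain first.
dns.add_records("example.com", [{"type": "A", "name": "example.com",
        "data": "192.0.2.17", "ttl": 86400}])

# List the domain's records and ask what changed since the start of today.
for rec in dns.list_records("example.com"):
    print("%s %s %s" % (rec.name, rec.type, rec.data))

changes = dns.changes_since("example.com", datetime.date.today())
```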
#### File: pyrax/pyrax/cloudnetworks.py
```python
from __future__ import absolute_import, unicode_literals
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
# Constants to represent the 'special' network IDs.
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
PSEUDO_NETWORKS = (PUBLIC_NET_ID, SERVICE_NET_ID)
def _get_server_networks(network, public=False, private=False, key=None):
key = key or "net-id"
net_id = utils.get_id(network)
ret = [{key: net_id}]
if public:
ret.append({key: PUBLIC_NET_ID})
if private:
ret.append({key: SERVICE_NET_ID})
return ret
class CloudNetwork(BaseResource):
"""
This represents a network in the cloud. It can be either an isolated
network, the public network, or the ServiceNet network.
While resources generally use 'name' as the text identifier, the Cloud
    Networks API uses 'label' instead. This module aliases the attributes and
methods so that you can use either in your code.
"""
id = None
cidr = None
label = None
def _get_name(self):
return self.label
def _set_name(self, name):
self.label = name
name = property(_get_name, _set_name)
@property
def is_isolated(self):
"""Returns True if this is a user-defined network."""
return self.id not in PSEUDO_NETWORKS
def get(self):
if not self.is_isolated:
# These are placeholders, not actual networks
return
return super(CloudNetwork, self).get()
def delete(self):
"""
Wraps the standard delete() method to catch expected exceptions and
raise the appropriate pyrax exceptions.
"""
try:
return super(CloudNetwork, self).delete()
except exc.Forbidden as e:
# Network is in use
raise exc.NetworkInUse("Cannot delete a network in use by a server.")
def get_server_networks(self, public=False, private=False, key=None):
"""
Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True.
"""
return _get_server_networks(self, public=public, private=private,
key=key)
class CloudNetworkManager(BaseManager):
"""
Does nothing special, but is used in testing.
"""
def _create_body(self, name, label=None, cidr=None):
"""
Used to create the dict required to create a network. Accepts either
'label' or 'name' as the keyword parameter for the label attribute.
"""
label = label or name
body = {"network": {
"label": label,
"cidr": cidr,
}}
return body
class CloudNetworkClient(BaseClient):
"""
This is the base client for creating and managing Cloud Networks.
"""
def __init__(self, *args, **kwargs):
super(CloudNetworkClient, self).__init__(*args, **kwargs)
self.name = "<NAME>"
# Constants to represent the 'special' network IDs.
self.PUBLIC_NET_ID = PUBLIC_NET_ID
self.SERVICE_NET_ID = SERVICE_NET_ID
self.PSEUDO_NETWORKS = PSEUDO_NETWORKS
def _configure_manager(self):
"""
Creates the Manager instance to handle networks.
"""
self._manager = CloudNetworkManager(self, resource_class=CloudNetwork,
response_key="network", uri_base="os-networksv2")
def create(self, label=None, name=None, cidr=None):
"""
Wraps the basic create() call to handle specific failures.
"""
try:
return super(CloudNetworkClient, self).create(label=label,
name=name, cidr=cidr)
except exc.BadRequest as e:
msg = e.message
if "too many networks" in msg:
raise exc.NetworkCountExceeded("Cannot create network; the "
"maximum number of isolated networks already exist.")
elif "does not contain enough" in msg:
raise exc.NetworkCIDRInvalid("Networks must contain two or "
"more hosts; the CIDR '%s' is too restrictive." % cidr)
elif "CIDR is malformed" in msg:
raise exc.NetworkCIDRMalformed("The CIDR '%s' is not valid." % cidr)
else:
# Something unexpected
raise
def delete(self, network):
"""
Wraps the standard delete() method to catch expected exceptions and
raise the appropriate pyrax exceptions.
"""
try:
return super(CloudNetworkClient, self).delete(network)
except exc.Forbidden as e:
# Network is in use
raise exc.NetworkInUse("Cannot delete a network in use by a server.")
def find_network_by_label(self, label):
"""
This is inefficient; it gets all the networks and then filters on
the client side to find the matching name.
"""
networks = self.list()
match = [network for network in networks
if network.label == label]
if not match:
raise exc.NetworkNotFound("No network with the label '%s' exists" %
label)
elif len(match) > 1:
raise exc.NetworkLabelNotUnique("There were %s matches for the label "
"'%s'." % (len(match), label))
return match[0]
# Create an alias using 'name'
find_network_by_name = find_network_by_label
def get_server_networks(self, network, public=False, private=False,
key=None):
"""
Creates the dict of network UUIDs required by Cloud Servers when
creating a new server with isolated networks. By default, the UUID
values are returned with the key of "net-id", which is what novaclient
expects. Other tools may require different values, such as 'uuid'. If
that is the case, pass the desired key as the 'key' parameter.
By default only this network is included. If you wish to create a
server that has either the public (internet) or private (ServiceNet)
networks, you have to pass those parameters in with values of True.
"""
return _get_server_networks(network, public=public, private=private,
key=key)
```
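A short usage sketch for the client above: create an isolated network and build the `networks` argument for a server create call. The network label and CIDR are made-up values; the call shapes come from the methods defined in this module.

```python
# Sketch only: assumes pyrax has been authenticated elsewhere.
import pyrax

cnw = pyrax.cloud_networks  # CloudNetworkClient instance

net = cnw.create("backend_net", cidr="192.168.0.0/24")

# Isolated network plus PublicNet and ServiceNet, keyed for novaclient:
# [{"net-id": net.id}, {"net-id": PUBLIC_NET_ID}, {"net-id": SERVICE_NET_ID}]
nets = cnw.get_server_networks(net, public=True, private=True)

# Some tools expect "uuid" rather than "net-id" as the key.
nets_uuid = cnw.get_server_networks(net, key="uuid")
```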
#### File: samples/autoscale/add_policy.py
```python
from __future__ import print_function
import os
import six
import pyrax
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
au = pyrax.autoscale
def safe_int(val, allow_zero=True):
"""
This function converts the six.moves.input values to integers. It handles
invalid entries, and optionally forbids values of zero.
"""
try:
ret = int(val)
except ValueError:
print("Sorry, '%s' is not a valid integer." % val)
return False
if not allow_zero and ret == 0:
print("Please enter a non-zero integer.")
return False
return ret
# Get the current scaling groups
sgs = au.list()
if not sgs:
print("There are no scaling groups defined. Please run the "
"'create_scaling_group.py' script first.")
exit()
print()
print("Available Scaling Groups:")
for pos, sg in enumerate(sgs):
print("%s - %s" % (pos, sg.name))
answer = six.moves.input("Enter the number of the scaling group you wish to add a "
"policy to: ")
if not answer:
print("Nothing entered; exiting.")
exit()
intanswer = safe_int(answer)
if not 0 <= intanswer < len(sgs):
print("The number '%s' does not correspond to any scaling group." % answer)
exit()
sg = sgs[intanswer]
pname = ""
while not pname:
pname = six.moves.input("Enter a name for this policy: ")
cooldown = 0
while not cooldown:
cooldown_input = six.moves.input("Enter a cooldown period in seconds: ")
cooldown = safe_int(cooldown_input, False)
change = 0
while not change:
change = safe_int(six.moves.input("Enter the change increment: "), False)
answer = six.moves.input("Is that a percentage change? [y/N]: ")
is_percent = "y" in answer.lower()
policy = au.add_policy(sg, pname, "webhook", cooldown, change, is_percent)
print("Policy added: %s" % policy)
```
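For reference, the same `add_policy()` call without the interactive prompts; the scaling group name and policy values are invented, and the positional argument order mirrors the call at the end of the script above.

```python
# Non-interactive sketch of the add_policy() call used in the sample above.
import os

import pyrax

pyrax.set_setting("identity_type", "rackspace")
pyrax.set_credential_file(os.path.expanduser("~/.rackspace_cloud_credentials"))
au = pyrax.autoscale

# "web-workers" is an invented group name; pick the matching scaling group.
sg = [g for g in au.list() if g.name == "web-workers"][0]

# name, policy type, cooldown (seconds), change, is_percent
policy = au.add_policy(sg, "scale_up_2", "webhook", 300, 2, False)
print("Policy added: %s" % policy)
```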
#### File: tests/unit/test_cloud_cdn.py
```python
import unittest
import mock
from pyrax.cloudcdn import CloudCDNClient
from pyrax.cloudcdn import CloudCDNFlavor
from pyrax.cloudcdn import CloudCDNFlavorManager
from pyrax.cloudcdn import CloudCDNService
from pyrax.cloudcdn import CloudCDNServiceManager
class CloudCDNTest(unittest.TestCase):
@mock.patch("pyrax.client.BaseClient.method_get")
def test_ping(self, mock_get):
sot = CloudCDNClient(mock.MagicMock())
sot.ping()
mock_get.assert_called_with("/ping")
@mock.patch("pyrax.cloudcdn.CloudCDNFlavorManager.list")
def test_list_flavors(self, mock_list):
sot = CloudCDNClient(mock.MagicMock())
sot.list_flavors()
mock_list.assert_called_once_with()
@mock.patch("pyrax.cloudcdn.CloudCDNFlavorManager.get")
def test_get_flavor(self, mock_get):
sot = CloudCDNClient(mock.MagicMock())
flavor = "flavor"
sot.get_flavor(flavor)
mock_get.assert_called_once_with(flavor)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.list")
def test_list_services(self, mock_list):
sot = CloudCDNClient(mock.MagicMock())
sot.list_services()
mock_list.assert_called_with(limit=None, marker=None)
kwargs = {"limit": 1, "marker": 2}
sot.list_services(**kwargs)
mock_list.assert_called_with(**kwargs)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.get")
def test_get_service(self, mock_get):
sot = CloudCDNClient(mock.MagicMock())
service = "service"
sot.get_service(service)
mock_get.assert_called_once_with(service)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.create")
def test_create_service(self, mock_create):
sot = CloudCDNClient(mock.MagicMock())
args = (1, 2, 3, 4, 5, 6)
sot.create_service(*args)
mock_create.assert_called_once_with(*args)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.patch")
def test_patch_service(self, mock_patch):
sot = CloudCDNClient(mock.MagicMock())
args = (1, 2)
sot.patch_service(*args)
mock_patch.assert_called_once_with(*args)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.delete")
def test_delete_service(self, mock_delete):
sot = CloudCDNClient(mock.MagicMock())
service = "service"
sot.delete_service(service)
mock_delete.assert_called_once_with(service)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.delete_assets")
def test_delete_assets(self, mock_delete):
sot = CloudCDNClient(mock.MagicMock())
args = (1, 2, 3)
sot.delete_assets(*args)
mock_delete.assert_called_once_with(*args)
```
#### File: tests/unit/test_http.py
```python
from __future__ import absolute_import, unicode_literals
import json
import logging
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax
import pyrax.utils as utils
import pyrax.exceptions as exc
from pyrax import client
from pyrax import fakes
class HttpTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(HttpTest, self).__init__(*args, **kwargs)
self.http = pyrax.http
def setUp(self):
pass
def tearDown(self):
pass
def test_request(self):
mthd = random.choice(list(self.http.req_methods.keys()))
sav_method = self.http.req_methods[mthd]
resp = fakes.FakeResponse()
self.http.req_methods[mthd] = Mock(return_value=resp)
uri = utils.random_unicode()
hk = utils.random_unicode()
hv = utils.random_unicode()
headers = {hk: hv}
self.http.request(mthd, uri, headers=headers)
self.http.req_methods[mthd].assert_called_once_with(uri,
headers=headers)
self.http.req_methods[mthd] = sav_method
def test_request_no_json(self):
mthd = random.choice(list(self.http.req_methods.keys()))
sav_method = self.http.req_methods[mthd]
resp = fakes.FakeResponse()
resp.json = Mock(side_effect=ValueError(""))
self.http.req_methods[mthd] = Mock(return_value=resp)
uri = utils.random_unicode()
hk = utils.random_unicode()
hv = utils.random_unicode()
headers = {hk: hv}
self.http.request(mthd, uri, headers=headers)
self.http.req_methods[mthd].assert_called_once_with(uri,
headers=headers)
self.http.req_methods[mthd] = sav_method
def test_request_exception(self):
mthd = random.choice(list(self.http.req_methods.keys()))
sav_method = self.http.req_methods[mthd]
resp = fakes.FakeResponse()
resp.status_code = 404
self.http.req_methods[mthd] = Mock(return_value=resp)
uri = utils.random_unicode()
hk = utils.random_unicode()
hv = utils.random_unicode()
headers = {hk: hv}
self.assertRaises(exc.NotFound, self.http.request, mthd, uri,
headers=headers)
def test_request_data(self):
mthd = random.choice(list(self.http.req_methods.keys()))
sav_method = self.http.req_methods[mthd]
resp = fakes.FakeResponse()
self.http.req_methods[mthd] = Mock(return_value=resp)
uri = utils.random_unicode()
hk = utils.random_unicode()
hv = utils.random_unicode()
headers = {hk: hv}
data = utils.random_unicode()
self.http.request(mthd, uri, headers=headers, data=data)
self.http.req_methods[mthd].assert_called_once_with(uri,
headers=headers, data=data)
self.http.req_methods[mthd] = sav_method
def test_request_body(self):
mthd = random.choice(list(self.http.req_methods.keys()))
sav_method = self.http.req_methods[mthd]
resp = fakes.FakeResponse()
self.http.req_methods[mthd] = Mock(return_value=resp)
uri = utils.random_unicode()
hk = utils.random_unicode()
hv = utils.random_unicode()
headers = {hk: hv}
body = utils.random_unicode()
jbody = json.dumps(body)
self.http.request(mthd, uri, headers=headers, body=body)
self.http.req_methods[mthd].assert_called_once_with(uri,
headers=headers, data=jbody)
self.http.req_methods[mthd] = sav_method
def test_http_log_req(self):
args = ("a", "b")
kwargs = {"headers": {"c": "C"}}
mthd = utils.random_unicode()
uri = utils.random_unicode()
sav_pdbug = pyrax._http_debug
pyrax._http_debug = False
self.assertIsNone(self.http.http_log_req(mthd, uri, args, kwargs))
pyrax._http_debug = True
sav_pldbug = pyrax._logger.debug
pyrax._logger.debug = Mock()
self.http.http_log_req(mthd, uri, args, kwargs)
pyrax._logger.debug.assert_called_once_with(
"\nREQ: curl -i -X %s a b -H 'c: C' %s\n" % (mthd, uri))
kwargs["body"] = "text"
self.http.http_log_req(mthd, uri, args, kwargs)
cargs, ckw = pyrax._logger.debug.call_args
self.assertEqual(cargs, ("REQ BODY: text\n", ))
pyrax._logger.debug = sav_pldbug
pyrax._http_debug = sav_pdbug
def test_http_log_resp(self):
log = logging.getLogger("pyrax")
sav_pldbug = log.debug
log.debug = Mock()
resp = fakes.FakeResponse()
body = "body"
sav_pdbug = pyrax._http_debug
pyrax._http_debug = False
self.http.http_log_resp(resp, body)
self.assertFalse(log.debug.called)
pyrax._http_debug = True
self.http.http_log_resp(resp, body)
self.assertTrue(log.debug.called)
log.debug.assert_any_call("RESP: %s\n%s", resp, resp.headers)
log.debug.assert_called_with("RESP BODY: %s", body)
log.debug = sav_pldbug
pyrax._http_debug = sav_pdbug
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jfreeman812/SecurityTesting",
"score": 2
}
|
#### File: clients/http/base_http_client.py
```python
import logging
import requests
from requests.packages import urllib3
from syntribos.clients.http.debug_logger import log_http_transaction
urllib3.disable_warnings()
class HTTPClient(object):
"""Allows clients to inherit requests.request.
@summary: Redefines request() so that keyword args are passed.
The parameters are passed through a named dictionary
instead of kwargs. Client methods can then take parameters
that may overload request parameters, which allows client
method calls to override parts of the request with parameters
sent directly to requests, overriding the client method logic
either in part or whole on the fly.
"""
LOG = logging.getLogger(__name__)
def __init__(self):
self.default_headers = {}
@log_http_transaction(log=LOG)
def request(self, method, url, headers=None, params=None, data=None,
sanitize=False, requestslib_kwargs=None):
# set requestslib_kwargs to an empty dict if None
requestslib_kwargs = requestslib_kwargs if (
requestslib_kwargs is not None) else {}
# Set defaults
params = params if params is not None else {}
verify = False
sanitize = sanitize
# If headers are provided by both, headers "wins" over default_headers
headers = dict(self.default_headers, **(headers or {}))
# Override url if present in requestslib_kwargs
if 'url' in list(requestslib_kwargs.keys()):
url = requestslib_kwargs.get('url', None) or url
del requestslib_kwargs['url']
# Override method if present in requestslib_kwargs
if 'method' in list(requestslib_kwargs.keys()):
method = requestslib_kwargs.get('method', None) or method
del requestslib_kwargs['method']
# The requests lib already removes None key/value pairs, but we force
# it here in case that behavior ever changes
for key in list(requestslib_kwargs.keys()):
if requestslib_kwargs[key] is None:
del requestslib_kwargs[key]
# Create the final parameters for the call to the base request()
# Wherever a parameter is provided both by the calling method AND
# the requests_lib kwargs dictionary, requestslib_kwargs "wins"
requestslib_kwargs = dict(
{'headers': headers, 'params': params, 'verify': verify,
'data': data}, **requestslib_kwargs)
# Make the request
return requests.request(method, url, **requestslib_kwargs)
```
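A small sketch of the override behaviour described in the docstring above: explicit headers are merged over `default_headers`, while anything supplied in `requestslib_kwargs` wins over the arguments the client assembles. `SynHTTPClient` and the two-value return are taken from how the client is used elsewhere in syntribos; the URL and header names are placeholders.

```python
# Illustrative only; the URL and header values are placeholders.
from syntribos.clients.http.client import SynHTTPClient

client = SynHTTPClient()
client.default_headers = {"X-Base": "default"}

# The explicit header overrides the default one with the same name, and the
# requestslib_kwargs entries override the assembled arguments, so the request
# goes out with verify=True and a 30-second timeout.
resp, signals = client.request(
    "GET", "http://example.com/api",
    headers={"X-Base": "explicit"},
    requestslib_kwargs={"verify": True, "timeout": 30})
```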
#### File: extensions/rax_identity/client.py
```python
import json
import logging
from oslo_config import cfg
from requests import RequestException as RequestException
from syntribos.clients.http.client import SynHTTPClient
from syntribos.utils.memoize import memoize
logging.basicConfig(level=logging.CRITICAL)
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def authenticate(endpoint, username, apiKey):
headers = {'content-type': 'application/json'}
if endpoint.endswith('/v2.0/'):
endpoint = '{0}tokens'.format(endpoint)
elif endpoint.endswith('/v2.0'):
endpoint = '{0}/tokens'.format(endpoint)
elif endpoint.endswith('/v2.0/tokens'):
pass
else:
endpoint = '{0}/v2.0/tokens'.format(endpoint)
data = {'auth': {"RAX-KSKEY:apiKeyCredentials": {"username": username,
"apiKey": apiKey}}}
data = json.dumps(data)
try:
resp, _ = SynHTTPClient().request(
"POST", endpoint, headers=headers, data=data, sanitize=True)
r = resp.json()
except RequestException as e:
LOG.debug(e)
else:
if not r:
raise Exception("Failed to authenticate")
if 'access' not in r or not r['access']:
raise Exception("Failed to parse Auth response Body")
return r['access']
def authenticate_config(user_section):
return authenticate(
endpoint=CONF.get(user_section).endpoint or CONF.user.endpoint,
username=CONF.get(user_section).username or CONF.user.username,
apiKey=CONF.get(user_section).apiKey or CONF.user.apiKey)
@memoize
def get_token(user_section='user'):
"""Returns unscoped v2 token."""
access_data = authenticate_config(user_section)
return access_data['token']['id']
```
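A sketch of how the helper above is typically consumed: `get_token()` authenticates with the credentials from an oslo.config section and memoizes the unscoped token. The import path mirrors the file layout shown here, and the alternate section name is invented.

```python
# Sketch only: assumes the syntribos config file has already been parsed into
# CONF before these calls are made.
from syntribos.extensions.rax_identity.client import get_token

token = get_token()                # credentials from the [user] section
alt_token = get_token('alt_user')  # 'alt_user' is a hypothetical section

headers = {'X-Auth-Token': token}
```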
#### File: extensions/rax_payment_system/models.py
```python
import json
import logging
import xml.etree.ElementTree as ET
import xmltodict
from oslo_config import cfg
logging.basicConfig(level=logging.CRITICAL)
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
XMLNS_ATOM = "http://www.w3.org/2005/Atom"
XMLNS_NS1 = "http://payment.api.rackspacecloud.com/v1"
XMLNS_NS2 = "http://payment.api.rackspacecloud.com/v1"
XMLNS_NS4 = "http://common.api.rackspacecloud.com/service-profile/v1.0"
XMLNS_NS5 = "http://docs.openstack.org/common/api/v1.0"
class BasePaymentSystemModel(object):
def __init__(self, kwargs):
super(BasePaymentSystemModel, self).__init__()
self._log = logging.getLogger(__name__)
for k, v in kwargs.items():
if k != "self" and not k.startswith("_"):
setattr(self, k, v)
def serialize(self, format_type):
try:
serialize_method = '_obj_to_{0}'.format(format_type)
return getattr(self, serialize_method)()
except Exception as serialization_exception:
self._log.error(
                'Error occurred during serialization of a data model '
                'into the "%s" format:\n%s',
format_type, serialization_exception)
self._log.exception(serialization_exception)
@classmethod
def deserialize(cls, serialized_str, format_type):
if serialized_str and len(serialized_str) > 0:
try:
deserialize_method = '_{0}_to_obj'.format(format_type)
return getattr(cls, deserialize_method)(serialized_str)
except Exception as deserialization_exception:
cls._log.exception(deserialization_exception)
cls._log.debug(
"Deserialization Error: Attempted to deserialize type"
" using type: {0}".format(format_type.decode(
encoding='UTF-8', errors='ignore')))
cls._log.debug(
"Deserialization Error: Unable to deserialize the "
"following:\n{0}".format(serialized_str.decode(
encoding='UTF-8', errors='ignore')))
@classmethod
def _json_to_obj(cls, data_dict):
return cls._dict_to_obj(data_dict)
@classmethod
def _xml_to_obj(cls, element):
return cls._xml_ele_to_obj(element)
def _obj_to_json(self):
return json.dumps(self._obj_to_dict())
def _obj_to_xml(self):
raise NotImplementedError
# These next two functions must be defined by the child classes before
# serializing
def _obj_to_dict(self):
raise NotImplementedError
def _obj_to_xml_ele(self):
raise NotImplementedError
@staticmethod
def _find(element, tag):
"""Finds element with tag
:param element: :class:`xml.etree.ElementTree.Element`, the element
through which to start searching
:param tag: the tag to search for
:returns: The element with tag `tag` if found, or a new element with
tag None if not found
:rtype: :class:`xml.etree.ElementTree.Element`
"""
if element is None:
return ET.Element(None)
new_element = element.find(tag)
if new_element is None:
return ET.Element(None)
return new_element
@staticmethod
def _build_list_model(data, field_name, model):
"""Builds list of python objects from XML or json data
If data type is json, will find all json objects with `field_name` as
key, and convert them into python objects of type `model`.
If XML, will find all :class:`xml.etree.ElementTree.Element` with
`field_name` as tag, and convert them into python objects of type
`model`
:param data: Either json or XML object
:param str field_name: json key or XML tag
:param model: Class of objects to be returned
:returns: list of `model` objects
:rtype: `list`
"""
if data is None:
return []
if isinstance(data, dict):
if data.get(field_name) is None:
return []
return [model._dict_to_obj(tmp) for tmp in data.get(field_name)]
return [model._xml_ele_to_obj(tmp) for tmp in data.findall(field_name)]
@staticmethod
def _build_list(items, element=None):
"""Builds json object or xml element from model
Calls either :func:`item._obj_to_dict` or
        :func:`item._obj_to_xml_ele` on all objects in `items`, and either
returns the dict objects as a list or appends `items` to `element`
:param items: list of objects for conversion
:param element: The element to be appended, or None if json
:returns: list of dicts if `element` is None or `element` otherwise.
"""
if element is None:
if items is None:
return []
return [item._obj_to_dict() for item in items]
else:
if items is None:
return element
for item in items:
element.append(item._obj_to_xml_ele())
return element
@staticmethod
def _create_text_element(name, text):
"""Creates element with text data
:returns: new element with name `name` and text `text`
:rtype: :class:`xml.etree.ElementTree.Element`
"""
element = ET.Element(name)
if text is True or text is False:
element.text = str(text).lower()
elif text is None:
return ET.Element(None)
else:
element.text = str(text)
return element
def __ne__(self, obj):
return not self.__eq__(obj)
@classmethod
def _remove_empty_values(cls, data):
"""Remove empty values
        Returns a new dictionary based on `data`, minus any keys whose
        values are empty ([], {}, or None).
:param dict data: Dictionary to be pruned
:returns: dictionary without empty values
:rtype: `dict`
"""
if isinstance(data, dict):
return dict(
(k, v) for k, v in data.items() if v not in (
[], {}, None))
elif isinstance(data, ET.Element):
if data.attrib:
data.attrib = cls._remove_empty_values(data.attrib)
data._children = [
c for c in data._children if c.tag is not None and (
c.attrib or c.text is not None or c._children)]
return data
@staticmethod
def _get_sub_model(model, json=True):
"""Converts object to json or XML
:param model: Object to convert
:param boolean json: True if converting to json, false if XML
"""
if json:
if model is not None:
return model._obj_to_dict()
else:
return None
else:
if model is not None:
return model._obj_to_xml_ele()
else:
return ET.Element(None)
class PaymentMethod(BasePaymentSystemModel):
def __init__(self,
methodId=None,
creationDate=None,
ran=None,
status=None,
isDefault=None,
modifiedDate=None,
methodClass=None,
methodClassName=None,
addressVerificationInformation=None,
level3Eligible=None):
super(PaymentMethod, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
if 'paymentCard' in data:
_model_class = PaymentCardMethod
_model_name = 'paymentCard'
elif 'electronicCheck' in data:
_model_class = ACHMethod
_model_name = 'electronicCheck'
elif 'ukDirectDebit' in data:
_model_class = UKDebitMethod
_model_name = 'ukDirectDebit'
elif 'sepa' in data:
_model_class = SEPAMethod
_model_name = 'sepa'
_model = _model_class(data.get(_model_name))
_avi = data.get('addressVerificationInformation')
return cls(methodId=data.get('id') or data.get("@id"),
creationDate=data.get('creationDate') or
data.get('@creationDate'),
ran=data.get('ran') or data.get('@ran'),
status=data.get('status') or data.get('@status'),
isDefault=data.get('isDefault') or data.get('@isDefault'),
modifiedDate=data.get('modifiedDate') or
data.get('@modifiedDate'),
methodClass=_model,
methodClassName=_model_name,
addressVerificationInformation=_avi,
level3Eligible=data.get('level3Eligible'))
def _obj_to_dict(self):
if self.methodClassName == 'paymentCard':
_model_name = 'paymentCard'
elif self.methodClassName == 'electronicCheck':
_model_name = 'electronicCheck'
elif self.methodClassName == 'ukDirectDebit':
_model_name = 'ukDirectDebit'
elif self.methodClassName == 'sepa':
_model_name = 'sepa'
dic = {}
dic['addressVerificationInformation'] = \
self.addressVerificationInformation
dic[_model_name] = self.methodClass._obj_to_dict()['papi:method']
return {"method": self._remove_empty_values(dic)}
class PaymentCardMethod(BasePaymentSystemModel):
def __init__(self,
cardVerificationNumber=None,
expirationDate=None,
cardHolderName=None,
cardType=None,
cardNumber=None):
super(PaymentCardMethod, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
return cls(cardVerificationNumber=data.get('cardVerificationNumber'),
expirationDate=data.get('expirationDate'),
cardHolderName=data.get('cardHolderName'),
cardType=data.get('cardType'),
cardNumber=data.get('cardNumber'))
def _obj_to_dict(self):
dic = {}
dic['expirationDate'] = self.expirationDate
dic['cardVerificationNumber'] = self.cardVerificationNumber
dic['cardHolderName'] = self.cardHolderName
dic['cardType'] = self.cardType
dic['cardNumber'] = self.cardNumber
return {"papi:method": {"PaymentCard": self._remove_empty_values(dic)}}
def _obj_to_xml(self):
dic = {'ns2:method': {}}
dic['ns2:method'] = self._obj_to_dict()['papi:method']
dic['ns2:method']['@xmlns:atom'] = XMLNS_ATOM
dic['ns2:method']['@xmlns:ns2'] = XMLNS_NS2
return xmltodict.unparse(dic)
class ACHMethod(BasePaymentSystemModel):
def __init__(self,
accountNumber=None,
accountType=None,
achPaymentType=None,
routingNumber=None,
accountHolderName=None):
super(ACHMethod, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
return cls(accountNumber=data.get('accountNumber'),
accountType=data.get('accountType'),
achPaymentType=data.get('achPaymentType'),
routingNumber=data.get('routingNumber'),
accountHolderName=data.get('accountHolderName'))
def _obj_to_dict(self):
dic = {}
dic['accountNumber'] = self.accountNumber
dic['accountType'] = self.accountType
dic['achPaymentType'] = self.achPaymentType
dic['routingNumber'] = self.routingNumber
dic['accountHolderName'] = self.accountHolderName
return {"papi:method": {
'electronicCheck': self._remove_empty_values(dic)}}
def _obj_to_xml(self):
dic = {'ns2:method': {}}
dic['ns2:method'] = self._obj_to_dict()['papi:method']
dic['ns2:method']['@xmlns:atom'] = XMLNS_ATOM
dic['ns2:method']['@xmlns:ns2'] = XMLNS_NS2
return xmltodict.unparse(dic)
class UKDebitMethod(BasePaymentSystemModel):
def __init__(self,
bankSortCode=None,
bankNumber=None,
accountHolderName=None):
super(UKDebitMethod, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
return cls(bankSortCode=data.get('bankSortCode'),
bankNumber=data.get('bankNumber'),
accountHolderName=data.get('accountHolderName'))
def _obj_to_dict(self):
dic = {}
dic['bankSortCode'] = self.bankSortCode
dic['bankNumber'] = self.bankNumber
dic['accountHolderName'] = self.accountHolderName
return {"papi:method": {
"ukDirectDebit": self._remove_empty_values(dic)}}
def _obj_to_xml(self):
dic = {'ns2:method': {}}
dic['ns2:method'] = self._obj_to_dict()['papi:method']
dic['ns2:method']['@xmlns:atom'] = XMLNS_ATOM
dic['ns2:method']['@xmlns:ns2'] = XMLNS_NS2
return xmltodict.unparse(dic)
class SEPAMethod(BasePaymentSystemModel):
def __init__(self,
bic=None,
iban=None,
accountHolderName=None):
super(SEPAMethod, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
return cls(bic=data.get('bic'),
iban=data.get('iban'),
accountHolderName=data.get('accountHolderName'))
def _obj_to_dict(self):
dic = {}
dic['bic'] = self.bic
dic['iban'] = self.iban
dic['accountHolderName'] = self.accountHolderName
return {"papi:method": {"sepa": self._remove_empty_values(dic)}}
def _obj_to_xml(self):
dic = {'ns2:method': {}}
dic['ns2:method'] = self._obj_to_dict()['papi:method']
dic['ns2:method']['@xmlns:atom'] = XMLNS_ATOM
dic['ns2:method']['@xmlns:ns2'] = XMLNS_NS2
return xmltodict.unparse(dic)
class MethodValidation(BasePaymentSystemModel):
def __init__(self,
methodValidationId=None,
validationResults=None,
gatewayMessage=None,
approvalStatus=None,
lineOfBusiness=None,
contractEntity=None,
currencyCode=None,
method=None):
super(MethodValidation, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
return cls(methodValidationId=data.get('id'),
validationResults=data.get('validationResults'),
lineOfBusiness=data.get('lineOfBusiness'),
gatewayMessage=data.get('gatewayMessage'),
contractEntity=data.get('contractEntity'),
currencyCode=data.get('currencyCode'),
                   approvalStatus=data.get('approvalStatus'),
method=PaymentMethod(data.get('method')))
def _obj_to_dict(self):
dic = {}
dic['lineOfBusiness'] = self.lineOfBusiness
dic['contractEntity'] = self.contractEntity
dic['currencyCode'] = self.currencyCode
dic['method'] = self.method._obj_to_dict()['papi:method']
return {"papi:methodValidation": self._remove_empty_values(dic)}
def _obj_to_xml(self):
dic = {'ns3:methodValidation': {}}
dic['ns3:methodValidation'] = \
self._obj_to_dict()['papi:methodValidation']
dic['ns3:methodValidation']['@xmlns:ns2'] = XMLNS_ATOM
dic['ns3:methodValidation']['@xmlns:ns3'] = XMLNS_NS2
dic['ns3:methodValidation']['@xmlns:ns4'] = XMLNS_NS4
dic['ns3:methodValidation']['@xmlns:ns5'] = XMLNS_NS5
return xmltodict.unparse(dic)
class MethodAssociation(BasePaymentSystemModel):
def __init__(self,
methodValidationId=None,
methodId=None,
ran=None):
super(MethodAssociation, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
return cls(methodValidationId=data.get('methodValidationId'),
methodId=data.get('methodId'),
ran=data.get('ran'))
def _obj_to_dict(self):
dic = {}
dic['methodValidationId'] = self.methodValidationId
        dic['ran'] = self.ran
return {"methodAssociation": self._remove_empty_values(dic)}
class Payment(BasePaymentSystemModel):
def __init__(self,
paymentId=None,
levelThreeOrderInformation=None,
addressVerificationInformation=None,
submissionDate=None,
submissionId=None,
amount=None,
comments=None,
methodId=None,
status=None,
                 gatewayTransactionReference=None,
                 gatewayMessage=None):
super(Payment, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
_ltoi = data.get('levelThreeOrderInformation')
_avi = data.get('addressVerification')
_subid = data.get('submissionId')
_methid = data.get('methodId')
_gtr = data.get('gatewayTransactionReference')
return cls(paymentId=data.get('id') or data.get('@id'),
levelThreeOrderInformation=_ltoi,
addressVerificationInformation=_avi,
submissionDate=data.get('submissionDate'),
submissionId=_subid,
amount=data.get('amount'),
gatewayTransactionReference=_gtr,
comments=data.get('comments'),
status=data.get('status'),
methodId=_methid)
def _obj_to_dict(self):
dic = {}
dic['levelThreeOrderInformation'] = self.levelThreeOrderInformation
dic['addressVerificationInformation'] = \
self.addressVerificationInformation
dic['submissionId'] = self.submissionId
        dic['amount'] = self.amount
        dic['comments'] = self.comments
dic['methodId'] = self.methodId
return {'papi:payment': self._remove_empty_values(dic)}
def _obj_to_xml(self):
dic = {'ns2:payment': {}}
        dic['ns2:payment'] = self._obj_to_dict()['papi:payment']
dic['ns2:payment']['@xmlns:atom'] = XMLNS_ATOM
dic['ns2:payment']['@xmlns:ns2'] = XMLNS_NS2
return xmltodict.unparse(dic)
class Void(BasePaymentSystemModel):
def __init__(self,
voidId=None,
voidAmount=None,
comments=None,
submissionId=None,
gatewayTransactionReference=None,
gatewayMessage=None,
status=None,
submissionDate=None):
super(Void, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
_subid = data.get('submissionId')
_gtr = data.get('gatewayTransactionReference')
return cls(voidId=data.get('id') or data.get('@id'),
status=data.get('status'),
gatewayTransactionReference=_gtr,
gatewayMessage=data.get('gatewayMessage'),
voidAmount=data.get('voidAmount'),
comments=data.get('comments'),
submissionId=_subid,
submissionDate=data.get('submissionDate'))
def _obj_to_dict(self):
dic = {}
dic['voidAmount'] = self.voidAmount
dic['comments'] = self.comments
dic['submissionId'] = self.submissionId
return {'papi:void': self._remove_empty_values(dic)}
def _obj_to_xml(self):
dic = {'ns3:void': {}}
dic['ns3:void'] = \
self._obj_to_dict()['papi:void']
dic['ns3:void']['@xmlns:ns2'] = XMLNS_ATOM
dic['ns3:void']['@xmlns:ns3'] = XMLNS_NS2
dic['ns3:void']['@xmlns:ns4'] = XMLNS_NS4
dic['ns3:void']['@xmlns:ns5'] = XMLNS_NS5
return xmltodict.unparse(dic)
class Refund(BasePaymentSystemModel):
def __init__(self,
refundId=None,
refundAmount=None,
comments=None,
submissionId=None,
status=None,
submissionDate=None,
gatewayTransactionReference=None,
gatewayMessage=None,
methodId=None):
super(Refund, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, data):
_subid = data.get('submissionId')
_gtr = data.get('gatewayTransactionReference')
_methid = data.get('methodId')
return cls(refundId=data.get('id'),
refundAmount=data.get('refundAmount'),
comments=data.get('comments'),
submissionId=_subid,
status=data.get('status'),
submissionDate=data.get('submissionDate'),
gatewayTransactionReference=_gtr,
gatewayMessage=data.get('gatewayMessage'),
methodId=_methid)
def _obj_to_dict(self):
dic = {}
dic['refundAmount'] = self.refundAmount
        dic['comments'] = self.comments
dic['submissionId'] = self.submissionId
return {'papi:refund': self._remove_empty_values(dic)}
def _obj_to_xml(self):
dic = {'ns3:refund': {}}
dic['ns3:refund'] = \
self._obj_to_dict()['papi:refund']
dic['ns3:refund']['@xmlns:ns2'] = XMLNS_ATOM
dic['ns3:refund']['@xmlns:ns3'] = XMLNS_NS2
dic['ns3:refund']['@xmlns:ns4'] = XMLNS_NS4
        dic['ns3:refund']['@xmlns:ns5'] = XMLNS_NS5
        return xmltodict.unparse(dic)
```
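A brief sketch of the serialization round-trip these models provide: `serialize()` dispatches to `_obj_to_json()` or `_obj_to_xml()` based on the format string, and empty fields are pruned by `_remove_empty_values()`. The import path is assumed from the file layout, and the field values are made up.

```python
# Illustrative values only; the module path is an assumption.
from syntribos.extensions.rax_payment_system.models import PaymentCardMethod

card = PaymentCardMethod(
    cardHolderName="Jane Doe",
    cardType="VISA",
    cardNumber="4111111111111111",
    expirationDate="01/2030",
    cardVerificationNumber="123")

as_json = card.serialize("json")  # '{"papi:method": {"PaymentCard": {...}}}'
as_xml = card.serialize("xml")    # '<ns2:method xmlns:atom=...>...</ns2:method>'
```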
#### File: tests/fuzz/integer_overflow.py
```python
import syntribos
from syntribos._i18n import _
from syntribos.checks import time_diff as time_diff
from syntribos.tests.fuzz import base_fuzz
class IntOverflowBody(base_fuzz.BaseFuzzTestCase):
"""Test for integer overflow vulnerabilities in HTTP body."""
test_name = "INTEGER_OVERFLOW_BODY"
test_type = "data"
data_key = "integer-overflow.txt"
def test_case(self):
self.diff_signals.register(time_diff(self))
if "TIME_DIFF_OVER" in self.diff_signals:
self.register_issue(
defect_type="int_timing",
severity=syntribos.MEDIUM,
confidence=syntribos.LOW,
description=(_("The time it took to resolve a request with an "
"invalid integer was too long compared to the "
"baseline request. This could indicate a "
"vulnerability to buffer overflow attacks")))
class IntOverflowParams(IntOverflowBody):
"""Test for integer overflow vulnerabilities in HTTP params."""
test_name = "INTEGER_OVERFLOW_PARAMS"
test_type = "params"
class IntOverflowHeaders(IntOverflowBody):
"""Test for integer overflow vulnerabilities in HTTP header."""
test_name = "INTEGER_OVERFLOW_HEADERS"
test_type = "headers"
class IntOverflowURL(IntOverflowBody):
"""Test for integer overflow vulnerabilities in HTTP URL."""
test_name = "INTEGER_OVERFLOW_URL"
test_type = "url"
url_var = "FUZZ"
```
#### File: tests/fuzz/redos.py
```python
import syntribos
from syntribos.checks import time_diff as time_diff
from syntribos.tests.fuzz import base_fuzz
class ReDosBody(base_fuzz.BaseFuzzTestCase):
"""Test for Regex DoS vulnerabilities in HTTP body."""
test_name = "REDOS_BODY"
test_type = "data"
data_key = "redos.txt"
def test_case(self):
self.run_default_checks()
self.diff_signals.register(time_diff(self))
if "TIME_DIFF_OVER" in self.diff_signals:
self.register_issue(
defect_type="redos_timing",
severity=syntribos.MEDIUM,
confidence=syntribos.LOW,
description=("A response to one of our payload requests has "
"taken too long compared to the baseline "
"request. This could indicate a vulnerability "
"to time-based Regex DoS attacks"))
class ReDosParams(ReDosBody):
"""Test for Regex DoS vulnerabilities in HTTP params."""
test_name = "REDOS_PARAMS"
test_type = "params"
class ReDosHeaders(ReDosBody):
"""Test for Regex DoS vulnerabilities in HTTP header."""
test_name = "REDOS_HEADERS"
test_type = "headers"
class ReDosURL(ReDosBody):
"""Test for Regex DoS vulnerabilities in HTTP URL."""
test_name = "REDOS_URL"
test_type = "url"
url_var = "FUZZ"
```
|
{
"source": "jfrerich/django-getrealty",
"score": 3
}
|
#### File: django-getrealty/realty/routers.py
```python
class RealtyDatabaseRouter(object):
"""
    Determine how to route database calls for the realty app's models.
    All other models will be routed to the next router in the
    DATABASE_ROUTERS setting if applicable, or otherwise to the default
    database.
"""
def db_for_read(self, model, **hints):
"""Send all read operations on realty app models to `realty_db`."""
if model._meta.app_label == 'realty':
return 'realty_db'
return None
def db_for_write(self, model, **hints):
"""Send all write operations on Example app models to `example`."""
if model._meta.app_label == 'realty':
return 'realty_db'
return None
def allow_relation(self, obj1, obj2, **hints):
"""Determine if relationship is allowed between two objects."""
        # Allow any relation between two models that are both in the realty app.
if obj1._meta.app_label == 'realty' and obj2._meta.app_label == 'realty':
return True
        # No opinion if neither object is in the realty app (defer to default or other routers).
elif 'realty' not in [obj1._meta.app_label, obj2._meta.app_label]:
return None
        # Block relationship if one object is in the realty app and the other isn't.
return False
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""Ensure that the Example app's models get created on the right database."""
if app_label == 'realty':
# The realty app should be migrated only on the realty database.
return db == 'realty_db'
elif db == 'realty_db':
            # Ensure that all other apps don't get migrated on the realty database.
return False
# No opinion for all other scenarios
return None
```
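For the router above to take effect, the project settings need a `realty_db` database alias and a `DATABASE_ROUTERS` entry pointing at this class. A minimal sketch; the dotted path `realty.routers` and the sqlite backend are assumptions for illustration, not taken from this repository's settings.

```python
# settings.py (sketch)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'default.sqlite3',
    },
    'realty_db': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'realty.sqlite3',
    },
}

# Routers are consulted in order; this one claims everything in the realty app.
DATABASE_ROUTERS = ['realty.routers.RealtyDatabaseRouter']
```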
#### File: jfrerich/django-getrealty/runtests.py
```python
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
# from realty.models import RealtyModel
if not settings.configured:
settings.configure(
DATABASE_ENGINE='sqlite3',
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'realty',
'tests',
],
)
def run_tests():
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["tests"])
sys.exit(bool(failures))
if __name__ == "__main__":
run_tests()
```
|
{
"source": "jfrerich/getRealty",
"score": 3
}
|
#### File: getRealty/getrealty/options.py
```python
import logging
import os
import sys
from argparse import ArgumentParser
import getrealty
config = getrealty.settings.config
logger = logging.getLogger(__name__)
def generate_argparser():
parser = ArgumentParser(description='Let\'s get some CAD data!')
# parser.add_argument('integers', metavar='N', type=int, nargs='+',
# help='an integer for the accumulator')
parser.add_argument('--rnumbers', help='list of rnumbers. Space separated')
# parser.add_argument('--rnumbers ALL',
# help='pull All rnumbers in the specified -county \
# cache')
parser.add_argument('--county',
help='County (choices are Bastrop, Williamson)',
required=True)
parser.add_argument('--subd', help='Subdivision (Advanced Search)')
parser.add_argument('--minacres', help='Min Acres (Advanced Search)')
parser.add_argument('--maxacres', help='Max Acres (Advanced Search)')
parser.add_argument('--minvalue',
help='Min Assessed Value (Advanced Search)')
parser.add_argument('--maxvalue',
help='Max Assessed Value (Advanced Search)')
parser.add_argument('--o', '--output',
help='output name of .XLSX and search files')
parser.add_argument('--date', help='append date to output of .XLSX')
parser.add_argument('-no_query_ping_server',
action="store_true",
help='don\'t query user if want to submit server ping')
parser.add_argument('-no_query_overwrite_xls',
action="store_true",
help='don\'t query user if want to overwrite xls')
parser.add_argument('--run_dir',
                        help='Location of cache, searches, and excel dirs \
                        (if defined, all live directly under run_dir; \
                        otherwise each dir includes /$county/)')
parser.add_argument('-use_last_adv_search',
action="store_true",
                        help='use the last advanced search request; don\'t send \
to server')
parser.add_argument('-sep_wkts',
action="store_true",
                        help='If > 900 entries, place 900 entries in separate \
workbooks')
parser.add_argument('-force',
action="store_true",
                        help='force server retrieving of omitted Rnumbers \
from DEFAULT_MIN_VALUE > value > DEFAULT_MAX_VALUE')
parser.add_argument('--work_dir')
group_db_query = parser.add_argument_group('DB QUERY OPTIONS')
group_db_query.add_argument('-db_search', action="store_true")
group_db_query.add_argument('--password',
help="WHERE command for searching DB. Ex. \
WHERE r_num=R53761")
group_db_update = parser.add_argument_group('DB UPDATE OPTIONS')
group_db_update.add_argument('-update_db_file',
action="store_true",
help=".xls file with desired changes to \
make to DB")
group_db_update.add_argument('-update_db_from_server',
action="store_true",
help="will force requested rnumbers to \
update information with latest data from \
server")
group_db_update.add_argument('-update_db_from_cache',
action="store_true",
help="used to recalculate values in the db, \
from the cache values")
# group_db_update.add_argument('-update_db')
group_db_update.add_argument('-update_db_sum_diff_than_db',
help="If the differences in summary_db and \
database are different, update without \
resolving conflicts")
group_regress = parser.add_argument_group('REGRESSION OPTIONS')
group_regress.add_argument('-regress', action="store_true")
group_regress.add_argument('--regress_get_columns')
group_regress.add_argument('--sql_where')
group_regress.add_argument('--sql_search')
group_new_db_options = parser.add_argument_group('NEW DB QUERY OPTIONS')
group_new_db_options.add_argument('--update_db_file_junk',
help='''.xls file with desired changes
to make to DB''')
# group_new_db_options.add_argument(' ',
# help='.xls file with desired changes \
# parser.parse_args')
# group_new_db_options.add_argument(' ',
# help='-sql_search <comma separated \
# list of sql column options>')
#
# <Column_Name>:Min:Max - Range of min / max values
# <Column_Name>::Max - Values less than Max value
# <Column_Name>:Min: - Values greater than Min value
# <Column_Name>:<value> - Values equal to <value>
# <Column_Name>:!<value> - Values NOT equal to <value>
#
# Examples:
#
# -sql_search "Subd:ARTESIAN OAKS SEC 3"
# -sql_search "Different___Zip:\!0,Different___Addr:1"
#
# tcsh
#
# Setting multiple in a shell
#
# set SQL2 = "Different___Zip:\!0,"
# set SQL2 = "\$SQL2 Different___State:\!0, "
# set SQL2 = "\$SQL2 Prop_Details___Acres:1:2, "
# set SQL2 = "\$SQL2 Prop_Details___AssessVal:30000:, "
# set SQL2 = "\$SQL2 TaxDates___LastYrPaid:2012:"
#
# \$SCRIPT -sql_search "\$SQL2" \
#
# LIKE Clause
#
# % is required when using LIKE clause
#
# set RNUMBER='"CIRCLE%"'
# -sql_where "WHERE subd LIKE \$RNUMBER"
# print help if no args given
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def setDefaults():
getCounty()
args = generate_argparser()
setDefaultValue(args.minvalue, 'MIN_VALUE')
setDefaultValue(args.maxvalue, 'MAX_VALUE')
setDefaultValue(args.subd, 'SUBDIVISION')
setDefaultValue(args.county, 'COUNTY')
setDefaultValue(args.minacres, 'ACRES_LEAST')
setDefaultValue(args.maxacres, 'ACRES_MOST')
setDefaultValue(args.run_dir, 'RUN_DIR')
setDefaultValue(args.work_dir, 'WORK_DIR')
setDefaultValue(args.o, 'OUTPUT')
setDefaultValue(args.rnumbers, 'RNUMBERS')
setDefaultValue(args.db_search, 'DB_SEARCH')
setDefaultValue(args.sql_search, 'SQL_SEARCH')
setDefaultValue(args.force, 'FORCE')
setDefaultValue(args.regress, 'REGRESS')
setDefaultValue(args.regress_get_columns, 'REGRESS_GET_COLUMNS')
setDefaultValue(args.sql_where, 'SQL_WHERE')
setDefaultValue(args.no_query_overwrite_xls, 'NO_QUERY_OVERWRITE_XLS')
setDefaultValue(args.no_query_ping_server, 'NO_QUERY_PING_SERVER')
setDefaultValue(args.update_db_from_server, 'UPDATE_DB_FROM_SERVER')
setDefaultValue(args.use_last_adv_search, 'USE_LAST_ADV_SEARCH')
setDefaultValue(args.update_db_from_cache, 'UPDATE_DB_FROM_CACHE')
os.chdir(config['defaults']['WORK_DIR'])
def setDefaultValue(arg, key):
if arg is not None:
config['defaults'][key] = arg
def getCounty():
# required options
# defined required options here
# $logger->error("ERROR: Must Provide A County (-county)");
# county = config.DEFAULT_COUNTY
# if config.DEFAULT_COUNTY is 'Bastrop':
    if config['defaults']['COUNTY'] == 'Bastrop':
config['defaults']['HOST_TAX_ACCESSOR'] = 'http://www.bastroptac.com'
config['defaults']['HOST_TAX_ACCESSOR_WWW'] = 'www.bastroptac.com'
config['defaults']['HOST_TAX_ACCESSOR_ADVANCED'] = \
'http://www.bastroptac.com'
config['defaults']['HOST_TAX_ACCESSOR_WWW_ADVANCED'] = \
'www.bastroptac.com'
        config['defaults']['GIS_WWW'] = ('http://gis.bisconsultants.com/'
                                         'bastropcad/?slayer=0&exprnum=0&esearch="')
    elif (config['defaults']['COUNTY'] == "Williamson"):
config['defaults']['HOST_TAX_ACCESSOR'] = 'http://www.tax.wilco.org'
config['defaults']['HOST_TAX_ACCESSOR_WWW'] = 'tax.wilco.org'
config['defaults']['HOST_TAX_ACCESSOR_ADVANCED'] = \
'http://search.wcad.org'
config['defaults']['HOST_TAX_ACCESSOR_WWW_ADVANCED'] = \
'search.wcad.org'
config['defaults']['GIS_WWW'] = 'http://gisapp.wcad.org/?pin='
if __name__ == '__main__':
args = generate_argparser()
print(args)
```
#### File: getRealty/getrealty/printArray.py
```python
import logging
import pprint
import getrealty
pp = pprint.PrettyPrinter(width=1)
config = getrealty.settings.config
logger = logging.getLogger(__name__)
class MyPrintArray(object):
"""Initializes the structures for all of the heading saved into the
database
do_update - when updating from the server or cache, label which columns
to update
. do not want to update columns that have data which was
input from the user
. Ie. notes, property interest..
pts - (print to summary) these are values that will be printed to the
summary sheet
0 - (print to db only) values to db, but not to summary or
NOTESandContacts sheet
1 - (print to summary) values printed to summary sheet
2 - (print to rnumber page) values printed to NOTESandContacts sheet
"""
def __init__(self):
# self.printArrayHeadings = (
# "pts", "do_update", "db_type", "comment")
# self.printArray = [
# [ 2, "No" ],
# [ 0, "Yes"],
# ]
k_det = config['defaults_static']['PROP_DETAIL_RESULTS']
k_bill = config['defaults_static']['BILL_PAGE_RESULTS']
# k_hist = config.HISTORY_PAGE_RESULTS
k_data = config['defaults_static']['DATASHEET_PAGE']
k_calc = "calcs"
undef = ' '
# This is the list of entires to go into the worksheet.
# The order is kept from below.
# Merged Name for wkst key key name cond
# Do_update Header header found of measure type format format db_type comment
self.printArrayHeadings = (
"pts", "do_update", "merged_header", "wkst_header", "key", "measure_name", "type", "format", "condF", "db_type", "comment")
self.printArray = [
[ 2, "No", "", "NOTES_rnum_sheet", k_calc, "", undef, undef, undef, "", "" ],
# 2, "No", "", "CONTS_rnum_sheet", $k_calc, "", undef, undef, undef, "", ],
#[ 0, "Yes", "", "unique_key", $k_calc, "UniqueKey", undef, undef, undef, "", ],
[ 0, "Yes", "", "County", k_calc, "County", undef, undef, undef, "", " " ],
[ 1, "Yes", "", "rpg", k_calc, "", undef, "format1or0", undef, "", "Does the Rnumber have additional Notes (Rnumber tab)" ],
[ 1, "No", "", "Property_Interest", k_calc, "PropInterest", undef, undef, undef, "", " " ],
[ 0, "Yes", "Files", "BillsF", k_calc, "bills_wkt", "file", undef, undef, "", " " ],
[ 0, "Yes", "Files", "HistF", k_calc, "hist_wkt", "file", undef, undef, "", " " ],
[ 0, "Yes", "Files", "DetF", k_calc, "detail_wkt", "file", undef, undef, "", " " ],
[ 0, "Yes", "Files", "DataF", k_calc, "datasheet_wkt", "file", undef, undef, "", " " ],
[ 1, "Yes", "Links", "Bills", k_det, "bills_link", "link:Bills", undef, undef, "", " " ],
[ 1, "Yes", "Links", "Det", k_det, "details_link", "link:Det", undef, undef, "", " " ],
[ 1, "Yes", "Links", "Hist", k_det, "hist_link", "link:Hist", undef, undef, "", " " ],
[ 1, "Yes", "Links", "Data", k_det, "datasheet_link", "link:Data", undef, undef, "", " " ],
[ 1, "Yes", "Maps", "GIS", k_det, "GIS_link", "link:GIS", undef, undef, "", " " ],
[ 1, "Yes", "Maps", "Map", k_det, "Property Address", "goog", undef, undef, "", " " ],
[ 1, "No", "", "NOTES", k_calc, "", undef, undef, undef, "", " " ],
[ 1, "Yes", "", "Subd", k_calc, "subd", undef, undef, undef, "", "Subdivision" ],
[ 1, "Yes", "Prop_Details", "Acres", k_det, "Acreage", undef, undef, undef, "REAL", " " ],
[ 1, "Yes", "Prop_Details", "AssessVal", k_det, "Assessed", undef, undef, undef, "REAL", " " ],
[ 1, "Yes", "Prop_Details", "AssessMinDue", k_calc, "AssessMinDue", undef, undef, undef, "REAL", "Assessed Minus Tax Amount Due. Good for Tax Sale List" ],
[ 1, "Yes", "Tax_Due", "PctToAss", k_calc, "TaxToAssessVal", undef, "formatpct", undef, "REAL", "Percentage of Taxes Due to Assessed Value of the Property" ],
[ 1, "Yes", "Tax_Due", "TotAmtDue", k_calc, "TotAmtDue", undef, undef, undef, "REAL", "Total Amount of Taxes Due at the time the information was pulled" ],
[ 1, "Yes", "", "DatePulled", k_bill, "DateResponseCaptured", undef, undef, undef, "", "Date the Response files were pulled from the server" ],
[ 1, "Yes", "TaxDates", "OldestDue", k_calc, "OldestTaxDue", undef, undef, undef, "", "Oldes Taxes Due. Sometimes later year taxes are paid, but older taxes are still due" ],
[ 1, "Yes", "TaxDates", "LastPaid", k_calc, "LastDatePaid", undef, undef, undef, "", "Last Date Taxes were paid." ],
[ 1, "Yes", "TaxDates", "LastYrPaid", k_calc, "LastYearPaid", undef, undef, undef, "", "Last year taxes were paid." ],
[ 1, "Yes", "Different", "Addr", k_calc, "diff_addr", undef, "format1or0", undef, "", "Owner and Property addresses Differ" ],
[ 1, "Yes", "Different", "Zip", k_calc, "diff_zip", undef, "format1or0", undef, "", "Owner and Property zip codes Differ" ],
[ 1, "Yes", "Different", "State", k_calc, "diff_state", undef, "format1or0", undef, "", "Owner and Property states Differ" ],
[ 1, "Yes", "", "NHS_imp", k_calc, "NHS_improved", undef, "format1or0", undef, "", " " ],
[ 1, "Yes", "", "InstallYr", k_calc, "UtilInstallYr", undef, undef, undef, "", "Year Utilites were installed. Value > {}".format(config['defaults_static']['IMPNHS_VALUE_MIN']) ],
[ 1, "Yes", "TimesSold", "Num", k_data, "NumTimesSold", undef, undef, undef, "", "Number of Times the Property has been sold" ],
[ 1, "Yes", "TimesSold", "Last", k_data, "LastTimeSold", undef, undef, undef, "", "Last Time the Property was Sold" ],
[ 1, "Yes", "Appraised", "AppPctOfMax", k_calc, "AppPctOfMax", undef, "lt_grey", undef, "REAL", "Appraised: Percent Current Value / Max Value ever" ],
[ 1, "Yes", "Appraised", "AppLastVal", k_calc, "AppLastVal", undef, undef, undef, "REAL", "Appraised: Last Value Ever Recoreded" ],
[ 1, "Yes", "Appraised", "AppMaxVal", k_calc, "AppMaxVal", undef, undef, undef, "REAL", "Appraised: Maximum Value Ever Recoreded" ],
[ 1, "Yes", "AppraisedMaxReduced", "AppMaxAmt", k_calc, "AppMaxReduced", undef, undef, undef, "REAL", "Appraised: Maximum Reduced Amount from one year to the next" ],
[ 1, "Yes", "AppraisedMaxReduced", "AppMaxYr", k_calc, "AppMaxReducedYr", undef, undef, undef, "", "Appraised: Year Maximum Reduced Amount Occured" ],
[ 1, "Yes", "Assess", "AssPctOfMax", k_calc, "AssPctOfMax", undef, "lt_grey", undef, "REAL", "Assessed: Percent Current Value / Max Value ever" ],
[ 1, "Yes", "Assess", "AssLastVal", k_calc, "AssLastVal", undef, undef, undef, "REAL", "Assessed: Last Value Ever Recoreded" ],
[ 1, "Yes", "Assess", "AssMaxVal", k_calc, "AssMaxVal", undef, undef, undef, "REAL", "Assessed: Maximum Value Ever Recoreded" ],
[ 1, "Yes", "AssessMaxReduced", "AssMaxAmt", k_calc, "AssMaxReduced", undef, undef, undef, "REAL", "Assessed: Maximum Reduced Amount from one year to the next" ],
[ 1, "Yes", "AssessMaxReduced", "AssMaxYr", k_calc, "AssMaxReducedYr", undef, undef, undef, "", "Assessed: Year Maximum Reduced Amount Occured" ],
[ 1, "Yes", "ImpNHS", "LastPctOfMax", k_calc, "ImpNHSPctOfMax", undef, "lt_grey", undef, "REAL", "Improved Non-Homestead: Percent Current Value / Max Value ever" ],
[ 1, "Yes", "ImpNHS", "LastVal", k_calc, "ImpNHSLastVal", undef, undef, undef, "REAL", "Improved Non-Homestead: Last Value Ever Recoreded" ],
[ 1, "Yes", "ImpNHS", "MaxVal", k_calc, "ImpNHSMaxVal", undef, undef, undef, "REAL", "Improved Non-Homestead: Maximum Value Ever Recoreded" ],
[ 1, "Yes", "ImpNHSMaxReduced", "MaxAmt", k_calc, "ImpNHSMaxReduced", undef, undef, undef, "REAL", "Improved Non-Homestead: Maximum Reduced Amount from one year to the next" ],
[ 1, "Yes", "ImpNHSMaxReduced", "MaxYr", k_calc, "ImpNHSMaxReducedYr", undef, undef, undef, "", "Improved Non-Homestead: Year Maximum Reduced Amount Occured" ],
[ 1, "Yes", "ImpHS", "ihsPctOfMax", k_calc, "ImpHSPctOfMax", undef, "lt_grey", undef, "REAL", "Improved Homestead: Percent Current Value / Max Value ever" ],
[ 1, "Yes", "ImpHS", "ihsLastVal", k_calc, "ImpHSLastVal", undef, undef, undef, "REAL", "Improved Homestead: Last Value Ever Recoreded" ],
[ 1, "Yes", "ImpHS", "ihsMaxVal", k_calc, "ImpHSMaxVal", undef, undef, undef, "REAL", "Improved Homestead: Maximum Value Ever Recoreded" ],
[ 1, "Yes", "ImpHSMaxReduced", "ihsMaxAmt", k_calc, "ImpHSMaxReduced", undef, undef, undef, "REAL", "Improved Homestead: Maximum Reduced Amount from one year to the next" ],
[ 1, "Yes", "ImpHSMaxReduced", "ihsMaxYr", k_calc, "ImpHSMaxReducedYr", undef, undef, undef, "", "Improved Homestead: Year Maximum Reduced Amount Occured" ],
[ 1, "Yes", "LandHS", "lhsPctOfMax", k_calc, "LandHSPctOfMax", undef, "lt_grey", undef, "REAL", "Land Homestead: Percent Current Value / Max Value ever" ],
[ 1, "Yes", "LandHS", "lhsLastVal", k_calc, "LandHSLastVal", undef, undef, undef, "REAL", "Land Homestead: Last Value Ever Recoreded" ],
[ 1, "Yes", "LandHS", "lhsMaxVal", k_calc, "LandHSMaxVal", undef, undef, undef, "REAL", "Land Homestead: Maximum Value Ever Recoreded" ],
[ 1, "Yes", "LandHSMaxReduced", "lhsMaxAmt", k_calc, "LandHSMaxReduced", undef, undef, undef, "REAL", "Land Homestead: Maximum Reduced Amount from one year to the next" ],
[ 1, "Yes", "LandHSMaxReduced", "lhsMaxYr", k_calc, "LandHSMaxReducedYr", undef, undef, undef, "", "Land Homestead: Year Maximum Reduced Amount Occured" ],
[ 1, "Yes", "LandNHS", "lnhsPctOfMax", k_calc, "LandNHSPctOfMax", undef, "lt_grey", undef, "REAL", "Land Non-Homestead: Percent Current Value / Max Value ever" ],
[ 1, "Yes", "LandNHS", "lnhsLastVal", k_calc, "LandNHSLastVal", undef, undef, undef, "REAL", "Land Non-Homestead: Last Value Ever Recoreded" ],
[ 1, "Yes", "LandNHS", "lnhsMaxVal", k_calc, "LandNHSMaxVal", undef, undef, undef, "REAL", "Land Non-Homestead: Maximum Value Ever Recoreded" ],
[ 1, "Yes", "LandNHSMaxReduced", "lnhsMaxAmt", k_calc, "LandNHSMaxReduced", undef, undef, undef, "REAL", "Land Non-Homestead: Maximum Reduced Amount from one year to the next" ],
[ 1, "Yes", "LandNHSMaxReduced", "lnhsMaxYr", k_calc, "LandNHSMaxReducedYr", undef, undef, undef, "", "Land Non-Homestead: Year Maximum Reduced Amount Occured" ],
[ 1, "Yes", "DaysLate", "Curr", k_calc, "CurrDaysLate", undef, undef, undef, "REAL", "Current Number of Days Taxes are Late" ],
[ 1, "Yes", "DaysLate", "Max", k_calc, "max_days_late", undef, undef, undef, "REAL", "Maximum Days Late over all years. Used to determine history of times late" ],
[ 1, "Yes", "", "PctDiffAddr", k_calc, "pctDiffAddr", undef, "format1or0", undef, "REAL", "Owner and Property addresses Differ" ],
[ 1, "Yes", "", "PropAddr", k_det, "Property Address", undef, undef, undef, "", " " ],
[ 1, "Yes", "", "OwnerAddr", k_det, "Owner Address", undef, undef, undef, "", " " ],
[ 1, "Yes", "", "OwnerName", k_det, "Owner Name", undef, undef, undef, "", " " ],
[ 1, "Yes", "", "LegalDesc", k_det, "Legal Description", undef, undef, undef, "", " " ],
# [ 1, "Yes", "Prop_Details", "Interest",
# $k_det, "Undivided Interest", undef, undef, undef,
# "",],
# [ 1, "Yes", "", "MinDaysLate", $k_calc, "min_days_late", undef, undef, undef, "", ],
# [ 1, "Yes", "", "Utilities", $k_calc, "utilities", undef, undef, undef, "", ],
]
def getMyPrintArray(self):
return(self.printArray)
def getMyReturnHash(self):
return(storePrintArray(self.printArrayHeadings, self.printArray))
def getSecondValueFromHeadingValueCombo(heading, heading_name, heading2):
# heading = heading to search
# heading_name = heading to find in heading column
# heading2 = name of heading to find value
# this routine is an API to return the value of a second heading, given
# a heading and value.
#
# Ex. for wkst_header,NOTES combo, find value of do_update for that
# same row of data.
myPA = MyPrintArray()
myRH = myPA.getMyReturnHash()
# get the array entry number in print array data
array_number = getPrintArrayHashByValue(heading, heading_name)
# get value for 2nd heading
value = myRH['data'][array_number][heading2]
return(value)
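# Illustrative usage sketch for the two lookup helpers in this module. The
# column/value names below come from the example in the comments above and are
# only an assumption about the worksheet layout:
#
#   row_num = getPrintArrayHashByValue('wkst_header', 'NOTES')
#   do_update = getSecondValueFromHeadingValueCombo('wkst_header', 'NOTES', 'do_update')
#
# i.e. find the row whose wkst_header column equals 'NOTES', then read that
# row's do_update column.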
def getPrintArrayHashByValue(heading, heading_name):
'''this routine is an API to return a single hash entry number that
represents the anonymous hash matching the heading and heading name value.
'''
myPA = MyPrintArray()
myRH = myPA.getMyReturnHash()
    num_returns = 0
    return_hash_number = None
    my_list = list(myRH['data'])
    for PrintArrayEntry in my_list:
        # skip rows that do not contain this heading at all
        if heading not in myRH['data'][PrintArrayEntry]:
            continue
        # use value equality (==) rather than identity (is) for the comparison
        if myRH['data'][PrintArrayEntry][heading] != heading_name:
            continue
        return_hash_number = PrintArrayEntry
        num_returns = num_returns + 1
    return(return_hash_number)
def storePrintArray(printArrayHeadings, printArray):
return_hash = {'data': {}}
cnt = 0
for myprintArray in (printArray):
for i, heading in enumerate(printArrayHeadings):
heading_val = myprintArray[i]
if cnt not in return_hash['data']:
return_hash['data'][cnt] = {heading: heading_val}
else:
return_hash['data'][cnt].update({heading: heading_val})
cnt += 1
return_hash['headings'] = printArrayHeadings
return return_hash
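# For reference, storePrintArray() produces a structure shaped roughly like the
# following (a minimal, hypothetical sketch -- the heading names are only
# examples of entries in printArrayHeadings):
#
#   {'headings': ['pts', 'do_update', 'merged_header', 'wkst_header', ...],
#    'data': {0: {'pts': 1, 'do_update': 'Yes', 'merged_header': 'ImpHS', ...},
#             1: {...},
#             ...}}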
def getHeadingsFromPrintArray(merged, pts_get):
'''returns an array of heading names.
merged = 1 (return the merged heading names)
merged = 0 (return the non-merged heading names)
    If pts_get is defined, only return headings for that type
DEFAULT is to grab all entries
'''
myPAarray = MyPrintArray().getMyPrintArray()
headingsArray = []
MERGED_SEPARATOR = config['defaults_static']['MERGED_SEPARATOR']
for arrayEntryPtr in myPAarray:
merged_heading = getPrintArrayValueByHeading(arrayEntryPtr,
"merged_header")
heading = getPrintArrayValueByHeading(arrayEntryPtr,
"wkst_header")
pts = getPrintArrayValueByHeading(arrayEntryPtr, "pts")
# when getting summary print headings, only pts == 1 is
# taken from printArray
        if pts_get:
            if pts != pts_get:
                continue
        if merged:
            if merged_heading != "":
                heading = merged_heading + MERGED_SEPARATOR + heading
db_heading = heading
headingsArray.append(db_heading)
return (headingsArray)
def getPrintArrayValueByHeading(arrayEntryPtr, heading):
    # given one printArray entry, return the value stored under the desired
    # heading (column)
    myPA = MyPrintArray()
    myRH = myPA.getMyReturnHash()
    heading_array_number = None
    for i, printHeading in enumerate(myRH['headings']):
        if heading == printHeading:
            heading_array_number = i
            break
    assert heading_array_number is not None, 'UNKNOWN HEADING: {}'.format(heading)
    return(arrayEntryPtr[heading_array_number])
def CreatePrintHash():
# this routine stores all of the print information to the excel worksheet.
    # printhash['headers'] stores the headers and the order of the headers.
    # printhash also stores all key/value pairs for each row number.
printhash = {'type': {}, 'format': {}, 'cond_format': {}, 'comment': {},
'headers': [], 'headers_merged': [], 'headers_merged_num': {}}
myPA = MyPrintArray()
myPAarray = myPA.getMyPrintArray()
for arrayEntryPtr in myPAarray:
pts = getPrintArrayValueByHeading(arrayEntryPtr, "pts")
merged_heading = getPrintArrayValueByHeading(arrayEntryPtr,
"merged_header")
heading = getPrintArrayValueByHeading(arrayEntryPtr, "wkst_header")
my_type = getPrintArrayValueByHeading(arrayEntryPtr, "type")
my_format = getPrintArrayValueByHeading(arrayEntryPtr, "format")
cond_format = getPrintArrayValueByHeading(arrayEntryPtr, "condF")
comment = getPrintArrayValueByHeading(arrayEntryPtr, "comment")
        # if not a summary entry, don't add it; otherwise headers get shifted
        if (pts != 1):
            continue
        if (merged_heading == ""):
printhash['headers_merged'].append("")
else:
if merged_heading not in printhash['headers_merged_num']:
printhash['headers_merged_num'].update({merged_heading: 1})
printhash['headers_merged'].append(merged_heading)
else:
old_merged_heading = printhash['headers_merged_num'][merged_heading]
new_merged_heading_val = old_merged_heading + 1
printhash['headers_merged_num'].update({merged_heading: new_merged_heading_val})
heading = "{}{}{}".format(merged_heading,config['defaults_static']['MERGED_SEPARATOR'],heading)
printhash['headers'].append(heading)
# get type of input, formatting, conditional formatting, and comment
if my_type is not " ":
printhash['type'].update({heading: my_type})
if my_format is not " ":
printhash['format'][heading] = my_format
if cond_format is not " ":
printhash['cond_format'][heading] = cond_format
if comment is not " ":
printhash['comment'][heading] = comment
return(printhash)
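# CreatePrintHash() therefore yields a dict shaped roughly as follows (a
# minimal, hypothetical sketch based on the loop above):
#
#   {'headers': [...column headings, summary (pts == 1) rows only...],
#    'headers_merged': [...merged group names, '' where a row has none...],
#    'headers_merged_num': {'ImpHSMaxReduced': 2, ...},
#    'type': {heading: 'REAL', ...},
#    'format': {...}, 'cond_format': {...}, 'comment': {...}}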
if __name__ == '__main__':
pass
```
|
{
"source": "jfrey-xx/godot-python",
"score": 2
}
|
#### File: godot-python/misc/pin_github_actions.py
```python
import re
import sys
import json
import argparse
from pathlib import Path
from functools import lru_cache
from urllib.request import urlopen
GITHUB_CONF_DIR = Path(".").joinpath("../.github").resolve()
REPO_REGEX = r"(?P<repo>[\w\-_]+/[\w\-_]+)"
SHA_REGEX = r"(?P<sha>[a-fA-F0-9]{40})"
TAG_REGEX = r"(?P<tag>[\w\-_]+)"
PIN_REGEX = r"(?P<pin>[\w\-_]+)"
USES_REGEX = re.compile(
rf"uses\W*:\W*{REPO_REGEX}@({SHA_REGEX}|{TAG_REGEX})(\W*#\W*pin@{PIN_REGEX})?", re.MULTILINE
)
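# For reference, the regex above is intended to match workflow lines such as
# (hypothetical examples, not taken from this repository's workflows):
#   uses: actions/checkout@v2
#   uses: actions/checkout@0123456789abcdef0123456789abcdef01234567 # pin@v2
# capturing the repo, the tag or 40-character commit SHA, and an optional pin tag.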
def get_files(pathes):
for path in pathes:
if path.is_dir():
yield from path.rglob("*.yml")
elif path.is_file():
yield path
@lru_cache(maxsize=None)
def resolve_tag(repo, tag):
url = f"https://api.github.com/repos/{repo}/git/ref/tags/{tag}"
with urlopen(url) as f:
data = json.loads(f.read())
return data["object"]["sha"]
def add_pin(pathes):
for file in get_files(pathes):
txt = file.read_text()
overwrite_needed = False
# Start by the end so that we can use match.start/end to do in-place modifications
for match in reversed(list(USES_REGEX.finditer(txt))):
repo = match.group("repo")
tag = match.group("tag")
if tag is not None:
sha = resolve_tag(repo, tag)
print(f"Pinning github action {file}: {repo}@{tag} => {sha}")
txt = txt[: match.start()] + f"uses: {repo}@{sha} # pin@{tag}" + txt[match.end() :]
overwrite_needed = True
if overwrite_needed:
file.write_text(txt)
return 0
def check_pin(pathes):
ret = 0
for file in get_files(pathes):
for match in USES_REGEX.finditer(file.read_text()):
repo = match.group("repo")
tag = match.group("tag")
if tag is not None:
print(f"Unpinned github action {file}: {repo}@{tag}")
ret = 1
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("cmd", choices=["check", "add"])
parser.add_argument(
"files", nargs="*", type=Path, default=[Path(__name__).joinpath("../.github/").resolve()]
)
args = parser.parse_args()
if args.cmd == "check":
sys.exit(check_pin(args.files))
else:
sys.exit(add_pin(args.files))
```
|
{
"source": "jfrfonseca/kcmc_heuristic",
"score": 2
}
|
#### File: kcmc_heuristic/src/kcmc_instance.py
```python
from filelock import FileLock
from typing import List, Set, Tuple, Dict
try:
import igraph
except Exception as exp:
igraph = None
class KCMC_Instance(object):
color_dict = {
"p": "green",
"i": "red", "offline": "white",
"s": "black", "vsink": "grey",
"tree_0": "blue",
"tree_1": "orange",
"tree_2": "yellow",
"tree_3": "magenta",
"tree_4": "cyan"
}
def __repr__(self): return f'<{self.key_str} {self.random_seed} [{len(self.virtual_sinks)}]>'
def __init__(self, instance_string:str, accept_loose_pois=False, accept_loose_sensors=False, accept_loose_sinks=False):
self.string = instance_string.upper().strip()
assert self.string.startswith('KCMC;'), 'ALL INSTANCES MUST START WITH THE TAG <KCMC;>'
assert self.string.endswith(';END'), 'ALL INSTANCES MUST END WITH THE TAG <;END>'
# IDENTIFY AND UPGRADE STRING VERSION
self.version = 0.1 if ';SK;' in self.string else 1.0
if self.version == 0.1: self.string = self.string.replace('PS', 'PI').replace('SS', 'II').replace('SK', 'IS')
# Base-parse the string
instance = self.string.split(';')
# Parse the constants
try:
self.num_pois, self.num_sensors, self.num_sinks = instance[1].split(' ')
self.area_side, self.sensor_coverage_radius, self.sensor_communication_radius = instance[2].split(' ')
self.random_seed = instance[3]
self.num_pois, self.num_sensors, self.num_sinks, self.random_seed \
= map(int, [self.num_pois, self.num_sensors, self.num_sinks, self.random_seed])
self.area_side, self.sensor_coverage_radius, self.sensor_communication_radius \
= map(int, [self.area_side, self.sensor_coverage_radius, self.sensor_communication_radius])
except Exception as exp: raise AssertionError('INVALID INSTANCE PREAMBLE!')
# Prepare the buffers
self.poi_sensor = {}
self.sensor_poi = {}
self.sensor_sensor = {}
self.sink_sensor = {}
self.sensor_sink = {}
self.edges = {}
self.virtual_sinks_map = {}
tag = None
for i, token in enumerate(instance[4:-1]):
if token in {'PI', 'II', 'IS'}:
tag = token
continue
elif tag is None:
raise AssertionError(f'INVALID TAG PARSING AT TOKEN {i+4} - {token}')
alpha, beta = map(int, token.strip().split(' '))
if tag == 'PI':
self._add_to(self.edges, f'p{alpha}', f'i{beta}')
self._add_to(self.poi_sensor, alpha, beta)
self._add_to(self.sensor_poi, beta, alpha)
elif tag == 'II':
assert alpha != beta, "SELF-DIRECTED EDGES ARE NOT SUPPORTED"
self._add_to(self.edges, f'i{alpha}', f'i{beta}')
self._add_to(self.sensor_sensor, alpha, beta)
self._add_to(self.sensor_sensor, beta, alpha)
elif tag == 'IS':
self._add_to(self.edges, f'i{alpha}', f's{beta}')
self._add_to(self.sensor_sink, alpha, beta)
self._add_to(self.sink_sensor, beta, alpha)
else: raise AssertionError(f'IMPOSSIBLE TAG {tag}')
# Reading validations
assert max(self.poi_sensor.keys()) < self.num_pois, f'INVALID POI IDs! {[p for p in self.poi_sensor.keys() if p > self.num_pois]}'
assert max(self.sensor_sensor.keys()) < self.num_sensors, f'INVALID SENSOR IDs! {[p for p in self.sensor_sensor.keys() if p > self.num_sensors]}'
assert max(self.sink_sensor.keys()) < self.num_sinks, f'INVALID SINK IDs! {[p for p in self.sink_sensor.keys() if p > self.num_sinks]}'
# Optional validations
assert ((len(self.poi_sensor) == self.num_pois) or accept_loose_pois), \
f'INVALID NUMBER OF POIS ({self.num_pois} {len(self.poi_sensor)})'
assert ((len(self.sink_sensor) == self.num_sinks) or accept_loose_sinks), \
f'INVALID NUMBER OF SINKS ({self.num_sinks} {len(self.sink_sensor)})'
assert ((len(self.sensor_sensor) == self.num_sensors) or accept_loose_sensors), \
f'INVALID NUMBER OF SENSORS ({self.num_sensors} {len(self.sensor_sensor)} {[i for i in range(self.num_sensors) if i not in self.sensor_sensor]})'
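    # A minimal, self-consistent example of the serialized format accepted by
    # __init__ (hypothetical values, inferred from the parser above):
    #   'KCMC;1 2 1;100 50 100;42;PI;0 0;II;0 1;IS;1 0;END'
    # i.e. 1 POI, 2 sensors, 1 sink; area side 100, coverage radius 50,
    # communication radius 100; random seed 42; then the PI/II/IS edge lists.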
# BASIC PROPERTIES #################################################################################################
@property
def key(self) -> tuple:
return self.num_pois, self.num_sensors, self.num_sinks, \
self.area_side, self.sensor_coverage_radius, self.sensor_communication_radius
@property
def key_str(self) -> str:
return ':'.join([f'KCMC_{self.version}'] + list(map(str, self.key)))
@property
def is_single_sink(self) -> bool: return self.num_sinks == 1
@property
def pois(self) -> List[str]: return [f'p{p}' for p in self.poi_sensor.keys()]
@property
def poi_degree(self) -> Dict[str, int]: return {f'p{p}': len(i) for p, i in self.poi_sensor.items()}
@property
def poi_edges(self) -> List[Tuple[str, str]]: return [(f'p{p}', f'i{i}') for p, sensors in self.poi_sensor.items() for i in sensors]
@property
def sensors(self) -> List[str]: return [f'i{s}' for s in range(0, self.num_sensors) if f'i{s}' not in self.connected_sensors]
@property
def sensor_degree(self) -> Dict[str, int]: return {f'i{p}': len(i) for p, i in self.sensor_sensor.items()}
@property
def original_sensors(self) -> Set[str]: return set([s for s in self.sensors if s not in self.virtual_sinks])
@property
def sensor_edges(self) -> List[Tuple[str, str]]: return [(f'i{i}', f'i{ii}') for i, sensors in self.sensor_sensor.items() for ii in sensors]
@property
def sinks(self) -> List[str]: return [f's{k}' for k in self.sink_sensor]
@property
def sink_degree(self) -> Dict[str, int]: return {f's{p}': len(i) for p, i in self.sink_sensor.items()}
@property
def sink_edges(self) -> List[Tuple[str, str]]: return [(f'i{i}', f's{s}') for i, sinks in self.sensor_sink.items() for s in sinks]
@property
def coverage_density(self) -> float: return (self.sensor_coverage_radius*self.num_sensors)/(self.area_side*self.area_side*self.num_pois)
@property
def communication_density(self) -> float: return (self.sensor_communication_radius*self.num_sensors*self.num_sinks)/(self.area_side*self.area_side)
@property
def virtual_sinks(self) -> Set[str]: return set([f'i{s}' for vsinks in self.virtual_sinks_map.values() for s in vsinks])
@property
def original_sinks(self) -> Set[str]: return set([f'i{s}' for s in self.virtual_sinks_map.keys()])
@property
def virtual_sinks_dict(self) -> Dict[str, str]:
inverted_virtual_sinks_map = {}
for osink, virtual_sinks in self.virtual_sinks_map.items():
for vsink in virtual_sinks:
inverted_virtual_sinks_map[f'i{vsink}'] = f's{osink}'
return inverted_virtual_sinks_map
@property
    def num_virtual_sinks(self) -> int: return len(self.virtual_sinks)
@property
def dual_edges(self):
return sorted(list(set(
[(a, b) for a, l in self.edges.items() for b in l if (a != b)]
+ [(b, a) for a, l in self.edges.items() for b in l if (a != b)]
)))
@property
def linear_edges(self):
edges = set()
for a, b in self.dual_edges:
if (a, b) not in edges:
if (b, a) not in edges:
edges.add((a, b))
return sorted(list(edges))
@property
def coverage_graph(self) -> Dict[str, Set[str]]: return {f'i{i}': set([f'p{p}' for p in pois]) for i, pois in self.sensor_poi.items()}
@property
def inverse_coverage_graph(self) -> Dict[str, Set[str]]: return {f'p{p}': set([f'i{i}' for i in sensors]) for p, sensors in self.poi_sensor.items()}
@property
def communication_graph(self) -> Dict[str, Set[str]]: return {f'i{i}': set([f'i{p}' for p in sensors]) for i, sensors in self.sensor_sensor.items()}
@property
def disposable_sensors(self) -> Set[str]:
# We consider DISPOSABLE all sensors that fill ALL this criteria:
# - Do not connects to at least two other sensors
# - Do not connects to at least one POI
# - Do not connects to at least sink (direct-bridge sensors)
if not hasattr(self, '_disposable_sensors'):
self._disposable_sensors = {f'i{i}' for i in range(self.num_sensors)
if all([len(self.sensor_sensor.get(i, [])) < 2,
i not in self.sensor_poi,
i not in self.sensor_sink])}
return self._disposable_sensors
@property
def connected_sensors(self) -> Set[str]:
        # Sensors with no connection at all to any other component (the `sensors` property excludes these)
if not hasattr(self, '_connected_sensors'):
self._connected_sensors = {f'i{i}' for i in range(self.num_sensors)
if all([i not in self.sensor_sensor,
i not in self.sensor_poi,
i not in self.sensor_sink])}
return self._connected_sensors
# SERVICES #########################################################################################################
@staticmethod
def _add_to(_dict:dict, key, value):
if key not in _dict:
_dict[key] = set()
_dict[key].add(value)
def to_single_sink(self, MAX_M=10):
if self.is_single_sink: return self
# For each sink and corresponding vsink
vsinks_added = []
virtual_sinks_map = {}
new_sensor_sensor = {s: ls.copy() for s, ls in self.sensor_sensor.items()}
for sink, sensors in self.sink_sensor.items():
for vsink in range(self.num_sensors+(sink*MAX_M), self.num_sensors+((sink+1)*MAX_M)):
vsinks_added.append(vsink)
self._add_to(virtual_sinks_map, sink, vsink)
for i in sensors:
self._add_to(new_sensor_sensor, vsink, i)
self._add_to(new_sensor_sensor, i, vsink)
        # Start a new instance with a modified string
result = KCMC_Instance(
instance_string=';'.join([
'KCMC',
f'{self.num_pois} {self.num_sensors+(MAX_M*self.num_sinks)} 1',
f'{self.area_side} {self.sensor_coverage_radius} {self.sensor_communication_radius}',
f'{self.random_seed}'
] +['PI']+[f'{p} {i}' for p, sensors in self.poi_sensor.items() for i in sensors]
+['II']+[f'{p} {i}' for p, sensors in new_sensor_sensor.items() for i in sensors]
+['IS']+[f'{vs} 0' for vs in vsinks_added]
+['END']),
accept_loose_pois=True, accept_loose_sensors=True, accept_loose_sinks=True
)
result.virtual_sinks_map = virtual_sinks_map
return result
def get_node_label(self, node, installation=None):
result = 'V'+node if node in self.virtual_sinks else node
if installation is not None:
if node.startswith('i'):
tree = installation.get(node)
if tree is not None:
result = result + f'.{tree}'
return result
def get_node_color(self, node, installation=None):
result = self.color_dict["vsink"] if node in self.virtual_sinks else self.color_dict[node[0]]
if installation is not None:
if node.startswith('p'): return self.color_dict['p']
if node.startswith('s'): return self.color_dict['s']
tree = installation.get(node)
if tree is None:
result = self.color_dict['offline']
else:
result = self.color_dict[f'tree_{int(tree) % 5}']
return result
def plot(self, labels=False, installation=None, minimal=False):
assert igraph is not None, 'IGRAPH NOT INSTALLED!'
g = igraph.Graph()
if minimal:
showing = set(self.pois+self.sinks)
if installation is None:
showing = showing.union(self.sensors)
else:
showing = showing.union(set([i for i, v in installation.items() if v is not None]))
g.add_vertices(list(showing))
g.add_edges([(i, j) for i, j in self.linear_edges if ((i in showing) and (j in showing))])
else:
g.add_vertices(self.pois+self.sinks+self.sensors)
g.add_edges(self.linear_edges)
layout = g.layout("kk") # Kamada-Kawai Force-Directed algorithm
# Set the COLOR of every NODE
g.vs["color"] = [self.get_node_color(node, installation) for node in g.vs["name"]]
# Set the NAME of the node as its label. Add the TREE it is installed on, if exists
if labels:
g.vs["label"] = [self.get_node_label(node, installation) for node in g.vs["name"]]
g.vs["label_size"] = 6
# Print as UNIDIRECTED
g.to_undirected()
return igraph.plot(g, layout=layout)
# ######################################################################################################################
# RUNTIME
def parse_block(df):
# Parse the instance as a KCMC_Instance object
df.loc[:, 'obj_instance'] = df['instance'].apply(
lambda instance: KCMC_Instance(instance,
accept_loose_pois=True,
accept_loose_sensors=True,
accept_loose_sinks=True)
)
# Extract basic attributes of the instance
df.loc[:, 'key'] = df['obj_instance'].apply(lambda i: i.key_str)
df.loc[:, 'random_seed'] = df['obj_instance'].apply(lambda i: i.random_seed)
df.loc[:, 'pois'] = df['obj_instance'].apply(lambda i: i.num_pois)
df.loc[:, 'sensors'] = df['obj_instance'].apply(lambda i: i.num_sensors)
df.loc[:, 'sinks'] = df['obj_instance'].apply(lambda i: i.num_sinks)
df.loc[:, 'area_side'] = df['obj_instance'].apply(lambda i: i.area_side)
df.loc[:, 'coverage_r'] = df['obj_instance'].apply(lambda i: i.sensor_coverage_radius)
df.loc[:, 'communication_r'] = df['obj_instance'].apply(lambda i: i.sensor_communication_radius)
# Extract attributes of the instance that cannot be calculated from other attributes
# Reformat the dataframe
df = df.fillna(False).drop_duplicates(
subset=(['key', 'random_seed'] + [col for col in df.columns if (col.startswith('K') or col.startswith('M'))])
).reset_index(drop=True)
return df
def parse_key(instance_key):
key = instance_key.replace('INSTANCE', 'KCMC').replace(':', '_')
if os.path.exists(sys.argv[1] + '/' + key + '.pq'):
return key, -1
# If we have to reprocess, start our own redis connection and extract the data
df = []
redis = StrictRedis(sys.argv[3], decode_responses=True)
for random_seed, instance in redis.hscan_iter(instance_key):
df.append({'instance': instance})
redis.close()
# With the connection closed, parse and save the data
df = pd.DataFrame(df)[['instance']]
if '--no-parsing' not in sys.argv:
df = parse_block(df)
target = sys.argv[1]
if target.endswith('.parquet'):
df.drop(columns=['obj_instance']).copy().to_parquet(target + f'/{key}.pq')
else:
with FileLock(target, timeout=30, delay=0.2):
df.to_csv(target, mode='a', header=False, index=None)
return key, len(df)
if __name__ == "__main__":
import sys, os, multiprocessing
import pandas as pd
from redis import StrictRedis
redis = StrictRedis(sys.argv[3], decode_responses=True)
list_keys = list(redis.scan_iter('INSTANCE:*'))
redis.close()
# Parse the REDIS data as a DataFrame
pool = multiprocessing.Pool(int(sys.argv[2]))
for num, pair in enumerate(pool.imap_unordered(parse_key, list_keys)):
key, qtd = pair
print(round(num / len(list_keys), 3), '\t', qtd, '\t', key)
pool.close()
```
|
{
"source": "jfrfonseca/symbolic_regression",
"score": 3
}
|
#### File: symbolic_regression/symbolic_regression/population.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 <NAME>"
# ======================================================================================================================
# IMPORTS
import pandas as pd
from numpy import asarray, power, subtract
from symbolic_regression.genotype import generate_decoded_genotype, evaluate_decoded_genotype
# ======================================================================================================================
# CREATION
def create_decoded_population(population_size, individual_size, input_size=1):
    return [generate_decoded_genotype(individual_size, input_size) for _ in range(population_size)]
# ======================================================================================================================
# FITNESS
def fitness_decoded_genotype(decoded_genotype, input_array, output_array, fitness_function_name):
# Compute the output for every input
observed_outputs = asarray([
        evaluate_decoded_genotype(decoded_genotype, *(inp if isinstance(inp, (list, tuple)) else (inp,)))
for inp in input_array
], dtype=float)
# Compute the fitness function
if fitness_function_name in {'MSE', 'MSD', 'MEAN_SQUARE_ERROR', 'MEAN_SQUARE_DEVIATION',
'MEAN_SQUARED_ERROR', 'MEAN_SQUARED_DEVIATION'}:
fitness = power(subtract(output_array, observed_outputs), 2).mean()
elif fitness_function_name in {'RMSE', 'RMSD', 'ROOT_MEAN_SQUARE_ERROR', 'ROOT_MEAN_SQUARE_DEVIATION',
'ROOT_MEAN_SQUARED_ERROR', 'ROOT_MEAN_SQUARED_DEVIATION'}:
fitness = power(power(subtract(output_array, observed_outputs), 2).mean(), 1/2)
else:
raise TypeError('UNKNOWN FITNESS FUNCTION: {}'.format(fitness_function_name))
return fitness
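# A tiny worked example of the two error measures above (hypothetical numbers):
# with target outputs [1, 2] and observed outputs [1, 4] the squared errors are
# [0, 4], so MSE = (0 + 4) / 2 = 2.0 and RMSE = sqrt(2.0) ~= 1.414.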
# ======================================================================================================================
# SELECTION
```
|
{
"source": "jfrfonseca/two_step_vrptw",
"score": 4
}
|
#### File: two_step_vrptw/two_step_vrptw/utils.py
```python
from math import sqrt
from copy import deepcopy
from sys import maxsize as int_inf
from typing import List, Tuple, Union, Dict, Iterator
from dataclasses import dataclass, field
from functools import lru_cache as memoized
from pandas import DataFrame
from numpy import power, sqrt
# ######################################################################################################################
# BASIC DATA CLASSES
@dataclass(frozen=True)
class Parametros(object):
peso_distancia: float
peso_urgencia: float
peso_recursoes: float
limite_recursoes: int
clientes_recursao: int
limite_iteracoes: int = 1000
qtd_novos_carros_por_rodada: int = 4
@dataclass(frozen=True)
class Posicao(object):
x: float
y: float
def __repr__(self): return f'({self.x}, {self.y})'
def __str__(self): return self.__repr__()
    # @memoized(maxsize=100)  # TODO: check the memory footprint before enabling memoization!
def distancia(self, outro) -> float:
return round(sqrt(float(self.x-outro.x)**2 + float(self.y-outro.y)**2), 3)
@dataclass(frozen=True)
class Cliente(Posicao):
demanda: float
inicio: int
fim: int
servico: int
tipo = 'Cliente'
def __repr__(self): return f'CLIENTE({self.demanda} ({self.x}, {self.y}) [{self.inicio}, {self.fim}] {self.servico})'
def __post_init__(self):
assert (self.fim - self.inicio) >= self.servico, f'CLIENTE COM JANELA DE SERVICO INVALIDA! {self}'
@dataclass(frozen=True)
class Deposito(Posicao):
demanda = 0.0
inicio = 0
fim = int_inf
servico = 0
tipo = 'Depósito'
def __repr__(self): return f'DEPOSITO({self.x}, {self.y})'
@dataclass(frozen=True, init=False)
class Mapa(object):
arquivo: str
nome: str
max_carros: int
capacidade_carro: float
deposito: Deposito
clientes: List[Cliente]
matriz_de_distancias: DataFrame
dict_referencias: Dict
def __init__(self, arquivo: str):
object.__setattr__(self, 'arquivo', arquivo)
nome, max_carros, capacidade_carro, deposito, clientes = self.parse_arquivo(arquivo)
matriz_de_distancias, dict_referencias = self.cria_matriz_de_distancias(deposito, clientes)
object.__setattr__(self, 'nome', nome)
object.__setattr__(self, 'max_carros', max_carros)
object.__setattr__(self, 'capacidade_carro', capacidade_carro)
object.__setattr__(self, 'deposito', deposito)
object.__setattr__(self, 'clientes', clientes)
object.__setattr__(self, 'matriz_de_distancias', matriz_de_distancias)
object.__setattr__(self, 'dict_referencias', dict_referencias)
def __repr__(self): return f'MAPA({self.nome}: {self.max_carros}x{self.capacidade_carro} ${len(self.clientes)})'
def __str__(self): return self.__repr__()
@staticmethod
def parse_arquivo(arquivo):
clientes = []
with open(arquivo, 'r') as fin:
for num_linha, linha in enumerate(fin):
linha = linha.strip()
if len(linha) == 0: continue
if num_linha == 0:
nome_teste = linha
continue
if num_linha == 4:
max_carros, capacidade_carro = [int(v) for v in linha.split(' ') if len(v) > 0]
continue
if num_linha >= 9:
_, x, y, demanda, inicio, fim, servico = [int(v) for v in linha.split(' ') if len(v) > 0]
if num_linha == 9:
deposito = Deposito(x=x, y=y)
else:
clientes.append(Cliente(x=x, y=y, demanda=demanda, inicio=inicio, fim=fim, servico=servico))
return nome_teste, max_carros, capacidade_carro, deposito, clientes
@staticmethod
def cria_matriz_de_distancias(deposito: Deposito, clientes: List[Cliente]) -> (DataFrame, dict):
lista_referencia = [deposito] + clientes
str_lista_referencia = list(map(str, lista_referencia))
        # Compute the distances between every point on the map
df_x = DataFrame([[i.x for i in lista_referencia]] * len(lista_referencia),
columns=str_lista_referencia, index=str_lista_referencia)
df_x = (df_x - df_x.T).applymap(lambda x: power(x, 2))
df_y = DataFrame([[i.y for i in lista_referencia]] * len(lista_referencia),
columns=str_lista_referencia, index=str_lista_referencia)
df_y = (df_y - df_y.T).applymap(lambda y: power(y, 2))
df_distancias = (df_x + df_y).applymap(sqrt).astype(float).round(3)
return df_distancias, dict(zip(str_lista_referencia, lista_referencia))
# ######################################################################################################################
# AGENT DATA CLASSES
@dataclass
class Carro(object):
id: str
origem: Deposito
velocidade: int
capacidade: float
carga: float = 0.0
agenda: List[Union[Cliente, Deposito]] = field(default_factory=list)
fim: int = 0
_inicio = None
def __post_init__(self):
self.agenda = [self.origem]
self.carga = self.capacidade
self.fim = 0
def __repr__(self): return f'Carro{self.id}(O+{len(self.agenda)}>>{self.posicao} |{self.carga}| [{self.inicio}, {self.fim}])'
def __str__(self): return self.__repr__()
@property
def posicao(self): return self.agenda[-1]
@property
def inicio(self):
        return 0 if len(self.agenda) <= 1 else max([
0,
self.agenda[1].inicio - self.tempo_deslocamento(origem=self.agenda[0], destino=self.agenda[1])
])
@property
def clientes_atendidos(self) -> set:
return set([str(cli) for cli in self.agenda if cli.tipo == 'Cliente'])
def tempo_deslocamento(self, destino:Union[Cliente, Deposito], distancia=None, origem=None) -> int:
pos = self.posicao if origem is None else origem
        distancia = pos.distancia(destino) if distancia is None else distancia  # speedup with a pre-computed distance
return int(distancia / self.velocidade) + 1
def reabastecimento(self, deposito: Deposito) -> (float, int, int):
distancia = self.posicao.distancia(deposito)
tempo_deslocamento = self.tempo_deslocamento(deposito, distancia=distancia)
delta_fim = tempo_deslocamento + deposito.servico
self.fim += delta_fim
self.agenda.append(deposito)
self.carga = self.capacidade
return distancia, tempo_deslocamento, delta_fim-tempo_deslocamento
def atendimento(self, cliente: Cliente) -> (float, int, int):
distancia = self.posicao.distancia(cliente)
tempo_deslocamento = self.tempo_deslocamento(cliente, distancia=distancia)
delta_fim = max([self.fim + tempo_deslocamento + cliente.servico, cliente.inicio + cliente.servico]) - self.fim
assert delta_fim > 0, f'ABASTECIMENTO INVALIDO {self} -> {cliente}'
self.fim += delta_fim
self.agenda.append(cliente)
self.carga = self.carga - cliente.demanda
return distancia, tempo_deslocamento, delta_fim-tempo_deslocamento
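    # Worked example for atendimento() above, with hypothetical numbers: if the
    # car is free at fim=10, travel takes 5 and service takes 3, but the client
    # window only opens at inicio=20, then delta_fim = max(10+5+3, 20+3) - 10 = 13,
    # the new fim is 23, and the returned layover is 13 - 5 = 8 time units of waiting.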
def resultado(self, display=True) -> Tuple[int, float, int, int, int]:
if display: print(self)
dummy = Carro(id='DUMMY:'+self.id, origem=self.origem, velocidade=self.velocidade, capacidade=self.capacidade)
distancia_total = 0.0
tempo_deslocamento_total = 0
tempo_layover_total = 0
for pos, item in enumerate(self.agenda):
fim_anterior = dummy.fim
if item.tipo == 'Cliente':
distancia, tempo_deslocamento, tempo_layover = dummy.atendimento(item)
else:
distancia, tempo_deslocamento, tempo_layover = dummy.reabastecimento(item)
if display:
print('\t', item)
if pos < (len(self.agenda)-1):
print('\t\t', distancia, '~', fim_anterior, '>>', tempo_deslocamento, '+', tempo_layover,
'>>', fim_anterior+tempo_deslocamento+tempo_layover)
distancia_total += distancia
tempo_deslocamento_total += tempo_deslocamento
tempo_layover_total += tempo_layover
return self.inicio, distancia_total, tempo_deslocamento_total, tempo_layover_total, self.fim
def copia_carro(og: Carro):
carro = Carro(id='COPY:' + og.id, origem=og.origem, velocidade=og.velocidade, capacidade=og.capacidade)
for item in og.agenda[1:]:
if item.tipo == 'Cliente':
carro.atendimento(item)
else:
carro.reabastecimento(item)
return carro
def unifica_agendas_carros(pri: Carro, seg: Carro):
assert pri.id != seg.id, 'TENTATIVA DE UNIFICAR CARROS DE MESMO ID!'
assert len({str(pri.origem), pri.velocidade, pri.capacidade}.intersection(
{str(seg.origem), seg.velocidade, seg.capacidade})
) == 3, 'TENTATIVA DE UNIFICAR CARROS DE CONFIGURAÇÕES DIFERENTES!'
carro = Carro(id=f'{pri.id}+{seg.id}', origem=pri.origem, velocidade=pri.velocidade, capacidade=pri.capacidade)
for item in pri.agenda[1:]:
if item.tipo == 'Cliente':
carro.atendimento(deepcopy(item))
else:
carro.reabastecimento(deepcopy(item))
for item in seg.agenda[1:]:
if item.tipo == 'Cliente':
carro.atendimento(deepcopy(item))
else:
carro.reabastecimento(deepcopy(item))
return carro
@dataclass(frozen=False, init=False)
class Frota(object):
mapa: Mapa
max_carros: int
capacidade_carro: float
velocidade_carro: int
carros: Dict
deposito: Deposito
def __init__(self, mapa: Mapa, velocidade_carro: int):
self.velocidade_carro = velocidade_carro
self.mapa = mapa
self.max_carros = mapa.max_carros
self.capacidade_carro = mapa.capacidade_carro
self.deposito = mapa.deposito
self.carros = {}
def __repr__(self): return f'Frota<{self.mapa.nome}>(|{len(self.carros)}/{self.mapa.max_carros}| x {len(self.clientes_atendidos)}/{len(self.mapa.clientes)}])'
def __str__(self): return self.__repr__()
def __len__(self): return len(self.carros)
def __getitem__(self, item): return self.mapa.dict_referencias[item]
def __iter__(self) -> Iterator[Carro]:
for carro in self.carros.values():
yield carro
@property
def clientes_atendidos(self) -> set:
result = set()
for carro in self.carros.values():
result = result.union(carro.clientes_atendidos)
return result
@property
def clientes_faltantes(self) -> set:
return set(map(str, self.mapa.clientes)) - self.clientes_atendidos
@property
def sumario(self) -> DataFrame:
sumario = []
for carro in self.carros.values():
inicio, distancia, t_desloc, t_layover, fim = carro.resultado(display=False)
sumario.append({'carro': carro.id, 'inicio': inicio, 'fim': fim, 'distancia': distancia,
'tempo_deslocamento': t_desloc, 'tempo_layover': t_layover,
'qtd_clientes': len(carro.clientes_atendidos)})
sumario = DataFrame(sumario)
sumario.loc[:, 'tempo_atividade'] = sumario['fim'] - sumario['inicio']
return sumario
def novo_carro(self) -> Carro:
carro = Carro(str(len(self.carros)), self.deposito, self.velocidade_carro, self.capacidade_carro)
self.carros[carro.id] = carro
return carro
def limpa_carros_sem_agenda(self):
para_remover = []
for id_carro, carro in self.carros.items():
if len(carro.agenda) < 2:
para_remover.append(id_carro)
for id_carro in para_remover:
self.carros.pop(id_carro)
return para_remover
def substitui_carros(self, novos_carros: List[Carro]):
self.carros = {c.id: c for c in novos_carros}
```
|
{
"source": "jfri2/Automated-Plant-System",
"score": 3
}
|
#### File: Automated-Plant-System/site/timelapse.py
```python
import datetime
import time
import os
import cv2
import shutil
import threading
import traceback
from pathlib import Path
class Timelapse:
def __init__(self):
self.timelapse_generation_in_process = False
self.TIMELAPSE_IMAGE_PATH = "/share/aps/timelapse/images/"
self.TIMELAPSE_VIDEO_PATH = "/share/aps/timelapse/videos/"
self.latest_timelapse_filename = "No timelapse video generated yet"
self.timelapse_progress = 0
self.timelapse_progress_enable = True
self.frame_generation_time = 0.35 # Seconds
def start_timelapse_video_generation(self):
# Start video generation
gen_timelapse_thread = threading.Thread(
target=self._generate_timelapse,
)
gen_timelapse_thread.daemon = True
gen_timelapse_thread.start()
def save_frame(self, frame):
# Only save images between 5:00am and 11:00pm local time
current_time = datetime.datetime.now()
# override limit on time to save frames
override = True
if (
(int(current_time.hour) < 23)
and ((int(current_time.hour) >= 5))
or override
):
# Save images
existing_files = os.listdir(self.TIMELAPSE_IMAGE_PATH)
if len(existing_files) == 0:
image_number = 0
else:
image_number = len(existing_files)
filename = (
self.TIMELAPSE_IMAGE_PATH + "{:010d}".format(image_number) + ".jpg"
)
cv2.imwrite(filename, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
# print('File written: {}'.format(filename))
image_number = image_number + 1
else:
pass
return
def _order_frames(self):
# Check files to see if they are in sequential order, rename any that are not in order
file_list = []
for filename in os.listdir(self.TIMELAPSE_IMAGE_PATH):
if filename.endswith(".jpg"):
file_list.append(os.path.join(self.TIMELAPSE_IMAGE_PATH, filename))
else:
continue
file_list.sort()
missing_files = []
last_filename = "-1"
for file in file_list:
filename = Path(os.path.basename(file)).stem
if int(filename) - 1 != int(last_filename):
missing_files.append(int(filename) - 1)
last_filename = filename
# Copy filename + 1 to filename and rename
for filename in missing_files:
new_filename = (
self.TIMELAPSE_IMAGE_PATH + "{:010d}".format(filename) + ".jpg"
)
filename_to_copy = (
self.TIMELAPSE_IMAGE_PATH + "{:010d}".format(filename + 1) + ".jpg"
)
shutil.copyfile(filename_to_copy, new_filename)
print("Missing file detected. Created new file {}".format(filename_to_copy))
def _generate_timelapse_progress(self, num_frames):
self.timelapse_progress_enable = True
print(
"{0:d} Frames to process, expected to take {1:.2f} seconds".format(
num_frames, num_frames * self.frame_generation_time
)
)
generate_timelapse_progress_thread = threading.Thread(
target=self._generate_timelapse_progress_exec, args=(num_frames,)
)
generate_timelapse_progress_thread.daemon = True
generate_timelapse_progress_thread.start()
def _generate_timelapse_progress_exec(self, num_frames):
start_time = time.time()
expected_end_time = start_time + (num_frames * self.frame_generation_time)
self.timelapse_progress = 0
while (time.time() <= expected_end_time) and (
self.timelapse_progress_enable == True
):
self.timelapse_progress = int(
((time.time() - start_time) / (expected_end_time - start_time)) * 100
)
if self.timelapse_progress >= 99:
self.timelapse_progress = 99
print("Next timelapse is {}% complete".format(self.timelapse_progress))
time.sleep(5)
self.timelapse_progress = 100
def _generate_timelapse(self):
old_timelapse_video_filename = ""
while True:
# Check if timelapse is currently being generated
if self.timelapse_generation_in_process == False:
self.timelapse_generation_in_process = True
# Order frames and fix gaps
self._order_frames()
print("Timelapse video generation started")
num_frames = len(os.listdir(self.TIMELAPSE_IMAGE_PATH))
self._generate_timelapse_progress(num_frames)
start_time = time.time()
timestamp = datetime.datetime.now()
time_string = timestamp.strftime("%Y-%m-%d_%H-%M-%S")
temp_timelapse_filename = time_string + ".mp4"
fps = 60
ffmpeg_command = (
"ffmpeg -f image2 -r {} -i ".format(fps)
+ self.TIMELAPSE_IMAGE_PATH
+ "%10d.jpg -vcodec libx264 -crf 18 -pix_fmt yuv420p -y "
+ self.TIMELAPSE_VIDEO_PATH
+ temp_timelapse_filename
+ " > /dev/null 2>&1"
)
os.system(ffmpeg_command)
generationTime = time.time() - start_time
print(
"Timelapse video generation complete, took {0:.2f} seconds".format(
generationTime
)
)
self.timelapse_progress_enable = False
old_timelapse_video_filename = self.latest_timelapse_filename
self.timelapse_generation_in_process = False
self.latest_timelapse_filename = temp_timelapse_filename
# Remove old timelapse file
old_timelapse_video_full_path = (
self.TIMELAPSE_VIDEO_PATH + old_timelapse_video_filename
)
if os.path.exists(old_timelapse_video_full_path):
try:
os.remove(old_timelapse_video_full_path)
print("Removed {}".format(old_timelapse_video_filename))
except Exception:
traceback.print_exc()
else:
print("Cannot delete file as it does not exist")
# Wait 5 minutes between generation of timelapse videos
time.sleep(60 * 5)
else:
pass
```
|
{
"source": "jfri3d/PlantBot",
"score": 2
}
|
#### File: PlantBot/scripts/inky_alert.py
```python
import inspect
import json
import logging
import math
import os
from datetime import datetime as dt
from PIL import Image, ImageFont, ImageDraw
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger
from dotenv import load_dotenv
from font_fredoka_one import FredokaOne
from inky import InkyWHAT
from constants import THIRSTY_PATH, HEALTHY_PATH, PLANT_DEF, INTERVAL, SUN_PATH, MOON_PATH, MAX_LUX, PLANT_ICON_PATH
from utils import latest_data, _build_header, _calculate_spacing, _load_image
load_dotenv(dotenv_path='.envrc')
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
scheduler = BlockingScheduler()
@scheduler.scheduled_job(CronTrigger(minute='1/{}'.format(INTERVAL), hour='*', day='*', month='*', day_of_week='*'))
def inky_update():
"""
PlantBot update via inkyWHAT based on which registered plant is below its respective moisture threshold.
"""
func_name = inspect.stack()[0][3]
logging.info('[{}] -> Starting Job'.format(func_name))
# initialize inky
inky = InkyWHAT("black")
inky.set_border(inky.BLACK)
# draw empty image
img = Image.new("P", (inky.WIDTH, inky.HEIGHT))
draw = ImageDraw.Draw(img)
# load the plant definition file
with open(PLANT_DEF, 'r') as src:
plant_def = json.load(src)
# determine number of plants
n = len(plant_def['plants'])
# build header
img = _build_header(inky, draw, img, n)
# determine spacing
dy = _calculate_spacing(inky, n)
# iterate through plants
font = lambda fs: ImageFont.truetype(FredokaOne, fs)
name_font = font(25)
time_font = font(12)
val_font = font(20)
for ind, p in enumerate(plant_def['plants']):
logging.info('[{}] -> Updating {}'.format(func_name, p['name']))
# query latest plant information
data = latest_data(p['name'], num=1)[0]
# define placement for plant X
edge = 5 # edge
y = dy * (ind + 1)
gap = lambda pct: 2 * math.ceil(dy * pct / 2)
# add icon
icon = _load_image(os.path.join(PLANT_ICON_PATH, p['icon']), dy - gap(0.2))
img.paste(icon, box=(edge, y + gap(0.2) // 2))
# add name
message = p['name']
w, h = name_font.getsize(message)
loc = y + dy // 2 - h // 2
draw.text((dy + edge, loc - edge), message, inky.BLACK, name_font)
# add measurement time
message = dt.strptime(data['date'], "%Y/%m/%d, %H:%M:%S").strftime("%d.%m.%Y %H:%M")
w, h = time_font.getsize(message)
time_edge = 2
draw.text((dy + edge, y + dy - h - time_edge), message, inky.BLACK, time_font)
# add vertical line
draw.line((200, y, 200, y + dy), fill=inky.BLACK, width=2)
# add moisture information
message = "{}%".format(int(data['moisture']))
w, h = val_font.getsize(message)
loc = y + dy // 2 - h // 2
draw.text((200 + edge, loc - edge), message, inky.BLACK, val_font)
# logic based on moisture [different logo]
if data['moisture'] < p['min_moisture']:
logging.info('[{}] -> Need to water {} [{}%]!!!'.format(func_name, p['name'], data['moisture']))
icon = _load_image(THIRSTY_PATH, dy - gap(0.1))
else:
logging.info('[{}] -> Healthy moisture ({} %)!'.format(func_name, data['moisture']))
icon = _load_image(HEALTHY_PATH, dy - gap(0.1))
# "paste" image
img.paste(icon, box=(250 + dy // 2 - icon.size[0] // 2, y + gap(0.1) // 2))
# add vertical line
draw.line((300, y, 300, y + dy), fill=inky.BLACK, width=2)
# add temperature/light information
message = "{}°C".format(int(data['temperature']))
w, h = val_font.getsize(message)
loc = y + dy // 2 - h // 2
draw.text((300 + edge, loc - edge), message, inky.BLACK, val_font)
# logic based on light [different logo + scaling]
dy_sun = 0
if data['light'] < 10:
icon = _load_image(MOON_PATH, dy - gap(0.1))
else:
sun_size = max([gap(0.1), dy - int((math.log(MAX_LUX) - math.log(data['light'])) * 6)])
icon = _load_image(SUN_PATH, sun_size)
dy_sun = dy // 2 - icon.size[1] // 2
# "paste" image
img.paste(icon, box=(350 + dy // 2 - icon.size[0] // 2, y + dy_sun + gap(0.1) // 2))
# display on inky
inky.set_image(img)
inky.show()
if __name__ == "__main__":
scheduler.add_job(inky_update)
scheduler.start()
```
#### File: PlantBot/scripts/plantbot.py
```python
import inspect
import logging
import os
from datetime import datetime as dt
from datetime import timedelta
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.combining import OrTrigger
from apscheduler.triggers.cron import CronTrigger
from dotenv import load_dotenv
from constants import INTERVAL
from utils import get_daylight_hours, get_plant_data
load_dotenv(dotenv_path='.envrc')
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
scheduler = BlockingScheduler()
# get geolocation from ENV
LAT = os.environ.get("LAT")
LON = os.environ.get("LON")
@scheduler.scheduled_job(CronTrigger(minute='0', hour='0', day='*', month='*', day_of_week='*'))
def daily_trigger():
"""
PlantBot trigger for connecting to all Mi Flora sensors (based on MAC address). Note that data is collected in
different frequencies between day and night. Night is hourly, while day is based on INTERVAL between sunrise and
sunset.
"""
func_name = inspect.stack()[0][3]
logging.info('[{}] -> Starting Job'.format(func_name))
# get daylight times based on location
today = dt.now().date()
sunrise, sunset = get_daylight_hours(LAT, LON, today)
# build trigger based on time of the day
tomorrow = today + timedelta(days=1)
morning = CronTrigger(minute='0', hour='0-{}'.format(sunrise), day='*', month='*', day_of_week='*',
end_date=tomorrow)
day = CronTrigger(minute='0/{}'.format(INTERVAL), hour='{}-{}'.format(sunrise, sunset), day='*', month='*',
day_of_week='*', end_date=tomorrow)
night = CronTrigger(minute='0', hour='{}-23'.format(sunset), day='*', month='*', day_of_week='*', end_date=tomorrow)
trigger = OrTrigger([morning, day, night])
# add new scheduled jobs
logging.info("[{}] -> Adding scheduled job".format(func_name))
scheduler.add_job(get_plant_data, trigger)
if __name__ == "__main__":
scheduler.add_job(daily_trigger)
scheduler.start()
```
|
{
"source": "jfri3d/wfh",
"score": 3
}
|
#### File: wfh/api/client.py
```python
import logging
import sqlite3
from datetime import datetime
from wfh.api.model import Actions
class DBClient:
def __init__(self, db_path, table_name):
self.db_path = db_path
self.table_name = table_name
self.client = self._connect()
self._create_table()
def _connect(self):
logging.info(f"[client] connecting -> {self.db_path}")
return sqlite3.connect(self.db_path)
def _create_table(self):
logging.info(f"[client] creating table -> {self.table_name}")
cur = self.client.cursor()
command = f"""
CREATE TABLE IF NOT EXISTS {self.table_name} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
d DATE DEFAULT (datetime('now','localtime')),
action str NOT NULL)"""
cur.execute(command)
self.client.commit()
def insert_action(self, action: Actions):
logging.info(f"[client] insert action -> {action.name}")
self.client = self._connect()
cur = self.client.cursor()
command = f"INSERT INTO {self.table_name} (action) VALUES(?)"
cur.execute(command, (action.name,))
self.client.commit()
def get_actions(self, action: Actions, limit_today=False):
logging.info(f"[client] get action -> {action.name}")
self.client = self._connect()
cur = self.client.cursor()
command = f"SELECT datetime(d, 'localtime'), action FROM {self.table_name} WHERE action = ?"
values = (action.name,)
if limit_today:
logging.info(f"[client] limit query to today")
command += " AND date(d, 'localtime') = ?"
values += (datetime.now().strftime('%Y-%m-%d'),)
result = cur.execute(command, values).fetchall()
return {"action": action.name, "dates": [v[0] for v in result]}
def get_today(self, today: str):
logging.info(f"[client] get today -> {today}")
self.client = self._connect()
cur = self.client.cursor()
command = f"SELECT action, datetime(d, 'localtime') FROM {self.table_name} WHERE date(d, 'localtime') = ?"
raw = cur.execute(command, (today,)).fetchall()
# insert
out = {action.name: [] for action in Actions}
for r in raw:
out[r[0]].append(r[1])
return out
```
#### File: wfh/display/exceptions.py
```python
class AppError(Exception):
def __init__(self, msg):
self.msg = msg
class InvalidConnection(AppError):
def __init__(self, msg: str):
super().__init__(f"InvalidConnection: {msg}")
```
|
{
"source": "jfriant/backuppc-archive-s3",
"score": 2
}
|
#### File: jfriant/backuppc-archive-s3/backup-manager.py
```python
import optparse
import os
import pwd
import secrets
import sys
import time
from boto.s3.connection import S3Connection
from collections import defaultdict
from math import log10
from subprocess import Popen
class BackupManager:
def __init__(self, accesskey, sharedkey):
self._accesskey = accesskey
self._connection = S3Connection(accesskey, sharedkey)
self._buckets = None
self._bucketbackups = {}
self._backups = None
def _generate_backup_buckets(self):
bucket_prefix = self._accesskey.lower() + '-bkup-'
buckets = self._connection.get_all_buckets()
self._buckets = []
for bucket in buckets:
if bucket.name.startswith(bucket_prefix):
self._buckets.append(bucket)
@property
def backup_buckets(self): # property
if self._buckets is None:
self._generate_backup_buckets()
return self._buckets
def _list_backups(self, bucket):
"""Returns a dict of backups in a bucket, with dicts of:
{hostname (str):
{Backup number (int):
{'date': Timestamp of backup (int),
'keys': A list of keys comprising the backup,
'hostname': Hostname (str),
'backupnum': Backup number (int),
'finalized': 0, or the timestamp the backup was finalized
}
}
}
"""
backups = {}
for key in bucket.list():
keyparts = key.key.split('.')
final = False
if keyparts[-1] == 'COMPLETE':
final = True
keyparts.pop() # back to tar
keyparts.pop() # back to backup number
else:
if keyparts[-1] == 'gpg':
keyparts.pop()
if keyparts[-1] != 'tar' and len(keyparts[-1]) == 2:
keyparts.pop()
if keyparts[-1] == 'tar':
keyparts.pop()
nextpart = keyparts.pop()
if nextpart == 'COMPLETE':
print(("Stray file: %s" % key.key))
continue
backupnum = int(nextpart)
hostname = '.'.join(keyparts)
lastmod = time.strptime(key.last_modified,
'%Y-%m-%dT%H:%M:%S.000Z')
            if hostname in backups:
                if backupnum not in backups[hostname]:
backups[hostname][backupnum] = {
'date': lastmod,
'hostname': hostname,
'backupnum': backupnum,
'finalized': 0,
'keys': [],
'finalkey': None,
'finalized_age': -1,
}
else:
backups[hostname] = {
backupnum: {
'date': lastmod,
'hostname': hostname,
'backupnum': backupnum,
'finalized': 0,
'keys': [],
'finalkey': None,
'finalized_age': -1,
}
}
if final:
backups[hostname][backupnum]['finalized'] = lastmod
backups[hostname][backupnum]['finalkey'] = key
timestamp = time.mktime(lastmod)
delta = int(time.time() - timestamp + time.timezone)
backups[hostname][backupnum]['finalized_age'] = delta
else:
if lastmod < backups[hostname][backupnum]['date']:
backups[hostname][backupnum]['date'] = lastmod
backups[hostname][backupnum]['keys'].append(key)
return backups
def get_backups_by_bucket(self, bucket):
if bucket.name not in self._bucketbackups:
self._bucketbackups[bucket.name] = self._list_backups(bucket)
return self._bucketbackups[bucket.name]
@property
def all_backups(self): # property
if self._backups is None:
sys.stderr.write("Enumerating backups")
self._backups = {}
for bucket in self.backup_buckets:
backups_dict = self.get_backups_by_bucket(bucket)
for hostname, backups in list(backups_dict.items()):
sys.stderr.write('.')
sys.stderr.flush()
if hostname not in self._backups:
self._backups[hostname] = {}
self._backups[hostname].update(backups)
sys.stderr.write("\n")
return self._backups
def invalidate_host_cache(self, hostname):
nuke = []
for bucket in self._bucketbackups:
if hostname in self._bucketbackups[bucket]:
nuke.append(bucket)
for bucket in nuke:
if bucket in self._bucketbackups:
del self._bucketbackups[bucket]
self._backups = None
@property
def backups_by_age(self): # property
"Returns a dict of {hostname: [(backupnum, age), ...]}"
results = defaultdict(list)
for hostname, backups in list(self.all_backups.items()):
for backupnum, statusdict in list(backups.items()):
results[hostname].append((backupnum,
statusdict['finalized_age']))
return results
def choose_host_to_backup(agedict, target_count=2):
"Takes a dict from backups_by_age, returns a hostname to back up."
host_scores = defaultdict(int)
for hostname, backuplist in list(agedict.items()):
bl = sorted(backuplist, key=lambda x: x[1])
if len(bl) > 0 and bl[0][1] == -1:
# unfinalized backup alert
host_scores[hostname] += 200
bl.pop(0)
if len(bl) >= target_count:
host_scores[hostname] -= 100
host_scores[hostname] -= len(bl)
if len(bl) > 0:
# age of oldest backup helps score
oldest = bl[0]
host_scores[hostname] += log10(oldest[1])
# recency of newest backup hurts score
newest = bl[-1]
host_scores[hostname] -= log10(max(1, (oldest[1] - newest[1])))
for candidate, score in sorted(list(host_scores.items()),
key=lambda x: x[1], reverse=True):
yield (candidate, score)
def choose_backups_to_delete(agedict, target_count=2, max_age=30):
"Takes a dict from backups_by_age, returns a list of backups to delete"
decimate = defaultdict(list)
for hostname, backuplist in list(agedict.items()):
bl = []
for backup in sorted(backuplist, key=lambda x: x[1]):
if backup[1] > 0:
bl.append(backup)
while len(bl) > target_count:
backup = bl.pop()
if backup[1] > (max_age * 24 * 60 * 60):
decimate[hostname].append(backup)
return decimate
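# Hedged usage sketch for the two selection helpers above (illustrative only;
# `bmgr` is assumed to be a BackupManager built with valid credentials):
#
#   candidate, score = next(choose_host_to_backup(bmgr.backups_by_age))
#   stale = choose_backups_to_delete(bmgr.backups_by_age, target_count=2, max_age=30)
#
# `stale` maps hostname -> [(backupnum, age_in_seconds), ...] for finalized
# backups older than 30 days, beyond the 2 most recent ones.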
def iter_urls(keyset, expire=86400):
"""Given a list of keys and an optional expiration time (in seconds),
returns an iterator of URLs to fetch to reassemble the backup."""
for key in keyset:
yield key.generate_url(expires_in=expire)
def make_restore_script(backup, expire=86400):
"""Returns a quick and easy restoration script to restore the given system,
requires a backup, and perhaps expire"""
myhostname = backup['hostname']
mybackupnum = backup['backupnum']
myfriendlytime = time.strftime('%Y-%m-%d at %H:%M GMT', backup['date'])
myexpiretime = time.strftime('%Y-%m-%d at %H:%M GMT',
time.gmtime(time.time() + expire))
myexpiretimestamp = time.time() + expire
output = []
output.append('#!/bin/sh\n')
output.append('# Restoration script for %s backup %s,\n' % (
myhostname, mybackupnum))
output.append('# a backup created on %s.\n' % (myfriendlytime))
output.append('# To use: bash scriptname /path/to/put/the/files\n\n')
output.append('# WARNING: THIS FILE EXPIRES AFTER %s\n' % (myexpiretime))
output.append('if [ "`date +%%s`" -gt "%i" ];\n' % (myexpiretimestamp))
output.append(' then echo "Sorry, this restore script is too old.";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('if [ -z "$1" ];\n')
output.append(' then echo "Usage: ./scriptname /path/to/restore/to";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('# Check the destination\n')
output.append('if [ ! -d $1 ];\n')
output.append(' then echo "Target $1 does not exist!";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('if [ -n "`ls --almost-all $1`" ];\n')
output.append(' then echo "Target $1 is not empty!";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('# cd to the destination, create a temporary workspace\n')
output.append('cd $1\n')
output.append('mkdir .restorescript-scratch\n\n')
output.append('# retrieve files\n')
mysortedfilelist = []
for key in backup['keys']:
output.append('wget -O $1/.restorescript-scratch/%s "%s"\n' % (
key.name, key.generate_url(expires_in=expire)))
mysortedfilelist.append('.restorescript-scratch/' + key.name)
mysortedfilelist.sort()
output.append('\n# decrypt files\n')
output.append('gpg --decrypt-files << EOF\n')
output.append('\n'.join(mysortedfilelist))
output.append('\nEOF\n')
output.append('\n# join and untar files\n')
output.append('cat .restorescript-scratch/*.tar.?? | tar -xf -\n\n')
output.append('echo "DONE! Have a nice day."\n##\n')
return output
def start_archive(hosts):
"Starts an archive operation for a list of hosts."
if 'LOGNAME' in os.environ:
username = os.environ['LOGNAME']
else:
try:
username = pwd.getpwuid(os.getuid()).pw_name
except KeyError:
username = 'nobody'
scriptdir = os.path.dirname(sys.argv[0])
cmd = [os.path.join(scriptdir, 'BackupPC_archiveStart'), 'archives3',
username]
cmd.extend(hosts)
proc = Popen(cmd)
proc.communicate()
def main():
# check command line options
parser = optparse.OptionParser(
usage="usage: %prog [options] [list|delete|script]",
description="" +
"Companion maintenance script for BackupPC_archiveHost_s3. " +
"By default, it assumes the 'list' command, which displays all " +
"of the backups currently archived on S3. The 'delete' command " +
"is used to delete backups. The 'script' command produces a " +
"script that can be used to download and restore a backup.")
parser.add_option("-H", "--host", dest="host",
help="Name of backed-up host")
parser.add_option("-b", "--backup-number", dest="backupnum",
help="Backup number")
parser.add_option("-a", "--age", dest="age",
help="Delete backups older than AGE days")
parser.add_option("-k", "--keep", dest="keep",
help="When used with --age, keep this many recent " +
"backups (default=1)", default=1)
parser.add_option("-f", "--filename", dest="filename",
help="Output filename for script")
parser.add_option("-x", "--expire", dest="expire",
help="Maximum age of script, default 86400 seconds")
parser.add_option("-t", "--test", dest="test", action="store_true",
help="Test mode; don't actually delete")
parser.add_option("-u", "--unfinalized", dest="unfinalized",
action="store_true", help="Consider unfinalized backups")
parser.add_option("-s", "--start-backups", dest="start",
action="store_true",
help="When used with --age, start backups for hosts " +
"with fewer than keep+1 backups")
parser.add_option("-l", "--list", dest="list", action="store_true",
help="List stored backups after completing operations")
(options, args) = parser.parse_args()
bmgr = BackupManager(secrets.accesskey, secrets.sharedkey)
if options.backupnum and not options.host:
parser.error('Must specify --host when specifying --backup-number')
if options.backupnum:
options.backupnum = int(options.backupnum)
if len(args) == 0:
args.append('list')
if len(args) > 1:
parser.error('Too many arguments.')
if args[0] != 'delete' and options.age:
parser.error('--age only makes sense with delete')
if options.start and not (args[0] == 'delete' and options.age):
parser.error('--start-backups only makes sense with delete and --age')
if args[0] != 'script' and (options.expire or options.filename):
parser.error('--expire and --filename only make sense with script')
if args[0] in ['list', 'script', 'delete']:
if options.host:
if options.host not in bmgr.all_backups:
parser.error('No backups found for host "%s"' % options.host)
else:
if len(bmgr.all_backups) == 0:
parser.error('No buckets found!')
else:
        parser.error('Invalid option: %s' % args[0])
if args[0] == 'script':
if not options.host:
parser.error('Must specify --host to generate a script for')
if not options.backupnum and options.unfinalized:
# assuming highest number
options.backupnum = max(bmgr.all_backups[options.host].keys())
elif not options.backupnum:
# assuming highest finalized number
options.backupnum = 0
for backup in list(bmgr.all_backups[options.host].keys()):
if bmgr.all_backups[options.host][backup]['finalized'] > 0:
options.backupnum = max(options.backupnum, backup)
if options.backupnum == 0:
parser.error('No finalized backups found! Try '
'--unfinalized if you dare')
backup = bmgr.all_backups[options.host][options.backupnum]
if not options.expire:
options.expire = "86400"
if options.filename:
fd = open(options.filename, 'w')
fd.writelines(make_restore_script(backup,
expire=int(options.expire)))
else:
sys.stdout.writelines(make_restore_script(backup,
expire=int(options.expire)))
elif args[0] == 'delete':
to_ignore = int(options.keep)
to_delete = []
if options.host and options.backupnum:
print(("Will delete backup: %s %i (forced)" % (
options.host, options.backupnum)))
to_delete.append((options.host, options.backupnum))
elif options.age:
to_delete_dict = choose_backups_to_delete(bmgr.backups_by_age,
target_count=to_ignore,
max_age=int(options.age))
for hostname, backuplist in list(to_delete_dict.items()):
for backupstat in backuplist:
print(("Will delete backup: %s %i (expired at %g days)" % (
hostname, backupstat[0], backupstat[1] / 86400.0)))
to_delete.append((hostname, backupstat[0]))
else:
parser.error('Need either an age or a host AND backup number.')
if len(to_delete) > 0:
for deletehost, deletebackupnum in to_delete:
hostbackups = bmgr.all_backups.get(deletehost, {})
deletebackup = hostbackups.get(deletebackupnum, {})
deletekeys = deletebackup.get('keys', [])
finalkey = deletebackup.get('finalkey', None)
if len(deletekeys) > 0:
sys.stdout.write("Deleting backup: %s %d (%d keys)" % (
deletehost, deletebackupnum, len(deletekeys)))
for key in deletekeys:
if options.test:
sys.stdout.write('_')
else:
key.delete()
sys.stdout.write('.')
sys.stdout.flush()
if finalkey is not None:
if options.test:
sys.stdout.write('+')
else:
finalkey.delete()
sys.stdout.write('!')
sys.stdout.flush()
sys.stdout.write('\n')
if options.start:
for deletehost, deletebackupnum in to_delete:
bmgr.invalidate_host_cache(deletehost)
score_iter = choose_host_to_backup(bmgr.backups_by_age,
target_count=int(options.keep) + 1)
for candidate, score in score_iter:
if score > 0:
sys.stdout.write('Starting archive operation for host: '
'%s (score=%g)\n' % (candidate, score))
start_archive([candidate])
break
if args[0] == 'list' or options.list:
sys.stdout.write('%25s | %5s | %20s | %5s\n' % (
"Hostname", "Bkup#", "Age", "Files"))
sys.stdout.write(('-' * 72) + '\n')
for hostname, backups in list(bmgr.all_backups.items()):
for backupnum in sorted(backups.keys()):
filecount = len(backups[backupnum]['keys'])
datestruct = backups[backupnum]['date']
if backups[backupnum]['finalized'] > 0:
inprogress = ''
else:
inprogress = '*'
timestamp = time.mktime(datestruct)
delta = int(time.time() - timestamp + time.timezone)
if delta < 3600:
prettydelta = '%i min ago' % (delta / 60)
elif delta < 86400:
prettydelta = '%i hr ago' % (delta / 3600)
else:
days = int(delta / 60 / 60 / 24)
if days == 1:
s = ''
else:
s = 's'
prettydelta = '%i day%s ago' % (days, s)
sys.stdout.write('%25s | %5i | %20s | %5i%s\n' % (
hostname, backupnum, prettydelta, filecount, inprogress))
sys.stdout.write('* == not yet finalized (Age == time of '
'last activity)\n')
if __name__ == '__main__':
main()
```
|
{
"source": "j-friedrich/neuralOFC",
"score": 2
}
|
#### File: j-friedrich/neuralOFC/figS5_CL_alignment.py
```python
from ofc import System, parmap
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
from scipy.ndimage import median_filter
from scipy.stats import ttest_ind
# Okabe & Ito's colorblind friendly palette
colors = ["#E69F00", "#56B4E9", "#009E73", "#F0E442",
"#0072B2", "#D55E00", "#CC79A7", "#000000"]
plt.rc('axes', prop_cycle=plt.cycler('color', colors))
plt.rc('font', size=18)
plt.rc('legend', **{'fontsize': 14})
s0 = System(A=np.array([[1, 1], [0, 1]]),
B=np.array([[0], [1]]),
C=np.eye(2),
V=.01 * np.eye(2),
W=np.diag([.04, .25]),
Q=np.array([[1, 0], [0, 0]]),
R=np.ones((1, 1)),
T=11)
# EVs of initial L*pseudoinv(C') are 1-2*lam and 1
def foo(lam):
L0 = np.array([[1-lam, lam], [lam, 1-lam]], dtype=float)
def objective(lr, seed):
try:
return s0.SysID(s0.A.astype(float), s0.B.astype(float), s0.C.astype(float), L0,
(0, 0, 0, lr), init_seed=seed, episodes=10000)[-1].mean()
except:
return 1000
opt = minimize(lambda lr: np.mean(parmap(lambda seed: objective(lr, seed*10000),
range(20))), .05 if lam < .5 else .0001)
return opt, np.array(parmap(lambda seed: s0.SysID(
s0.A.astype(float), s0.B.astype(float), s0.C.astype(float), L0,
(0, 0, 0, opt.x), init_seed=seed*10000, episodes=10000)[-1], range(20)))
lams = np.hstack([np.arange(0, .49, .05), np.arange(.46, .491, .01)])
try:
res, res2 = list((np.load('results/CL_alignment.npz', allow_pickle=True)[r]
for r in ('res', 'res2')))
except:
res = list(map(foo, lams))
res2 = list(map(foo, (.5, .51)))
np.savez('results/CL_alignment.npz', res=res, res2=res2)
plt.figure(figsize=(6, 4))
plt.errorbar(1-2*lams, [r[1].mean() for r in res],
[r[1].mean(1).std()/np.sqrt(19) for r in res], label='all 10000 epochs')
plt.errorbar(1-2*lams, [r[1][:, -100:].mean() for r in res],
[r[1][:, -100:].mean(1).std()/np.sqrt(19) for r in res], label='last 100 epochs')
plt.legend(title='MSE averaged over')
plt.xlabel(r'Minimal eigenvalue $\lambda_1$ of initial $LC^{\top+}$')
plt.ylabel('MSE')
plt.tight_layout(pad=.05)
plt.savefig('fig/CL_alignment_avg.pdf')
pvalues = np.array([[ttest_ind(r[1][:, -100:].mean(1), rr[1]
[:, -100:].mean(1)).pvalue for r in res] for rr in res])
print(pvalues.min())
print(pvalues[:-1,:-1].min())
plt.figure(figsize=(6, 4))
for i, (ev, d) in enumerate(((1, res[0][1]), (.04, res[-2][1]), (.02, res[-1][1]),
(0, res2[0][1]), (-.02, res2[1][1]))[::-1]):
plt.fill_between(range(d.shape[1]),
median_filter(d.mean(0)+d.std(0)/np.sqrt(len(d)-1), 101),
median_filter(d.mean(0)-d.std(0)/np.sqrt(len(d)-1), 101),
alpha=.3, color='C%g' % i)
plt.plot(median_filter(d.mean(0), 101), label=str(ev), c='C%g' % i)
plt.legend(title=r'Minimal eigenvalue $\lambda_1$', ncol=3)
plt.xlabel('Episodes')
plt.ylabel('MSE')
plt.tight_layout(pad=.05)
plt.savefig('fig/CL_alignment.pdf')
```
|
{
"source": "j-friedrich/neuronalGPR",
"score": 3
}
|
#### File: j-friedrich/neuronalGPR/Dropout.py
```python
import numpy as np
import os
import sys
from time import time
import net
# pass the name of the UCI Dataset directory as 1st argument,
# the number of hidden layers as 2nd.
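# Example (hedged; assumes the DropoutUncertaintyExps-master data is present):
#   python Dropout.py bostonHousing 2
# Both arguments are optional and default to 'bostonHousing' and 1 below.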
try:
data_directory = sys.argv[1]
except:
data_directory = 'bostonHousing'
try:
n_layers = int(sys.argv[2])
except:
n_layers = 1
epochs = 40
epochs_multiplier = 100
_UCI_DIRECTORY_PATH = "DropoutUncertaintyExps-master/UCI_Datasets/"
subfolders = [f.name for f in os.scandir(_UCI_DIRECTORY_PATH) if f.is_dir()]
subfolders.sort()
if data_directory not in subfolders:
raise ValueError("data directory must be one of the following " +
repr(subfolders) + " but was " + data_directory)
_DATA_DIRECTORY_PATH = _UCI_DIRECTORY_PATH + data_directory + "/data/"
data = np.loadtxt(_DATA_DIRECTORY_PATH + "data.txt")
index_features = np.loadtxt(_DATA_DIRECTORY_PATH + "index_features.txt")
index_target = np.loadtxt(_DATA_DIRECTORY_PATH + "index_target.txt")
X = data[:, [int(i) for i in index_features.tolist()]]
y = data[:, int(index_target.tolist())]
n_splits = int(np.loadtxt(_DATA_DIRECTORY_PATH + 'n_splits.txt'))
n_hidden = int(np.loadtxt(_DATA_DIRECTORY_PATH + "n_hidden.txt"))
validation_ll = np.genfromtxt(_UCI_DIRECTORY_PATH + data_directory + "/results/validation_ll_" +
str(epochs_multiplier) + "_xepochs_1_hidden_layers.txt")
def _get_index_train_test_path(split_num, train=True):
"""
Method to generate the path containing the training/test split for the given
split number (generally from 1 to 20).
@param split_num Split number for which the data has to be generated
@param train Is true if the data is training data. Else false.
@return path Path of the file containing the required data
"""
if train:
return _DATA_DIRECTORY_PATH + "index_train_" + str(split_num) + ".txt"
else:
return _DATA_DIRECTORY_PATH + "index_test_" + str(split_num) + ".txt"
perf = np.nan * np.zeros((n_splits, 6, 4))
np.random.seed(1)
for split in range(n_splits):
# We load the indexes of the training and test sets
print('Loading file: ' + _get_index_train_test_path(split, train=True))
print('Loading file: ' + _get_index_train_test_path(split, train=False))
index_train = np.loadtxt(_get_index_train_test_path(split, train=True))
index_test = np.loadtxt(_get_index_train_test_path(split, train=False))
X_train = X[[int(i) for i in index_train.tolist()]]
y_train = y[[int(i) for i in index_train.tolist()]]
X_test = X[[int(i) for i in index_test.tolist()]]
y_test = y[[int(i) for i in index_test.tolist()]]
tmp = validation_ll[split * len(validation_ll) // n_splits:
(split + 1) * len(validation_ll) // n_splits, 1::2]
best_dropout, best_tau = tmp[np.argmax(tmp[:, 2]), :2]
best_network = net.net(X_train, y_train, ([n_hidden] * n_layers), normalize=True,
n_epochs=int(epochs * epochs_multiplier), tau=best_tau,
dropout=best_dropout)
for j, T in enumerate((1, 1, 10, 100, 1000, 10000)):
# there's apparently some overhead for the first prediction, hence it's done twice
t = -time()
error, MC_error, ll = best_network.predict(X_test, y_test, T)
t += time()
perf[split, j] = error, MC_error, ll, t
np.save('results/Dropout/%s_%glayers' % (data_directory, n_layers), perf)
```
#### File: j-friedrich/neuronalGPR/figS5_stream.py
```python
from copy import deepcopy
from GPnet import RMSE, NLPD, logpdf
import GPy
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
sys.path.append('streaming_sparse_gp-master/code')
plt.rc('font', size=18)
plt.rc('legend', **{'fontsize': 12})
plt.rc('lines', linewidth=3)
plt.rc('pdf', fonttype=42)
# create figure directory if not existent yet
os.makedirs('fig', exist_ok=True)
os.makedirs('results/streamingGP', exist_ok=True)
# Snelson's example data
X = np.genfromtxt('snelson_data/train_inputs')[:, None]
Y = np.genfromtxt('snelson_data/train_outputs')[:, None]
N = len(X)
# full GP
kernel = GPy.kern.RBF(1)
truth = GPy.models.GPRegression(X, Y, kernel)
truth.optimize()
np.random.seed(0)
Xstream, Ystream = [], []
for _ in range(100):
x = np.random.rand(N // 2)[:, None] * 6
y = truth.posterior_samples_f(x, 1)[..., 0] + \
np.sqrt(truth.Gaussian_noise.variance) * np.random.randn(N // 2, 1)
Xstream.append(x)
Ystream.append(y)
Xstream = np.ravel(Xstream)[:, None]
Ystream = np.ravel(Ystream)[:, None]
# np.savez_compressed('stream.npz', Xstream=Xstream, Ystream=Ystream)
# Xstream = np.load('stream.npz')['Xstream']
# Ystream = np.load('stream.npz')['Ystream']
def d(x, pm=-1):
return np.mean(x, 0) + pm * np.std(x, 0) / np.sqrt(len(x) - 1)
# sparse GP
def sparseSGD(run, eta, etaV, T):
np.random.seed(run)
idx_train = np.sort(np.random.choice(range(N), N // 2, False))
idx_test = np.setdiff1d(range(N), idx_train)
Xtest = X[idx_test]
Ytest = Y[idx_test]
vfe = GPy.models.SparseGPRegression(Xstream, Ystream, GPy.kern.RBF(1), num_inducing=6)
vfe.Gaussian_noise.variance = truth.Gaussian_noise.variance
vfe.optimize()
"""Computation of weights w for mean prediction and weigths w^Sigma & bias
b^Sigma (wV & bV) for variance prediction using stochastic gradient decent"""
K_uf_test = vfe.kern.K(vfe.Z, Xtest)
w = np.zeros((6, 1))
wV, bV = 1, truth.Gaussian_noise.variance[0]
rmse = np.zeros(T)
nlpd = np.zeros(T)
for t in range(T):
genX = Xstream[t * 100:(t + 1) * 100]
genY = Ystream[t * 100:(t + 1) * 100]
K_uf = vfe.kern.K(vfe.Z, genX)
for i in range(len(genX)):
delta = (K_uf.T[i].dot(w) - genY[i])[0]
w -= eta * K_uf[:, i:i + 1] * delta
rho = np.maximum(1 - np.sum(K_uf[:, i]**2, 0), 0)
deltaV = wV * rho + bV - delta**2
wV -= etaV * rho * deltaV
bV -= etaV * deltaV
mu = K_uf_test.T.dot(w)
rho = np.maximum(1 - np.sum(K_uf_test**2, 0), 0)[:, None]
Sigma = wV * rho + bV
rmse[t] = np.sqrt(np.mean((mu - Ytest)**2))
nlpd[t] = - logpdf(mu - Ytest, Sigma).mean()
return (rmse, nlpd), [RMSE(vfe, Xtest, Ytest), NLPD(vfe, Xtest, Ytest)]
def init_Z(cur_Z, new_X, use_old_Z=True):
if use_old_Z:
Z = np.copy(cur_Z)
else:
M = cur_Z.shape[0]
M_old = int(0.7 * M)
M_new = M - M_old
old_Z = cur_Z[np.random.permutation(M)[0:M_old], :]
new_Z = new_X[np.random.permutation(new_X.shape[0])[0:M_new], :]
Z = np.vstack((old_Z, new_Z))
return Z
def streamingGP(run, M=6, use_old_Z=True):
# N.B.: need to run in a different environment with e.g.
# python 2.7, gpflow=0.5 and tensorflow=1.4.1
import tensorflow as tf
import gpflow as GPflow
import osgpr
np.random.seed(run)
idx_train = np.sort(np.random.choice(range(N), N // 2, False))
idx_test = np.setdiff1d(range(N), idx_train)
Xtest = X[idx_test]
Ytest = Y[idx_test]
rmse = np.zeros(T)
nlpd = np.zeros(T)
# get the first portion and call sparse GP regression
X1 = Xstream[:100]
y1 = Ystream[:100]
Z1 = X1[np.random.permutation(X1.shape[0])[0:M], :]
tf.reset_default_graph()
model1 = GPflow.sgpr.SGPR(X1, y1, GPflow.kernels.RBF(1), Z=Z1)
model1.likelihood.variance = 0.1
model1.kern.variance = .3
model1.kern.lengthscales = 0.6
model1.optimize(disp=1)
mu, Sigma = model1.predict_y(Xtest)
rmse[0] = np.sqrt(np.mean((mu - Ytest)**2))
nlpd[0] = - logpdf(mu - Ytest, Sigma).mean()
Zopt = model1.Z.value
mu1, Su1 = model1.predict_f_full_cov(Zopt)
if len(Su1.shape) == 3:
Su1 = Su1[:, :, 0]
# now call online method on the other portions of the data
for t in range(1, T):
X2 = Xstream[t * 100:(t + 1) * 100]
y2 = Ystream[t * 100:(t + 1) * 100]
x_free = tf.placeholder('float64')
model1.kern.make_tf_array(x_free)
X_tf = tf.placeholder('float64')
with model1.kern.tf_mode():
Kaa1 = tf.Session().run(
model1.kern.K(X_tf),
feed_dict={x_free: model1.kern.get_free_state(), X_tf: model1.Z.value})
Zinit = init_Z(Zopt, X2, use_old_Z)
model2 = osgpr.OSGPR_VFE(X2, y2, GPflow.kernels.RBF(1), mu1, Su1, Kaa1,
Zopt, Zinit)
model2.likelihood.variance = model1.likelihood.variance.value
model2.kern.variance = model1.kern.variance.value
model2.kern.lengthscales = model1.kern.lengthscales.value
model2.optimize(disp=1)
model1 = deepcopy(model2)
Zopt = model1.Z.value
mu1, Su1 = model1.predict_f_full_cov(Zopt)
if len(Su1.shape) == 3:
Su1 = Su1[:, :, 0]
mu, Sigma = model1.predict_y(Xtest)
rmse[t] = np.sqrt(np.mean((mu - Ytest)**2))
nlpd[t] = - logpdf(mu - Ytest, Sigma).mean()
np.savez_compressed('results/streamingGP/%g.npz' % run, rmse=rmse, nlpd=nlpd)
return rmse, nlpd
runs, T = 10, 100
perf = np.empty((runs, 2, T))
VFE = np.empty((runs, 2))
for run in range(runs):
perf[run], VFE[run] = sparseSGD(run, 250, .005, T)
stream = np.empty((runs, 2, T))
for run in range(runs):
try: # load saved result
stream[run] = (np.load('results/streamingGP/%g.npz' % run)['rmse'],
np.load('results/streamingGP/%g.npz' % run)['nlpd'])
except:
stream[run] = streamingGP(run)
def plot(typ):
plt.figure(figsize=(6, 4))
T = perf.shape[-1]
j = ('RMSE', 'NLPD').index(typ)
data = perf[:, j]
for c, label in ((1, 'VFE'),):
plt.axhline(np.mean(VFE[:, j]), c='C{}'.format(c), label=label)
plt.fill_between((0, T), [d(VFE[:, j], -1)] * 2, [d(VFE[:, j], +1)] * 2,
color='C{}'.format(c), alpha=.3)
plt.plot(range(1, 1 + T), np.mean(data, 0), c='C2', label='BioNN')
plt.fill_between(range(1, 1 + T), d(data, -1), d(data, +1),
color='C2', alpha=.3)
plt.plot(range(1, 1 + T), np.mean(stream, 0)[j], c='C3', label='streamingGP')
plt.fill_between(range(1, 1 + T), d(stream[:, j], -1), d(stream[:, j], +1),
color='C3', alpha=.3)
plt.xticks(range(0, 100, 20), range(0, 10000, 2000))
plt.xlabel('Samples')
plt.ylabel(typ)
plt.xlim(0, T)
plt.legend()
plt.tight_layout(.05)
plot('RMSE')
plt.savefig('fig/snelson_stream-RMSE.pdf', transparent=True)
plot('NLPD')
plt.savefig('fig/snelson_stream-NLPD.pdf', transparent=True)
```
#### File: j-friedrich/neuronalGPR/net.py
```python
import warnings
warnings.filterwarnings("ignore")
import math
from scipy.special import logsumexp
import numpy as np
from keras.regularizers import l2
from keras import Input
from keras.layers import Dropout
from keras.layers import Dense
from keras import Model
import time
class net:
def __init__(self, X_train, y_train, n_hidden, n_epochs = 40,
normalize = False, tau = 1.0, dropout = 0.05):
"""
Constructor for the class implementing a Bayesian neural network
approximated with Monte Carlo dropout (dropout kept active at test time).
@param X_train Matrix with the features for the training data.
@param y_train Vector with the target variables for the
training data.
@param n_hidden Vector with the number of neurons for each
hidden layer.
@param n_epochs Number of epochs for which to train the
network. The recommended value 40 should be
enough.
@param normalize Whether to normalize the input features. This
is recommended unless the input vector is, for
example, formed by binary features (a
fingerprint). In that case we do not recommend
normalizing the features.
@param tau Tau value used for regularization
@param dropout Dropout rate for all the dropout layers in the
network.
"""
# We normalize the training data to have zero mean and unit standard
# deviation in the training set if necessary
if normalize:
self.std_X_train = np.std(X_train, 0)
self.std_X_train[ self.std_X_train == 0 ] = 1
self.mean_X_train = np.mean(X_train, 0)
else:
self.std_X_train = np.ones(X_train.shape[ 1 ])
self.mean_X_train = np.zeros(X_train.shape[ 1 ])
X_train = (X_train - np.full(X_train.shape, self.mean_X_train)) / \
np.full(X_train.shape, self.std_X_train)
self.mean_y_train = np.mean(y_train)
self.std_y_train = np.std(y_train)
y_train_normalized = (y_train - self.mean_y_train) / self.std_y_train
y_train_normalized = np.array(y_train_normalized, ndmin = 2).T
# We construct the network
N = X_train.shape[0]
batch_size = 128
lengthscale = 1e-2
reg = lengthscale**2 * (1 - dropout) / (2. * N * tau)
inputs = Input(shape=(X_train.shape[1],))
inter = Dropout(dropout)(inputs, training=True)
inter = Dense(n_hidden[0], activation='relu', W_regularizer=l2(reg))(inter)
for i in range(len(n_hidden) - 1):
inter = Dropout(dropout)(inter, training=True)
inter = Dense(n_hidden[i+1], activation='relu', W_regularizer=l2(reg))(inter)
inter = Dropout(dropout)(inter, training=True)
outputs = Dense(y_train_normalized.shape[1], W_regularizer=l2(reg))(inter)
model = Model(inputs, outputs)
model.compile(loss='mean_squared_error', optimizer='adam')
# We iterate the learning process
start_time = time.time()
model.fit(X_train, y_train_normalized, batch_size=batch_size, nb_epoch=n_epochs, verbose=0)
self.model = model
self.tau = tau
self.running_time = time.time() - start_time
# We are done!
def predict(self, X_test, y_test, T=10000):
"""
Function for making predictions with the Bayesian neural network.
@param X_test The matrix of features for the test data
@return rmse_standard_pred RMSE of the single deterministic forward pass.
@return rmse RMSE of the Monte Carlo (dropout) predictive mean.
@return test_ll Average test log-likelihood under the MC predictive
distribution.
"""
X_test = np.array(X_test, ndmin = 2)
y_test = np.array(y_test, ndmin = 2).T
# We normalize the test set
X_test = (X_test - np.full(X_test.shape, self.mean_X_train)) / \
np.full(X_test.shape, self.std_X_train)
# We compute the predictive mean and variance for the target variables
# of the test data
model = self.model
standard_pred = model.predict(X_test, batch_size=500, verbose=1)
standard_pred = standard_pred * self.std_y_train + self.mean_y_train
rmse_standard_pred = np.mean((y_test.squeeze() - standard_pred.squeeze())**2.)**0.5
Yt_hat = np.array([model.predict(X_test, batch_size=500, verbose=0) for _ in range(T)])
Yt_hat = Yt_hat * self.std_y_train + self.mean_y_train
MC_pred = np.mean(Yt_hat, 0)
rmse = np.mean((y_test.squeeze() - MC_pred.squeeze())**2.)**0.5
# We compute the test log-likelihood
if Yt_hat.size > 5e8:
ll = (np.log(np.sum([np.sum(np.exp(-0.5 * self.tau *
(y_test[None] - Yt_hat[i*1000:(i+1)*1000])**2.), 0)
for i in range((T-1)//1000+1)], 0)) - np.log(T)
- 0.5*np.log(2*np.pi) + 0.5*np.log(self.tau))
else:
ll = (logsumexp(-0.5 * self.tau * (y_test[None] - Yt_hat)**2., 0) - np.log(T)
- 0.5*np.log(2*np.pi) + 0.5*np.log(self.tau))
test_ll = np.mean(ll)
# We are done!
return rmse_standard_pred, rmse, test_ll
```
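A minimal usage sketch of the `net` class above, assuming the module is importable as `net` and using synthetic data; the shapes and hyperparameter values are illustrative, and it relies on the same (older) Keras API the class itself uses:
```python
import numpy as np
from net import net

rng = np.random.RandomState(0)
X_train = rng.rand(200, 8)
y_train = rng.rand(200)
X_test = rng.rand(50, 8)
y_test = rng.rand(50)

# One hidden layer of 50 units, MC dropout kept active at prediction time.
model = net(X_train, y_train, n_hidden=[50], n_epochs=40,
            normalize=True, tau=1.0, dropout=0.05)
rmse_plain, rmse_mc, test_ll = model.predict(X_test, y_test, T=100)
print(rmse_plain, rmse_mc, test_ll)
```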
|
{
"source": "jfrob27/pywavan",
"score": 3
}
|
#### File: pywavan/pywavan/edges.py
```python
import numpy as np
def apodize(ny, nx, radius):
"""
Create an edge-apodization taper
Parameters
----------
ny, nx : integers
size of the taper
radius : float
radius must be greater than 0 and lower than 1
Returns
-------
taper : numpy array to multiply with your image
to apodize its edges
"""
if (radius >= 1) or (radius <= 0.):
print('Error: radius must be lower than 1 and greater than 0.')
return
ni = np.fix(radius*nx)
dni = int(nx-ni)
nj = np.fix(radius*ny)
dnj = int(ny-nj)
tap1d_x = np.ones(nx)
tap1d_y = np.ones(ny)
tap1d_x[0:dni] = (np.cos(3. * np.pi/2. + np.pi/2.* (1.* np.arange(dni)/(dni-1)) ))
tap1d_x[nx-dni:] = (np.cos(0. + np.pi/2. * (1.* np.arange(dni)/(dni-1)) ))
tap1d_y[0:dnj] = (np.cos(3. * np.pi/2. + np.pi/2. * (1.* np.arange( dnj )/(dnj-1)) ))
tap1d_y[ny-dnj:] = (np.cos(0. + np.pi/2. * (1.* np.arange(dnj)/(dnj-1)) ))
tapper = np.zeros((ny, nx))
for i in range(nx):
tapper[:,i] = tap1d_y
for i in range(ny):
tapper[i,:] = tapper[i,:] * tap1d_x
return tapper
###############################################
def padding(input, y, x):
width = input.shape[1]
height = input.shape[0]
output = np.zeros((y,x))
xpos = int(x/2 - width/2)
ypos = int(y/2 - height/2)
output[ypos:height+ypos,xpos:width+xpos] = input
return output
###############################################
def depad(input, y, x):
width = input.shape[1]
height = input.shape[0]
output = np.zeros((y,x))
xpos = int(width/2 - x/2)
ypos = int(height/2 - y/2)
output = input[ypos:y+ypos,xpos:x+xpos]
return output
```
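A minimal usage sketch of the helpers above, assuming the package layout implied by the file path (`pywavan.edges`); the array sizes are arbitrary:
```python
import numpy as np
from pywavan.edges import apodize, padding, depad

image = np.random.rand(128, 128)
# Taper a few pixels at each edge before a Fourier transform
tapered = image * apodize(128, 128, 0.98)
# Zero-pad to 256x256, then crop back to the original size
padded = padding(tapered, 256, 256)
restored = depad(padded, 128, 128)
```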
#### File: pywavan/pywavan/steer_trans.py
```python
import math
import numpy as np
from astropy.io import fits
from .wavan import uv_plane
def steer_trans(im,Nite=2):
#figfile = '/Users/robitaij/postdoc/Herschel/W43_density_galcut_nan.fits'
#HDU = fits.open(figfile)
#im = HDU[0].data
#Nite = 6
N=11 #Number of directions (odd)
#--------------------Definitions----------------------#
na = im.shape[1]
nb = im.shape[0]
x, y, shiftx, shifty, ishiftx, ishifty = uv_plane(na, nb)
x *= na/2.
y *= nb/2.
rt = np.sqrt(x**2.+y**2.)
x[x == 0] = 1
theta = np.arctan(y/x)
del x, y
#-----------------------------------------
#Angular function
#-----------------------------------------
Gk = np.zeros((N,nb,na))
Gkt = np.zeros((nb,na))
for j in range(N):
AA = 2**(N-1) * math.factorial(N-1)/np.sqrt(float(N*math.factorial(2*(N-1))))
#Condition A
#-------------------
ca = np.where(theta-(np.pi*j)/N < np.pi/2.)
Gkt[ca] = AA * np.cos( theta[ca] - (np.pi*j)/N )**(N-1)
#Condition B
#-------------------
cb = np.where(theta-(np.pi*j)/N >= np.pi/2)
Gkt[cb] = 0.
#Delete reflection
#-------------------
if (j >= 0) & (j <= 2):
Gkt[0:nb, 0:na//2] = 0.
if (j > 2) and (j < 9):
Gkt[0:nb//2, 0:na] = 0.
if j >= 9:
Gkt[0:nb, (na//2)-1:na] = 0.
#-------------------
Gk[j,:,:] = Gkt
Gkt=0
#-----------------------------------------
#First division uv-plan
#-----------------------------------------
H0 = np.zeros((nb,na))
L0 = np.zeros((nb,na))
max = np.pi/2.
r = rt*max/(na/2.)
#Condition A
#-------------------
ca = np.where((r > np.pi/4.) & (r < np.pi/2.))
H0[ca] = np.cos( np.pi/2. * np.log(2. * r[ca] / np.pi)/np.log(2.) )
L0[ca] = 2.*np.cos( np.pi/2. * np.log(4. * r[ca] / np.pi)/np.log(2.) )
#Condition B
#-------------------
cb = np.where(r <= np.pi/4.)
H0[cb] = 0.
L0[cb] = 2.
#Condition C
#-------------------
cc = np.where(r >= np.pi/2.)
H0[cc] = 1.
L0[cc] = 0.
#-----------------------------------------
#Iterative division uv-plan
#-----------------------------------------
Hi = np.zeros((Nite,nb,na))
Li = np.zeros((Nite,nb,na))
Hit = np.zeros((nb,na))
Lit = np.zeros((nb,na))
for ite in range(Nite):
max = np.pi/2.
r = (rt*max)/(na/(2.**(ite+1.)))
#Condition A
#-------------------
ca = np.where((r > np.pi/4.) & (r < np.pi/2.))
Hit[ca] = np.cos( np.pi/2. * np.log(2. * r[ca] / np.pi)/np.log(2.) )
Lit[ca] = 2.*np.cos( np.pi/2. * np.log(4. * r[ca] / np.pi)/np.log(2.) )
#Condition B
#-------------------
cb = np.where(r <= np.pi/4)
Hit[cb] = 0.
Lit[cb] = 2.
#Condition C
#-------------------
cc = np.where(r >= np.pi/2)
Hit[cc] = 1.
Lit[cc] = 0.
#-------------------
Li[ite,:,:] = Lit
Hi[ite,:,:] = Hit
Lit = Lit * 0.
Hit = Hit * 0.
#-----------------------------------------
#Wavelet transform
#-----------------------------------------
steer = np.zeros((Nite,N,nb,na), dtype=complex)
#High-pass filter
#--------------------
uvWav = np.copy(H0)
imFT = np.fft.fft2(im)
uvplan = np.roll(uvWav,int(ishiftx), axis=1)
uvplan = np.roll(uvplan,int(ishifty), axis=0)
WimFT = imFT*np.conj(uvplan)
Him = np.fft.ifft2(WimFT)
#Scaling filter
#--------------------
Scal = Li[Nite-1,:,:]
uvWav = Scal/2.
uvplan = np.roll(uvWav,int(ishiftx), axis=1)
uvplan = np.roll(uvplan,int(ishifty), axis=0)
WimFT = imFT * np.conj(uvplan)
Sclim = np.fft.ifft2(WimFT)
#Steerable wavelets
#--------------------
for ite in range(Nite):
for j in range(N):
if ite == 0:
Bp = Hi[0,:,:] * L0/2.
else:
Bp = Hi[ite,:,:] * Li[ite-1,:,:]
uvWav = Bp * Gk[j,:,:]
uvplan = np.roll(uvWav,int(ishiftx), axis=1)
uvplan = np.roll(uvplan,int(ishifty), axis=0)
WimFT = imFT * np.conj(uvplan)
steer[ite,j,:,:] = np.fft.ifft2(WimFT)
return steer,Him,Sclim
```
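A brief usage sketch of the steerable transform above (the input is an arbitrary 2-D array; `Nite` sets the number of scales, and the number of directions is fixed to 11 inside the function):
```python
import numpy as np
from pywavan.steer_trans import steer_trans

im = np.random.rand(128, 128)
steer, Him, Sclim = steer_trans(im, Nite=3)
# steer has shape (Nite, 11, ny, nx): complex coefficients per scale and direction;
# Him is the high-pass residual and Sclim the low-pass (scaling) image.
```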
|
{
"source": "jfroche/groovylint",
"score": 2
}
|
#### File: jfroche/groovylint/fetch_jars.py
```python
import argparse
import logging
import os
import zipfile
import requests
def download_file(url, output_dir, force=False):
"""Download a file from a URL to the download directory."""
output_file_name = url.split('/')[-1]
output_file_path = os.path.join(output_dir, output_file_name)
if force:
try:
os.remove(output_file_path)
except FileNotFoundError:
pass
elif os.path.exists(output_file_path):
logging.debug('%s already exists, skipping download', output_file_path)
return output_file_path
logging.debug('Downloading %s to %s', url, output_file_path)
response = requests.get(url, stream=True)
response.raise_for_status()
with open(output_file_path, mode='wb') as output_file:
for chunk in response.iter_content(chunk_size=256):
output_file.write(chunk)
logging.info('Downloaded %s', output_file_name)
return output_file_path
def fetch_jars(args):
"""Fetch JAR file dependencies."""
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
jar_urls = [
(
'https://github.com/CodeNarc/CodeNarc/releases/download'
f'/v{args.codenarc_version}/CodeNarc-{args.codenarc_version}.jar'
),
(
'https://github.com/dx42/gmetrics/releases/download'
f'/v{args.gmetrics_version}/GMetrics-{args.gmetrics_version}.jar'
),
(
f'https://repo1.maven.org/maven2/org/slf4j/slf4j-api/{args.slf4j_version}'
f'/slf4j-api-{args.slf4j_version}.jar'
),
(
f'https://repo1.maven.org/maven2/org/slf4j/slf4j-simple/{args.slf4j_version}'
f'/slf4j-simple-{args.slf4j_version}.jar'
),
]
for url in jar_urls:
verify_jar(download_file(url, args.output_dir, args.force))
def parse_args():
"""Parse arguments from the command line."""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--codenarc-version', help='Version of CodeNarc to download.', required=True
)
arg_parser.add_argument(
'--gmetrics-version', help='Version of GMetrics to download.', required=True
)
arg_parser.add_argument(
'--slf4j-version', help='Version of SLF4J to download.', required=True
)
arg_parser.add_argument(
'-f',
'--force',
action='store_true',
help='Download JAR files regardless of whether or not they already exist.',
)
arg_parser.add_argument(
'-o',
'--output-dir',
default=os.path.abspath(os.path.curdir),
help='Directory to save JAR files to.',
)
arg_parser.add_argument(
'-v', '--verbose', action='store_true', help='Show verbose output.'
)
args = arg_parser.parse_args()
log_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(level=log_level)
return args
def verify_jar(file_path):
"""Verify that a file is a valid JAR file."""
logging.debug('Verifying %s', file_path)
with zipfile.ZipFile(file_path, 'r') as jar_file:
if 'META-INF/MANIFEST.MF' not in jar_file.namelist():
raise ValueError(f'{file_path} does not appear to be a valid JAR')
if __name__ == '__main__':
fetch_jars(parse_args())
```
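A hedged sketch of calling the helpers above directly rather than through the CLI; the URL follows the same Maven pattern the script builds, the SLF4J version is only an example, and the output directory must already exist:
```python
import os
from fetch_jars import download_file, verify_jar

out_dir = '/tmp/groovylint-jars'
os.makedirs(out_dir, exist_ok=True)
jar_path = download_file(
    'https://repo1.maven.org/maven2/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar',
    out_dir,
)
verify_jar(jar_path)
```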
|
{
"source": "jfroejk/cartridge_quickpay",
"score": 2
}
|
#### File: jfroejk/cartridge_quickpay/admin.py
```python
import django
from django.core.urlresolvers import reverse
from django.contrib import admin
from .models import QuickpayPayment
try:
from cartridge_subscription.models import Subscription, SubscriptionPeriod
except ImportError:
Subscription, SubscriptionPeriod = None, None
class QuickpayPaymentAdmin(admin.ModelAdmin):
list_display = ['qp_id', 'shop_order', 'requested_amount', 'requested_currency', 'accepted',
'state', 'balance', 'accepted_date', 'captured_date', 'test_mode']
list_select_related = ('order',)
search_fields = ['order__username', 'order__reference', 'order__billing_detail_email',
'order__membership_id', 'qp_id']
list_filter = ['state', 'accepted_date', 'accepted', 'test_mode']
readonly_fields = ['qp_id', 'shop_order', 'requested_amount', 'requested_currency', 'accepted', 'test_mode',
'type', 'text_on_statement', 'acquirer', 'state', 'balance',
'last_qp_status', 'last_qp_status_msg', 'last_aq_status', 'last_aq_status_msg',
'accepted_date', 'captured_date']
def has_add_permission(self, request):
return False
def shop_order(self, item: QuickpayPayment):
from cartridge.shop.models import Order
order_id = item.order_id
if order_id is not None:
admin_url = reverse("admin:%s_%s_change"
% (Order._meta.app_label, Order._meta.model_name), args=(order_id,))
return "<a href='{}'>{}</a>".format(admin_url, order_id)
else:
return "-"
shop_order.allow_tags = True
def subscription(self, item: QuickpayPayment):
try:
subscription_id = item.order.subscriptionperiod.subscription_id
except (AttributeError, SubscriptionPeriod.DoesNotExist):
subscription_id = None
if subscription_id is not None:
admin_url = reverse(
"admin:%s_%s_change"
% (Subscription._meta.app_label, Subscription._meta.model_name), args=(subscription_id,))
return "<a href='{}'>{}</a>".format(admin_url, subscription_id)
else:
return "-"
subscription.allow_tags = True
if Subscription is not None:
QuickpayPaymentAdmin.list_select_related = QuickpayPaymentAdmin.list_select_related + ('order__subscriptionperiod',)
QuickpayPaymentAdmin.list_display[2:2] = ['subscription']
admin.site.register(QuickpayPayment, QuickpayPaymentAdmin)
```
#### File: jfroejk/cartridge_quickpay/middleware.py
```python
from django.http import HttpRequest, HttpResponse
from django.utils.deprecation import MiddlewareMixin
from typing import Optional
import logging
class QuickpayMiddleware(MiddlewareMixin):
def process_view(self, request: HttpRequest, view_func, view_args, view_kwargs) -> Optional[HttpResponse]:
from cartridge.shop.views import checkout_steps
from cartridge.shop.checkout import CHECKOUT_STEP_FIRST
logging.debug("Quickpay.process_view: method={}, at checkout={}, step={}"
.format(request.method, view_func is checkout_steps, request.POST.get('step', 0)))
step_str = request.POST.get('step', '0')
step = int(step_str) if step_str.isdigit() else 0
if (request.method == 'POST'
and view_func is checkout_steps
and step == CHECKOUT_STEP_FIRST):
logging.debug("Quickpay.process_view: Making QP checkout view")
from .views import quickpay_checkout
return quickpay_checkout(request)
else:
return None
```
#### File: jfroejk/cartridge_quickpay/models.py
```python
import logging
from decimal import Decimal
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.db.models.signals import post_delete
from django.utils.timezone import now
from django.conf import settings
from cartridge.shop.models import Order, OrderItem, Product
from cartridge.shop.checkout import CheckoutError
from cartridge.shop import fields
from quickpay_api_client import QPClient
from quickpay_api_client.exceptions import ApiError
from datetime import datetime
try:
from typing import Optional
except ImportError:
Optional = None
__author__ = '<EMAIL>'
def quickpay_client(currency: Optional[str] = None) -> QPClient:
"""Get QuickPay client proxy object"""
secret = ":{0}".format(get_api_key(currency))
return QPClient(secret)
def get_api_key(currency: Optional[str] = None) -> str:
"""Get API key for the agreement for the given currency"""
try:
return settings.QUICKPAY_API_KEY
except AttributeError:
raise ImproperlyConfigured("QUICKPAY_API_KEY missing or empty in settings")
def get_private_key(currency: Optional[str] = None) -> str:
"""Get private key for the agreement for the given currency"""
try:
return settings.QUICKPAY_PRIVATE_KEY
except AttributeError:
raise ImproperlyConfigured("QUICKPAY_PRIVATE_KEY missing or empty in settings")
class QuickpayPayment(models.Model):
order = models.ForeignKey(Order, editable=False)
# When an order is deleted, associated payments are deleted. If possible, they are cancelled in Quickpay
# by post_delete handler
# Uses integer for requested_amount because QuickPay operates in minor units (cents, øre)
requested_amount = models.IntegerField(editable=False,
help_text="Requested amount in minor unit, e.g. cent. "
"NB: for subscriptions this is the period amount with tax. "
"The captured amount may be smaller if the previous period was (partly) refunded." ) # type: int
requested_currency = models.CharField(max_length=3, editable=False) # type: str
card_last4 = models.CharField(max_length=4, editable=False, help_text="Last 4 digits of card number") # type: str
qp_id = models.IntegerField(
null=True, db_index=True, editable=False, help_text="ID of Payment in Quickpay") # type: int
accepted = models.BooleanField(default=False, editable=False) # type: bool
test_mode = models.BooleanField(default=True, editable=False) # type: bool
type = models.CharField(null=True, max_length=31, editable=False) # type: str
text_on_statement = models.TextField(null=True, editable=False) # type: str
acquirer = models.CharField(null=True, max_length=31, editable=False) # type: str
state = models.CharField(null=True, max_length=31, editable=False) # type: str
balance = models.IntegerField(null=True, editable=False,
help_text="Captured amount in minor unit, e.g. cent") # type: int
last_qp_status = models.CharField(null=True, max_length=31, editable=False,
help_text="Last status code from Quickpay") # type: str
last_qp_status_msg = models.CharField(null=True, max_length=255, editable=False,
help_text="Last status message from Quickpay") # type: str
last_aq_status = models.CharField(null=True, max_length=31, editable=False,
help_text="Last status code from acquirer") # type: str
last_aq_status_msg = models.CharField(null=True, max_length=255, editable=False,
help_text="Last status message from acquirer") # type: str
accepted_date = models.DateTimeField(null=True, editable=False) # type: datetime
captured_date = models.DateTimeField(null=True, editable=False) # type: datetime
# Only known if the payment has been captured through cartridge_quickpay. Unknown if autocaptured
class Meta:
ordering = ['order']
@classmethod
def create_card_payment(cls, order: Order, amount: Decimal, currency: str, card_last4: str) -> 'QuickpayPayment':
"""Create new payment attempt for Order. Fail if order already paid
# Args:
order : Order = Order to pay
amount : Decimal = Order amount
currency : string = The currency of the payment
card_last4 : string = Last 4 digits of card number
"""
assert isinstance(order, Order)
assert isinstance(amount, Decimal)
succeeded_payment = cls.objects.filter(order=order, accepted=True)
if succeeded_payment:
raise CheckoutError("Order already paid!")
int_amount = int(amount * 100)
res = cls.objects.create(order=order, requested_amount=int_amount,
requested_currency=currency, card_last4=card_last4, state='new')
return res
@classmethod
def get_order_payment(cls, order: Order, lock: bool = True) -> Optional['QuickpayPayment']:
"""Get the latest payment associated with the Order. Lock it for update.
Return None if no payment found"""
payments = order.quickpaypayment_set.all().order_by('-id')[:1]
if lock:
payments = payments.select_for_update()
return payments[0] if payments else None
@property
def is_accepted(self) -> bool:
return bool(self.accepted_date)
@property
def is_captured(self) -> bool:
return bool(self.captured_date)
@property
def may_capture(self) -> bool:
"""Whether payment may be captured"""
return bool(self.accepted_date and self.captured_date is None)
def capture(self, amount: 'Optional[Decimal]'=None) -> bool:
"""Capture this payment. May only capture once. Extra capture() calls have no effect.
TODO: this function hasn't been used much for newer versions of Quickpay. Needs test and correction.
# Args:
amount : Decimal | None = Decimal amount, default requested amount
# Returns bool = Whether capture succeeded.
SHOULD return an error code to tell the user what went wrong!
"""
assert amount is None or isinstance(amount, Decimal)
self.update_from_quickpay() # Make sure we have the latest data form QP
if amount is not None:
assert isinstance(amount, Decimal)
int_amount = min(self.requested_amount, int(amount * 100))
else:
int_amount = self.requested_amount
client = quickpay_client(self.requested_currency)
try:
client.post('/payments/%s/capture' % self.qp_id, **{'amount': int_amount})
# print("capture res", qp_res)
self.captured_date = now()
# Have to get object again, the returned object is with the old data
self.update_from_quickpay()
res = True
except ApiError as e:
logging.error("QuickPay API error: %s" % e.body)
res = False
self.save()
return res
def refund(self, amount: 'Optional[Decimal]'=None) -> bool:
"""Refund this payment
# Args:
amount : Decimal | None = Decimal amount, default captured amount
# Returns bool = Whether refund succeeded.
SHOULD return an error code to tell the user what went wrong!
"""
assert amount is None or isinstance(amount, Decimal)
self.update_from_quickpay() # Make sure we have the latest data form QP
if amount is not None:
assert isinstance(amount, Decimal)
int_amount = min(self.balance, int(amount * 100))
else:
int_amount = self.balance
client = quickpay_client(self.requested_currency)
try:
# print("Attempt to refund %d" % int_amount)
client.post('/payments/%s/refund' % self.qp_id, **{'amount': int_amount})
# print("RES", qp_res)
# Have to get object again, the returned object is with the old data
self.update_from_quickpay()
if self.balance == 0:
self.captured_date = None
res = True
except ApiError as e:
logging.error("QuickPay API error: %s" % e.body)
res = False
self.save()
return res
def update_from_quickpay(self):
"""Update data from QuickPay"""
if self.qp_id is None:
return
client = quickpay_client(self.requested_currency)
try:
qp_res = client.get('/payments/%s' % self.qp_id)
except ApiError as e:
logging.error("QuickPay API error: %s" % e.body)
return
# print("qp res=", qp_res)
self.update_from_res(qp_res)
def update_from_res(self, res: dict):
"""Update payment data from QuickPay result. Doesn't save"""
self.qp_id = res['id']
self.accepted = res['accepted']
self.test_mode = res['test_mode']
self.type = res['type']
self.text_on_statement = res['text_on_statement'] or ''
self.acquirer = res['acquirer']
self.state = res['state']
self.balance = res.get('balance', 0)
self.card_last4 = res.get('metadata', {}).get('last4', self.card_last4) or '9999'
operations = res.get('operations', [])
if operations:
last_op = operations[-1]
self.last_qp_status = last_op['qp_status_code']
self.last_qp_status_msg = last_op['qp_status_msg']
self.last_aq_status = last_op['aq_status_code']
self.last_aq_status_msg = last_op['aq_status_msg']
if self.accepted:
timestamp = now()
if not self.accepted_date:
self.accepted_date = timestamp
if self.state == 'processed' and not self.captured_date:
self.captured_date = timestamp
@receiver(post_delete, sender=QuickpayPayment)
def _quickpay_payment_post_delete(sender, instance: QuickpayPayment, **kwargs):
"""Delete payment link in Quickpay if it hasn't been accepted.
The payment itself cannot be deleted."""
from .payment import delete_payment_link
delete_payment_link(instance)
@receiver(post_delete, sender=Order)
def _order_post_delete_subscription(sender, instance: Order, **kwargs):
"""Delete subscription in Quickpay if order deleted before subscription activated/paid.
"""
# Applicable when subscription has been created in Quickpay but not in cartridge_subscription.
# cartridge_subscription.Subscription is created when the subscription has been paid!
from .payment import delete_order_subscription
delete_order_subscription(instance)
```
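A hedged sketch of the intended card-payment flow. It assumes a configured Django project with Cartridge, the Quickpay settings described above, and that the import path is `cartridge_quickpay.models`; `order` stands for an existing Cartridge `Order`, and in practice `qp_id`/`accepted` are filled in by the gateway callback before any capture:
```python
from decimal import Decimal
from cartridge_quickpay.models import QuickpayPayment

payment = QuickpayPayment.create_card_payment(order, Decimal('199.00'), 'DKK', '1234')
# ... the gateway callback updates the payment via update_from_res()/update_from_quickpay() ...
payment.update_from_quickpay()
if payment.may_capture:
    payment.capture()                 # capture the full requested amount
    payment.refund(Decimal('50.00'))  # optionally refund part of it later
```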
|
{
"source": "jfrog/jfrog-npm-tools",
"score": 3
}
|
#### File: package_checker/src/package_checker.py
```python
import json
import semver
import datetime
import subprocess
import click
import termcolor
from dateutil import parser
MAX_VERSIONS_BETWEEN = 10
MAX_PACKAGE_AGE = datetime.timedelta(days=14)
MIN_SURPRISE_AGE = datetime.timedelta(days=365)
class PackageVersionInfo:
def __init__(self, package_name):
self.package_name = package_name
res = subprocess.run(
f'npm view {self.package_name} time --json',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
check=True,
)
self.version_to_time = json.loads(res.stdout)
self.all_versions = [
semver.parse(ver, loose=True)
for ver in self.version_to_time.keys()
if ver not in ['modified', 'created']
]
def time_of_version(self, version):
return self.version_to_time[str(version)]
def find_latest_match(self, version, max_update_to):
newest_version = version
if max_update_to == 'minor':
for candidate in self.all_versions:
if candidate.major != version.major:
continue
if newest_version.minor < candidate.minor:
newest_version = candidate
elif newest_version.minor == candidate.minor and newest_version.patch < candidate.patch:
newest_version = candidate
elif max_update_to == 'patch':
for candidate in self.all_versions:
if candidate.major != newest_version.major or candidate.minor != newest_version.minor:
continue
if newest_version.patch < candidate.patch:
newest_version = candidate
return newest_version
def version_between(self, first, second):
versions_between = 0
for version in self.all_versions:
if first.compare(version) == -1 and second.compare(version) == 1:
versions_between += 1
return versions_between
def closest_previous(self, version):
closest_previous = semver.parse('0.0.0', loose=True)
for candidate in self.all_versions:
if closest_previous.compare(candidate) == -1 and version.compare(candidate) == 1:
closest_previous = candidate
return closest_previous
class Version:
def __init__(self, package_name, version):
self.package_name = package_name
self.original_version = version
self.version = version
self.max_update_to = None
self.package_version_info = None
if version.startswith('^'):
self.max_update_to = 'minor'
elif version.startswith('~'):
self.max_update_to = 'patch'
if self.max_update_to:
self.version = version[1:]
self.semver = semver.parse(self.version, loose=True)
@click.group()
def cli():
pass
@cli.command()
@click.argument('package_name')
@click.argument('version')
def scan_single_package(package_name, version):
parsed_version = Version(package_name, version)
package_version_info = PackageVersionInfo(package_name)
newest_version = package_version_info.find_latest_match(parsed_version.semver, parsed_version.max_update_to)
versions_between = package_version_info.version_between(parsed_version.semver, newest_version)
if versions_between > MAX_VERSIONS_BETWEEN:
termcolor.cprint(
f'[Warning - {package_name}] There are {versions_between} versions between the pinned version and actual version that will be installed. That might be too much',
'red'
)
new_time = parser.parse(package_version_info.time_of_version(newest_version))
package_age = datetime.datetime.now(tz=datetime.timezone.utc) - new_time
if package_age < MAX_PACKAGE_AGE:
termcolor.cprint(
f'[Warning - {package_name}] Newest package {newest_version} age is {package_age}. It might be too new.',
'red'
)
penultimate = package_version_info.closest_previous(newest_version)
penultimate_time = parser.parse(package_version_info.time_of_version(penultimate))
penultimate_age = new_time - penultimate_time
if penultimate_age > MIN_SURPRISE_AGE:
termcolor.cprint(
f'[Warning - {package_name}] Package was recently updated (to {newest_version}) after a long time ({penultimate_age}). This might be a bad sign.',
'red'
)
if __name__ == '__main__':
cli()
```
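A hedged way to exercise the command programmatically with click's test runner, assuming the module is importable as `package_checker`; the package name and version range are placeholders, and `npm` must be available on PATH since the checker shells out to `npm view`:
```python
from click.testing import CliRunner
from package_checker import scan_single_package

runner = CliRunner()
result = runner.invoke(scan_single_package, ['lodash', '^4.17.0'])
print(result.output)
```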
|
{
"source": "jfrog/quickstart-jfrog-artifactory-eks",
"score": 2
}
|
#### File: source/xray-db/lambda_function.py
```python
from __future__ import print_function
from crhelper import CfnResource
import logging
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
logger = logging.getLogger(__name__)
# Initialise the helper, all inputs are optional, this example shows the defaults
helper = CfnResource(json_logging=False, log_level='DEBUG',
boto_level='CRITICAL', sleep_on_delete=120)
@helper.create
def create(event, context):
conn = None
try:
logger.info("Got Create. Connecting to db")
conn = psycopg2.connect(
dbname=event['ResourceProperties']['MasterDatabaseName'],
user=event['ResourceProperties']['MasterDatabaseUser'],
host=event['ResourceProperties']['MasterDatabaseHost'],
password=event['ResourceProperties']['MasterDatabasePassword']
)
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
logger.info("Start Queries")
cur.execute(
f"CREATE USER {event['ResourceProperties']['XrayDatabaseUser']} WITH PASSWORD \'{event['ResourceProperties']['XrayDatabasePassword']}\';")
cur.execute(
f"GRANT {event['ResourceProperties']['XrayDatabaseUser']} to {event['ResourceProperties']['MasterDatabaseUser']};")
cur.execute(
f"CREATE DATABASE xraydb WITH OWNER={event['ResourceProperties']['XrayDatabaseUser']};")
cur.execute(
f"GRANT ALL PRIVILEGES ON DATABASE xraydb TO {event['ResourceProperties']['XrayDatabaseUser']};")
cur.close()
logger.info("End Queries")
except psycopg2.DatabaseError as e:
raise ValueError(e)
finally:
if conn:
conn.close()
@helper.update
@helper.delete
def noop(event, context):
pass
def handler(event, context):
helper(event, context)
```
|
{
"source": "jfromme/ieegpy",
"score": 3
}
|
#### File: jfromme/ieegpy/hbt_auth.py
```python
import hashlib
import base64
import datetime
import requests
import xml.etree.ElementTree as ET
from hbt_dataset import Dataset as DS
class Session:
""" Class representing Session on the platform """
#host = "localhost"
host = "www.ieeg.org"
#port = ":8886"
port = ""
method = 'https://'
username = ""
password = ""
def __init__(self, name, pwd):
self.username = name
self.password = md5(pwd)
def urlBuilder(self, path):
return Session.method + Session.host + Session.port + path
def _createWSHeader(self, path, httpMethod, query, payload):
dTime = datetime.datetime.now().isoformat()
sig = self._signatureGenerator(path, httpMethod, query, payload, dTime)
return {'username': self.username,
'timestamp': dTime,
'signature': sig,
'Content-Type': 'application/xml'}
def _signatureGenerator(self, path, httpMethod, query, payload, dTime):
""" Signature Generator, used to authenticate user in portal """
m = hashlib.sha256()
queryStr = ""
if len(query):
for k, v in query.items():
queryStr += k + "=" + str(v) + "&"
queryStr = queryStr[0:-1]
m.update(payload)
payloadHash = base64.standard_b64encode(m.digest())
toBeHashed = (self.username+"\n" +
self.password + "\n" +
httpMethod+"\n" +
self.host+"\n" +
path+"\n" +
queryStr+"\n" +
dTime+"\n" +
payloadHash)
m2 = hashlib.sha256()
m2.update(toBeHashed)
return base64.standard_b64encode(m2.digest())
def openDataset(self, name):
""" Returning a dataset object """
# Request location
getIDbySnapNamePath = "/services/timeseries/getIdByDataSnapshotName/"
# Create request content
httpMethod = "GET"
reqPath = getIDbySnapNamePath + name
payload = self._createWSHeader(reqPath, httpMethod, "", "")
url = Session.method + Session.host + Session.port + reqPath
r = requests.get(url, headers=payload, verify=False)
# Check response
print(r.text)
if r.status_code != 200:
raise ConnectionError('Cannot find Study.')
# Request location
getTimeSeriesStr = '/services/timeseries/getDataSnapshotTimeSeriesDetails/'
# Create request content
snapshotID = r.text
reqPath = getTimeSeriesStr + snapshotID
payload = self._createWSHeader(reqPath, httpMethod, "", "")
url = Session.method + Session.host + Session.port + reqPath
r = requests.get(url, headers=payload, verify=False)
# Return Habitat Dataset object
return DS(ET.fromstring(r.text), snapshotID, self)
def md5(userString):
""" Returns MD5 hashed string """
m = hashlib.md5()
m.update(userString)
return m.hexdigest()
class ConnectionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
```
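A minimal usage sketch of the `Session` class above; the credentials and dataset name are placeholders, and the calls perform real HTTPS requests against the IEEG portal:
```python
from hbt_auth import Session

session = Session('my_username', 'my_password')
dataset = session.openDataset('Study 005')
```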
|
{
"source": "jfrostburke/PyZOGY",
"score": 3
}
|
#### File: PyZOGY/test/mock_image_class.py
```python
import numpy as np
from astropy.io import fits
class MockImageClass(np.ndarray):
"""Creates a mock version of ImageClass for testing"""
def __new__(cls, image_filename='', psf_filename='', mask_filename=None, n_stamps=1, saturation=np.inf, variance=np.inf, shape=(50,50)):
raw_image, header = np.ones(shape), fits.Header()#fits.getdata(image_filename, header=True)
raw_psf = np.ones(shape)
mask = np.zeros(shape)
background_std, background_counts = np.ones(shape), np.zeros(shape)
image_data = np.ones(shape)
obj = np.asarray(image_data).view(cls)
obj.header = header
obj.raw_image = raw_image
obj.raw_psf = raw_psf
obj.background_std = background_std
obj.background_counts = background_counts
obj.image_filename = image_filename
obj.psf_filename = psf_filename
obj.saturation = saturation
obj.mask = mask
obj.psf = raw_psf
obj.zero_point = 1.
obj.variance = variance
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.raw_image = getattr(obj, 'raw_image', None)
self.header = getattr(obj, 'header', None)
self.raw_psf = getattr(obj, 'raw_psf', None)
self.background_std = getattr(obj, 'background_std', None)
self.background_counts = getattr(obj, 'background_counts', None)
self.image_filename = getattr(obj, 'image_filename', None)
self.psf_filename = getattr(obj, 'psf_filename', None)
self.saturation = getattr(obj, 'saturation', None)
self.mask = getattr(obj, 'mask', None)
self.psf = getattr(obj, 'psf', None)
self.zero_point = getattr(obj, 'zero_point', None)
self.variance = getattr(obj, 'variance', None)
```
|
{
"source": "j-frost/kata",
"score": 4
}
|
#### File: python/tests/character_test.py
```python
import unittest
from kata.character import Character
class TestCharacter(unittest.TestCase):
def test_character_is_class(self):
self.assertTrue(issubclass(Character, object))
def test_character_has_health(self):
character = Character()
self.assertTrue(hasattr(character, 'health'), 'character must have attribute "health"')
def test_new_character_has_starting_health(self):
character = Character()
self.assertTrue(character.health == 1000)
def test_character_has_level(self):
character = Character()
self.assertTrue(hasattr(character, 'level'), 'character must have attribute "level"')
def test_new_character_has_starting_level(self):
character = Character()
self.assertTrue(type(character.level) == int, 'character level must be an integer')
self.assertEqual(character.level, 1)
def test_character_has_function_is_alive(self):
character = Character()
self.assertTrue(hasattr(character, 'is_alive'), 'character must have attribute "is_alive"')
self.assertTrue(callable(character.is_alive), 'character "is_alive" attribute must be callable')
def test_new_character_is_alive(self):
character = Character()
self.assertTrue(character.is_alive())
def test_character_has_function_attack(self):
character = Character()
self.assertTrue(hasattr(character, 'attack'), 'character must have attribute "attack"')
self.assertTrue(callable(character.attack), 'character "attack" attribute must be callable')
def test_character_can_attack_another_character(self):
attacker = Character()
receiver = Character()
attacker.attack(receiver)
```
#### File: python/tests/leap_year_test.py
```python
import unittest
from kata.leap_year import is_leap_year
class TestIsLeapYear(unittest.TestCase):
def test_is_leap_year_is_a_function(self):
self.assertTrue(callable(is_leap_year))
def test_400_should_return_true(self):
self.assertTrue(is_leap_year(400))
def test_800_should_return_true(self):
self.assertTrue(is_leap_year(800))
def test_1_should_return_false(self):
self.assertFalse(is_leap_year(1))
def test_2500_should_return_false(self):
self.assertFalse(is_leap_year(2500))
def test_4_should_return_true(self):
self.assertTrue(is_leap_year(4))
def test_2600_should_return_false(self):
self.assertFalse(is_leap_year(2600))
def test_1700_should_return_false(self):
self.assertFalse(is_leap_year(1700))
def test_1800_should_return_false(self):
self.assertFalse(is_leap_year(1800))
def test_1900_should_return_false(self):
self.assertFalse(is_leap_year(1900))
def test_2100_should_return_false(self):
self.assertFalse(is_leap_year(2100))
def test_2008_should_return_true(self):
self.assertTrue(is_leap_year(2008))
def test_2012_should_return_true(self):
self.assertTrue(is_leap_year(2012))
def test_2016_should_return_true(self):
self.assertTrue(is_leap_year(2016))
def test_2017_should_return_false(self):
self.assertFalse(is_leap_year(2017))
def test_2018_should_return_false(self):
self.assertFalse(is_leap_year(2018))
def test_2019_should_return_false(self):
self.assertFalse(is_leap_year(2019))
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JFRWKievits/Deep-Learning",
"score": 2
}
|
#### File: Deep-Learning/internal/math.py
```python
import jax
import jax.numpy as jnp
import jax.scipy as jsp
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def safe_trig_helper(x, fn, t=100 * jnp.pi):
return fn(jnp.where(jnp.abs(x) < t, x, x % t))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
def mse_to_psnr(mse):
"""Compute PSNR given an MSE (we assume the maximum pixel value is 1)."""
return -10. / jnp.log(10.) * jnp.log(mse)
def psnr_to_mse(psnr):
"""Compute MSE given a PSNR (we assume the maximum pixel value is 1)."""
return jnp.exp(-0.1 * jnp.log(10.) * psnr)
def compute_avg_error(psnr, ssim, lpips):
"""The 'average' error used in the paper."""
mse = psnr_to_mse(psnr)
dssim = jnp.sqrt(1 - ssim)
return jnp.exp(jnp.mean(jnp.log(jnp.array([mse, dssim, lpips]))))
def compute_ssim(img0,
img1,
max_val,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03,
return_map=False):
"""Computes SSIM from two images.
This function was modeled after tf.image.ssim, and should produce comparable
output.
Args:
img0: array. An image of size [..., width, height, num_channels].
img1: array. An image of size [..., width, height, num_channels].
max_val: float > 0. The maximum magnitude that `img0` or `img1` can have.
filter_size: int >= 1. Window size.
filter_sigma: float > 0. The bandwidth of the Gaussian used for filtering.
k1: float > 0. One of the SSIM dampening parameters.
k2: float > 0. One of the SSIM dampening parameters.
return_map: Bool. If True, will cause the per-pixel SSIM "map" to be returned
Returns:
Each image's mean SSIM, or a tensor of individual values if `return_map`.
"""
# Construct a 1D Gaussian blur filter.
hw = filter_size // 2
shift = (2 * hw - filter_size + 1) / 2
f_i = ((jnp.arange(filter_size) - hw + shift) / filter_sigma)**2
filt = jnp.exp(-0.5 * f_i)
filt /= jnp.sum(filt)
# Blur in x and y (faster than the 2D convolution).
def convolve2d(z, f):
return jsp.signal.convolve2d(
z, f, mode='valid', precision=jax.lax.Precision.HIGHEST)
filt_fn1 = lambda z: convolve2d(z, filt[:, None])
filt_fn2 = lambda z: convolve2d(z, filt[None, :])
# Vmap the blurs to the tensor size, and then compose them.
num_dims = len(img0.shape)
map_axes = tuple(list(range(num_dims - 3)) + [num_dims - 1])
for d in map_axes:
filt_fn1 = jax.vmap(filt_fn1, in_axes=d, out_axes=d)
filt_fn2 = jax.vmap(filt_fn2, in_axes=d, out_axes=d)
filt_fn = lambda z: filt_fn1(filt_fn2(z))
mu0 = filt_fn(img0)
mu1 = filt_fn(img1)
mu00 = mu0 * mu0
mu11 = mu1 * mu1
mu01 = mu0 * mu1
sigma00 = filt_fn(img0**2) - mu00
sigma11 = filt_fn(img1**2) - mu11
sigma01 = filt_fn(img0 * img1) - mu01
# Clip the variances and covariances to valid values.
# Variance must be non-negative:
sigma00 = jnp.maximum(0., sigma00)
sigma11 = jnp.maximum(0., sigma11)
sigma01 = jnp.sign(sigma01) * jnp.minimum(
jnp.sqrt(sigma00 * sigma11), jnp.abs(sigma01))
c1 = (k1 * max_val)**2
c2 = (k2 * max_val)**2
numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
ssim_map = numer / denom
ssim = jnp.mean(ssim_map, list(range(num_dims - 3, num_dims)))
return ssim_map if return_map else ssim
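# Hedged usage sketch: for two images with values in [0, 1],
#   img0 = jnp.zeros((32, 32, 3)); img1 = jnp.full((32, 32, 3), 0.5)
#   ssim = compute_ssim(img0, img1, max_val=1.)
# returns a scalar; pass return_map=True to get the per-pixel SSIM map instead.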
def linear_to_srgb(linear):
# Assumes `linear` is in [0, 1]. https://en.wikipedia.org/wiki/SRGB
eps = jnp.finfo(jnp.float32).eps
srgb0 = 323 / 25 * linear
srgb1 = (211 * jnp.maximum(eps, linear)**(5 / 12) - 11) / 200
return jnp.where(linear <= 0.0031308, srgb0, srgb1)
def srgb_to_linear(srgb):
# Assumes `srgb` is in [0, 1]. https://en.wikipedia.org/wiki/SRGB
eps = jnp.finfo(jnp.float32).eps
linear0 = 25 / 323 * srgb
linear1 = jnp.maximum(eps, ((200 * srgb + 11) / (211)))**(12 / 5)
return jnp.where(srgb <= 0.04045, linear0, linear1)
def learning_rate_decay(step,
lr_init,
lr_final,
max_steps,
lr_delay_steps=0,
lr_delay_mult=1):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
    lr: the learning rate for the current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1))
else:
delay_rate = 1.
t = jnp.clip(step / max_steps, 0, 1)
log_lerp = jnp.exp(jnp.log(lr_init) * (1 - t) + jnp.log(lr_final) * t)
return delay_rate * log_lerp
def sorted_piecewise_constant_pdf(key, bins, weights, num_samples, randomized):
"""Piecewise-Constant PDF sampling from sorted bins.
Args:
key: jnp.ndarray(float32), [2,], random number generator.
bins: jnp.ndarray(float32), [batch_size, num_bins + 1].
weights: jnp.ndarray(float32), [batch_size, num_bins].
num_samples: int, the number of samples.
randomized: bool, use randomized samples.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
# Pad each weight vector (only if necessary) to bring its sum to `eps`. This
# avoids NaNs when the input is zeros or small, but has no effect otherwise.
eps = 1e-5
weight_sum = jnp.sum(weights, axis=-1, keepdims=True)
padding = jnp.maximum(0, eps - weight_sum)
weights += padding / weights.shape[-1]
weight_sum += padding
# Compute the PDF and CDF for each weight vector, while ensuring that the CDF
# starts with exactly 0 and ends with exactly 1.
pdf = weights / weight_sum
cdf = jnp.minimum(1, jnp.cumsum(pdf[..., :-1], axis=-1))
cdf = jnp.concatenate([
jnp.zeros(list(cdf.shape[:-1]) + [1]), cdf,
jnp.ones(list(cdf.shape[:-1]) + [1])
],
axis=-1)
# Draw uniform samples.
if randomized:
s = 1 / num_samples
u = jnp.arange(num_samples) * s
u += jax.random.uniform(
key,
list(cdf.shape[:-1]) + [num_samples],
maxval=s - jnp.finfo('float32').eps)
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u = jnp.minimum(u, 1. - jnp.finfo('float32').eps)
else:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
u = jnp.linspace(0., 1. - jnp.finfo('float32').eps, num_samples)
u = jnp.broadcast_to(u, list(cdf.shape[:-1]) + [num_samples])
# Identify the location in `cdf` that corresponds to a random sample.
# The final `True` index in `mask` will be the start of the sampled interval.
mask = u[..., None, :] >= cdf[..., :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[..., None], x[..., :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[..., None], x[..., -1:, None]), -2)
return x0, x1
bins_g0, bins_g1 = find_interval(bins)
cdf_g0, cdf_g1 = find_interval(cdf)
t = jnp.clip(jnp.nan_to_num((u - cdf_g0) / (cdf_g1 - cdf_g0), 0), 0, 1)
samples = bins_g0 + t * (bins_g1 - bins_g0)
return samples
```
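The helpers above are pure functions, so their key identities are easy to sanity-check. A minimal sketch that only re-applies the formulas from `mse_to_psnr`, `psnr_to_mse` and `learning_rate_decay` (the numbers are illustrative, not from the original project):
```python
import jax.numpy as jnp
# PSNR -> MSE -> PSNR should round-trip exactly.
psnr = jnp.array(30.0)
mse = jnp.exp(-0.1 * jnp.log(10.) * psnr)       # psnr_to_mse
psnr_back = -10. / jnp.log(10.) * jnp.log(mse)  # mse_to_psnr
assert jnp.allclose(psnr, psnr_back)
# Log-linear decay: lr_init at step 0, lr_final at max_steps,
# geometric midpoint halfway through.
lr_init, lr_final, max_steps = 5e-4, 5e-6, 1000
for step in (0, 500, 1000):
    t = jnp.clip(step / max_steps, 0, 1)
    lr = jnp.exp(jnp.log(lr_init) * (1 - t) + jnp.log(lr_final) * t)
    print(step, float(lr))  # 5e-4, 5e-5, 5e-6
```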
|
{
"source": "JFryy/twist-moe-downloader",
"score": 2
}
|
#### File: twist-moe-downloader/twistdl/cli.py
```python
import re
import sys
from PyInquirer import prompt
from argparse import ArgumentParser
from pathlib2 import Path
from six.moves import filter
from six.moves import input
from six.moves import map
from six.moves import range
from tqdm import tqdm
from twistdl import TwistDL
class TwistDLCLI(object):
def __init__(self):
self.client = TwistDL()
self.base_path = Path('anime')
def main(self):
user_input = self.parse_args()
title, search_for_anime = self.parse_title(user_input.title)
if search_for_anime:
anime = self.choose_anime(title)
else:
anime = self.client.get_anime_by('slug_name', title)
if not anime:
            print('Error: It looks like the title you entered was not found.')
exit(1)
episode_range = user_input.range
if not episode_range:
episode_range = self.choose_range(anime)
if not episode_range:
first_episode, last_episode = anime.first_episode.number, anime.last_episode.number
else:
first_episode, last_episode = self.validate_range(episode_range, anime)
sources = self.get_sources_from_range(first_episode, last_episode, anime)
if not user_input.directory:
path = self.get_path(anime)
else:
path = Path(user_input.directory)
path.mkdir(parents=True, exist_ok=True)
self.download_files(path, sources)
def get_path(self, anime):
path = Path('anime') / anime.slug_name
path.mkdir(parents=True, exist_ok=True)
return path
def download_files(self, path, sources):
for source in sources:
filename = path / '{} - {:0>2}.mp4'.format(source.anime.title, source.number).replace('/', '-')
downloader = self.client.download_stream(source.url, filename, 1024 * 1024)
def parse_title(self, title):
url_match = re.findall('twist.moe/a/([a-z0-9-]+)', title)
if url_match:
return url_match[0], False
return title, True
def choose_anime(self, query):
animes = self.client.search_animes(title=query, slug=query)
if not animes:
return None
questions = [
{
'type': 'list',
'name': 'title',
'message': 'Anime(s) found. Please choose a series to download:',
'choices': [{'name': anime.title, 'value': anime} for anime in animes]
}
]
answers = prompt(questions)
if 'title' not in answers:
exit()
return answers['title']
def choose_range(self, anime):
episode_begin, episode_end = anime.first_episode.number, anime.last_episode.number
episode_range = input(
            'Episode selection between {}-{}. To download a range enter "1-5", for a single episode enter "5", or leave '
            'it empty and press "Enter" to download all episodes. \nInput: '.format(episode_begin, episode_end))
return episode_range
def validate_range(self, episode_range, anime):
if isinstance(episode_range, int) or '-' not in episode_range:
episode_number = self.validate_episode(episode_range, anime)
return episode_number, episode_number
else:
try:
episode_begin, episode_end = episode_range.split('-')
except ValueError:
print('"{}" does not match the range pattern "XX-YY"'.format(episode_range))
exit(1)
episode_begin = self.validate_episode(episode_begin, anime)
episode_end = self.validate_episode(episode_end, anime)
return episode_begin, episode_end
def validate_episode(self, episode, anime):
try:
episode = int(episode)
if not anime.episode(episode):
print('"{}" has no episode "{}"'.format(anime.title, episode))
exit(1)
return episode
except ValueError:
print('Episode range "{}" invalid'.format(episode))
exit(1)
def get_sources_from_range(self, first_episode, last_episode, anime):
episodes = list(range(first_episode, last_episode + 1))
return list(filter(lambda source: source.number in episodes, anime.sources))
def parse_args(self):
        usage = 'python cli.py [title] [--range RANGE] [--directory DIRECTORY] [-h]'
parser = ArgumentParser(
description='Twist-dl is a small python tool for downloading video contents of series available on the website '
'twist.moe locally! To download a list of particular series, enter a keyword of the series name. '
                        'e.g. "code geass" can be found by simply entering "code".',
usage=usage
)
parser.add_argument(
'title',
help='To download a particular series, use the series\'s url like so "https://twist.moe/a/made-in-abyss" '
'and to search for a series enter a part of its name as found in a twist.moe\'s url string like "fate".'
)
parser.add_argument(
'--directory',
dest='directory',
help='Directory path to save downloaded contents',
required=False,
default=None
)
parser.add_argument(
'--range',
dest='range',
        help='Range of episodes to download, e.g. --range=1-24 or for a single episode --range=1',
required=False,
default=''
)
if len(sys.argv) < 2:
parser.print_help()
exit(1)
return parser.parse_args()
def main():
try:
cli = TwistDLCLI()
cli.main()
except (KeyboardInterrupt, EOFError):
print('Exit')
```
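For reference, the URL-versus-keyword dispatch in `parse_title` comes down to a single regular expression; a small sketch of its behaviour using the same pattern as above:
```python
import re
pattern = r'twist.moe/a/([a-z0-9-]+)'
print(re.findall(pattern, 'https://twist.moe/a/made-in-abyss'))  # ['made-in-abyss'] -> download directly
print(re.findall(pattern, 'code geass'))                         # [] -> fall back to interactive search
```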
|
{
"source": "jfs60/Group-147-PartIA-Flood-Warning-System",
"score": 4
}
|
#### File: Group-147-PartIA-Flood-Warning-System/floodsystem/plot.py
```python
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from floodsystem.analysis import polyfit
#Task 2F
def plot_water_level_with_fit(station, dates, levels, p):
    plt.plot(dates, levels, '.', label="Past measurements of water level")
poly,x1 = polyfit(dates,levels,p)
x = matplotlib.dates.date2num(dates)
x_initial = np.linspace(x[0],x[-1],30)
plt.plot(x_initial,poly(x_initial - x1),label = "Polynomial for water level")
plt.title(station[0].name)
    plt.ylim(0, max(levels) + 0.05)
plt.axhline(station[0].typical_range[0],label = "High and low typical range")
plt.axhline(station[0].typical_range[1])
plt.xlabel("Date and Time")
plt.ylabel("Relative Water Level")
plt.legend()
plt.show()
#upper typical range when relative water level = 1
#lower typical range when relative water level = 0
#Task 2E
def plot_water_levels(station, dates, levels):
"""Takes an input of dates and water levels and plots both on a graph.
Also shows the typical high and low for the station on the same graph.
Does not show the graph."""
# Plot
plt.plot(dates, levels, label="$water levels$")
# add lines of typical high and low
    plt.axhline(station[0].typical_range[1], color='brown', label="$typical high$")
    plt.axhline(station[0].typical_range[0], color='blue', label="$typical low$")
# Add axis labels, rotate date labels and add plot title
plt.xlabel('date')
plt.ylabel('water level (m)')
plt.xticks(rotation=45)
plt.title("Station: " + station[0].name)
plt.legend()
# Display stuff
plt.tight_layout() # This makes sure plot does not cut off date labels
plt.show()
```
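Neither plotting function needs live data: they only read `name` and `typical_range` from the station object and take plain date/level lists. A minimal sketch with a hypothetical stub station (the `_StubStation` class is not part of the project):
```python
from datetime import datetime, timedelta
from floodsystem.plot import plot_water_levels
class _StubStation:
    # hypothetical stand-in exposing only the attributes the plot functions read
    name = "Test Station"
    typical_range = (0.2, 0.8)  # (typical low, typical high)
dates = [datetime(2022, 1, 1) + timedelta(hours=6 * k) for k in range(8)]
levels = [0.3, 0.4, 0.5, 0.7, 0.9, 0.6, 0.5, 0.4]
plot_water_levels([_StubStation()], dates, levels)
```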
#### File: jfs60/Group-147-PartIA-Flood-Warning-System/task2C.py
```python
from floodsystem.station import MonitoringStation
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level, stations_level_over_threshold
def run ():
stations = build_station_list()
update_water_levels(stations)
    stations_list = stations_highest_rel_level(stations, 9)
    return stations_list
stations_Task_2C = run()
print (stations_Task_2C)
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
run()
```
|
{
"source": "jfsalcedo10/mda-kuwait",
"score": 3
}
|
#### File: components/scripts/sentiment_analysis.py
```python
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
from datetime import datetime
import os
import sys
import stanza
import pandas as pd
import numpy as np
#----------------------------------------------------------------------------------------------------------------#
# Utils functions
# Function to clean a specific character (expand if you need to clean more)
def clean_txt(string):
string = string.replace(" -- ", ", ")
return string
#----------------------------------------------------------------------------------------------------------------#
# Functions to call the models
# Stanza
def stanza_fn (string, j, max_j):
now = datetime.now()
time = now.strftime("%H:%M:%S")
if j == 0:
print(f"Stanza starts working at {time}")
    print(f"\rStanza working in speech {j} at {time}", end="") # This reprints the line in place
nlp = stanza.Pipeline('en', processors='tokenize, mwt, pos, lemma, depparse,sentiment',
use_gpu=False, verbose=False, pos_batch_size=3000)
doc = nlp(string)
doc_sent = []
for i, sentence in enumerate(doc.sentences):
doc_sent.append(sentence.sentiment)
    result = (sum(doc_sent)/len(doc_sent)) - 1 # Shift the scale from [0, 2] to [-1, 1]
    time = datetime.now().strftime("%H:%M:%S")
if j == max_j:
print(f"\nStanza finished working at {time}")
return result # [0 negative, 1 neutral, 2 positive] Now -1 negative, 0 neutral, 1 positive
# TextBlob (now with subjectivity parameter)
def textblob_fn (string, j, max_j, var="polr"): # Change polr to subj to get the subjectivity of the text
now = datetime.now()
time = now.strftime("%H:%M:%S")
if j == 0:
print(f"TextBlob starts working at {time}")
    print(f"\rTextBlob working in speech {j} at {time}", end="") # This reprints the line in place
if var=="polr":
tb_speech = TextBlob(string)
result = round(tb_speech.polarity, 3)
elif var== "subj":
tb_speech = TextBlob(string)
result = round(tb_speech.subjectivity, 3)
else:
print("Check spelling. Only polr or subj")
result = np.nan
    time = datetime.now().strftime("%H:%M:%S")
if j == max_j:
print(f"\nTextBlob finished working at {time}")
return result # -1 negative, 1 positive
# Vader
def vader_fn (string, j, max_j):
now = datetime.now()
time = now.strftime("%H:%M:%S")
if j == 0:
print(f"Vader starts working at {time}")
    print(f"\rVader working in speech {j} at {time}", end="") # This reprints the line in place
analyser = SentimentIntensityAnalyzer()
score = analyser.polarity_scores(string)
result = score["compound"] # Author says that is the main statistic you need to see (-1 negative, 1 positive, between -0.05 and 0.05 neutral)
    time = datetime.now().strftime("%H:%M:%S")
if j == max_j:
print(f"\nVader finished working at {time}")
return result
```
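The VADER and TextBlob backends can also be exercised directly on a single sentence, without the progress-printing wrappers; a short sketch using the same library calls as above (the example sentence is arbitrary):
```python
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
text = "The committee warmly welcomed the proposal."
# VADER compound score: -1 (negative) .. 1 (positive), values near 0 are neutral
print(SentimentIntensityAnalyzer().polarity_scores(text)["compound"])
# TextBlob polarity (-1 .. 1) and subjectivity (0 .. 1)
blob = TextBlob(text)
print(round(blob.polarity, 3), round(blob.subjectivity, 3))
```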
#### File: components/scripts/topic_classification.py
```python
import pandas as pd
import nltk
from nltk.corpus import wordnet
# code adapted from https://github.com/despiegj/goz39a/blob/mda_2022/textmining/nltk/normalisation.ipynb
# function to convert nltk tag to wordnet tag
def nltk_tag_to_wordnet_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return None
# code adapted from https://github.com/despiegj/goz39a/blob/mda_2022/textmining/nltk/normalisation.ipynb
def lemmatize_sentence(sentence, tokenizer, lemmatizer):
# tokenize the sentence and find the POS tag for each token
nltk_tagged = nltk.pos_tag(tokenizer.tokenize(sentence))
# tuple of (word, wordnet_tag)
wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)
lemmatized_sentence = []
for word, tag in wordnet_tagged:
if tag is None:
# if there is no available tag, append the token as is
lemmatized_sentence.append(word)
else:
# else use the tag to lemmatize the token
lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))
return " ".join(lemmatized_sentence)
def normalize_text(text, tokenizer, lemmatizer):
normalized_text = ""
for sentence in text:
normalized_text += " " + lemmatize_sentence(sentence, tokenizer, lemmatizer)
return normalized_text
def remove_stopwords(text, stop_words):
return " ".join([word for word in str(text).split() if word not in stop_words])
def classify_topics(lda_model, tf_matrix, titles):
classification = 100 * lda_model.transform(tf_matrix) # converted to percentages
cnames = ['Topic ' + str(i) for i in range(1, lda_model.n_components + 1)]
classification_df = pd.DataFrame(classification, columns=cnames)
classification_df["title"] = titles
return classification_df
def nb_topics_above_percentage(classification_df, percentage):
cname = "nb_topics_above_" + str(percentage) + "_percent"
select = classification_df.columns.str.startswith("Topic ")
classification_df.loc[:, cname] = classification_df.loc[:, select].gt(percentage).sum(axis=1)
def percentage_speeches_above_percentage(percentage, nb_speeches, classification_df, topics_df):
cname = 'percentage_speeches_above_' + str(percentage) + '_percent'
select = classification_df.columns.str.startswith("Topic ")
topics_df[cname] = classification_df.loc[:, select].gt(percentage).sum(axis=0) * 100 / nb_speeches
def sum_of_n_largest(n, classification_df):
cname = 'sum_of_' + str(n) + '_largest'
select = classification_df.columns.str.startswith("Topic ")
classification_df.loc[:, cname] = classification_df.loc[:, select].apply(
lambda row: row.nlargest(n).sum(), axis=1)
def sort_topics_per_speech(n_comp, classification_df):
for i in range(1, n_comp + 1):
cname = 'main_topic_' + str(i)
select = classification_df.columns.str.startswith("Topic ")
classification_df.loc[:, cname] = classification_df.loc[:, select].apply(
lambda row: row.nlargest(i).index.values[i - 1], axis=1)
select = classification_df.columns.str.startswith("Topic ")
classification_df.loc[:, cname + '_perc'] = classification_df.loc[:, select].apply(
lambda row: row.nlargest(i).values[i - 1], axis=1)
```
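With `lemmatize_sentence` from the module above in scope, lemmatisation only needs an NLTK tokenizer and a WordNet lemmatizer; a minimal sketch (it assumes the `punkt`, `averaged_perceptron_tagger` and `wordnet` NLTK data packages have already been downloaded):
```python
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import WhitespaceTokenizer
tokenizer = WhitespaceTokenizer()
lemmatizer = WordNetLemmatizer()
print(lemmatize_sentence("The speakers were discussing several topics", tokenizer, lemmatizer))
# roughly: "The speaker be discuss several topic"
```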
|
{
"source": "JFsanchezherrero/PhiSpy",
"score": 2
}
|
#### File: JFsanchezherrero/PhiSpy/PhiSpy.py
```python
import os
import sys
import re
import subprocess
import argparse
## get file path
toolDir = os.path.dirname(os.path.realpath(__file__)) + '/'
INSTALLATION_DIR = toolDir
sys.path.append(toolDir)
## load modules
from PhiSpy_tools import makeTest
from PhiSpy_tools import classification
from PhiSpy_tools import evaluation
from PhiSpy_tools import unknownFunction
#################################################
def call_phiSpy(organismPath, output_dir, trainingFlag, INSTALLATION_DIR, evaluateOnly, threshold_for_FN,
phageWindowSize, quietMode, keep):
sys.stderr.write("Running PhiSpy on " + organismPath + "\n")
if (not evaluateOnly):
if (quietMode == 0):
            print ('Making Test Set... (this may take a couple of minutes)')
my_make_test_flag = makeTest.call_make_test_set(organismPath, output_dir, INSTALLATION_DIR)
if (my_make_test_flag == 0):
            print ('The input organism is too small to predict prophages. Please provide a larger contig (with at least 40 genes) to use PhiSpy.')
return
if (quietMode == 0):
print ('Start Classification Algorithm')
classification.call_classification(organismPath, output_dir, trainingFlag, phageWindowSize, INSTALLATION_DIR)
if (quietMode == 0):
print ('Done with classification Algorithm')
###### added in this version 2.2 #####
if (trainingFlag == 0):
if (quietMode == 0):
print ('As training flag is zero, considering unknown functions')
unknownFunction.consider_unknown(output_dir)
######################################
if (quietMode == 0):
print ('Start evaluation...')
evaluation.call_start_end_fix(output_dir, organismPath, INSTALLATION_DIR, threshold_for_FN, phageWindowSize)
if (quietMode == 0):
print ('Done!!!')
#################################################
def print_list():
printstr = ''
try:
f = open(INSTALLATION_DIR + "/data/trainingGenome_list.txt", "r")
except:
        print ('cannot find list')
        return
for line in f:
line = line.strip()
temp = re.split('\t', line)
if int(temp[3]) == 1:
printstr = printstr + temp[0] + ' ' + temp[2] + '\n'
print (printstr)
f.close()
#################################################
def start_program(argv):
## check Rscript is installed
args_parser = argparse.ArgumentParser(
description="phiSpy is a program for identifying prophages from among microbial genome sequences",
epilog="(c) 2008-2018 <NAME>, <NAME>, <NAME>, San Diego State University, San Diego, CA")
args_parser.add_argument('-t', '--training_set', default=0, type=int,
help='Choose a training set from the list of training sets.')
args_parser.add_argument('-l', '--list', type=bool, default=False, const=True, nargs='?',
help='List the available training sets and exit')
args_parser.add_argument('-c', '--choose', type=bool, default=False, const=True, nargs='?',
help='Choose a training set from a list (overrides -t)')
args_parser.add_argument('-e', '--evaluate', type=bool, default=False, const=True, nargs='?',
help='Run in evaluation mode -- does not generate new data, but reruns the evaluation')
args_parser.add_argument('-n', '--number', default=5, type=int,
help='Number of consecutive genes in a region of window size that must be prophage genes to be called')
args_parser.add_argument('-w', '--window_size', default=30, type=int,
help='Window size of consecutive genes to look through to find phages')
args_parser.add_argument('-i', '--input_dir', help='The input directory that holds the genome')
args_parser.add_argument('-o', '--output_dir', help='The output directory to write the results')
args_parser.add_argument('-qt', '--quiet', type=bool, default=False, const=True, nargs='?',
help='Run in quiet mode')
args_parser.add_argument('-k', '--keep', type=bool, default=False, const=True, nargs='?',
help='Do not delete temp files')
args_parser = args_parser.parse_args()
if (args_parser.list):
print_list()
sys.exit(0)
if not args_parser.input_dir and not args_parser.output_dir:
print(sys.argv[0] + " [-h for help] [-l to list training sets] OPTIONS")
print("Input and output directories are required")
sys.exit(0)
output_dir = args_parser.output_dir
organismPath = args_parser.input_dir
trainingFlag = args_parser.training_set
output_dir = output_dir.strip()
if output_dir[len(output_dir) - 1] != '/':
output_dir = output_dir + '/'
try:
f = open(output_dir + 'testing.txt', 'w')
except:
try:
os.makedirs(output_dir)
f = open(output_dir + 'testing.txt', 'w')
except:
print ("Cannot create the output directory or write file in the output directory", output_dir)
return
f.close()
    os.remove(output_dir + 'testing.txt')
organismPath = organismPath.strip()
if organismPath[len(organismPath) - 1] == '/':
organismPath = organismPath[0:len(organismPath) - 1]
try:
f_dna = open(organismPath + '/contigs', 'r')
f_dna.close()
except:
print ("Cannot open", organismPath + '/contigs')
return
try:
f = open(organismPath + '/Features/peg/tbl', 'r')
f.close()
except:
print ("Cannot open", organismPath + '/Features/peg/tbl')
return
try:
f = open(organismPath + '/assigned_functions', 'r')
f.close()
except:
print ("Cannot open", organismPath + '/assigned_functions')
# return
try:
f = open(organismPath + '/Features/rna/tbl', 'r')
f.close()
except:
print ("Cannot open", organismPath + '/Features/rna/tbl')
# return
if (args_parser.choose):
while (1):
print_list()
            temp = input(
"Please choose the number for a closely related organism we can use for training, or choose 0 if you don't know: ")
try:
trainingFlag = int(temp)
except:
continue
if trainingFlag < 0 or trainingFlag > 30:
continue
break
print ()
call_phiSpy(organismPath, output_dir, trainingFlag, INSTALLATION_DIR, args_parser.evaluate, args_parser.number,
args_parser.window_size, args_parser.quiet, args_parser.keep)
#################################################
## main
#################################################
if __name__ == "__main__":
    start_program(sys.argv)
```
#### File: PhiSpy/PhiSpy_tools/makeTest.py
```python
import re
import math
import string
import pprint
import sys
import array
#################################################
class ShannonScore:
def __init__(self, INSTALLATION_DIR):
# Create a hash of the kmers that points to the index of an array that holds the value
self._key_to_index = {}
self._values = array.array('i')
self.total = 0
try:
infile = open(INSTALLATION_DIR + 'data/mer_ORF_list.txt', 'r')
except:
sys.exit('ERROR: Cannot open data/mer_ORF_list.txt')
for line in infile:
line = line.strip()
self._values.append(0)
self._key_to_index[line] = len(self._values) - 1
def reset(self):
self.total = 0
self._values = array.array('i', b'\x00' * self._values.itemsize * len(self._values))
def addValue(self, seq):
mer = 12
seq = seq.strip().upper()
pos = 0
while (pos <= (len(seq) - mer)):
substr = seq[pos:pos + mer]
pos = pos + mer
if substr in self._key_to_index:
self._values[self._key_to_index[substr]] += 1
self.total += 1
def getSlope(self):
if self.total == 0:
return 0
H = 0.0
found_total = 0.0
for i in self._key_to_index:
p = float(self._values[self._key_to_index[i]]) / self.total
if (p > 0):
H = H + p * (math.log(p) / math.log(2))
found_total = found_total + self._values[self._key_to_index[i]]
H = -H
if H <= 0:
return 0
freq_found = found_total / float(self.total)
myslope = freq_found / H
return myslope
#################################################
def read_contig(organismPath):
try:
f_dna = open(organismPath + '/contigs', 'r')
except:
        print("can't open contig file", organismPath)
return ''
dna = {}
seq = ''
name = ''
for i in f_dna:
if i[0] == '>':
if len(seq) > 10:
dna[name] = seq
name = i.strip()
if ' ' in name:
temp = re.split(' ', name)
name = temp[0]
'''
if '_' in name:
temp = re.split('_',name)
name = temp[len(temp)-1]
else:
'''
name = name[1:len(name)]
seq = ''
else:
seq = seq + i.strip()
dna[name] = seq
f_dna.close()
return dna
#################################################
def my_sort(orf_list):
n = len(orf_list)
i = 0
while (i < n):
j = i + 1
while (j < n):
flag = 0
# direction for both
if orf_list[i]['start'] < orf_list[i]['stop']:
dir_i = 1
else:
dir_i = -1
if orf_list[j]['start'] < orf_list[j]['stop']:
dir_j = 1
else:
dir_j = -1
# check whether swap need or not
if dir_i == dir_j:
if orf_list[i]['start'] > orf_list[j]['start']:
flag = 1
else:
if dir_i == 1:
if orf_list[i]['start'] > orf_list[j]['stop']:
flag = 1
else:
if orf_list[i]['stop'] > orf_list[j]['start']:
flag = 1
# swap
if flag == 1:
temp = orf_list[i]
orf_list[i] = orf_list[j]
orf_list[j] = temp
j += 1
i += 1
return orf_list
#################################################
def complement(gene):
complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
rcseq = gene.translate(complements)[::-1]
return rcseq
#################################################
def find_all_median(x):
all_len = []
for i in x:
all_len.append((abs(x[i]['start'] - x[i]['stop'])) + 1)
return find_median(all_len)
#################################################
def find_median(all_len):
n = int(len(all_len) / 2)
all_len.sort()
if len(all_len) == n * 2:
return (all_len[n] + all_len[n - 1]) / float(2)
else:
return all_len[n]
#################################################
def find_avg_length(orf_list):
x = []
for i in orf_list:
x.append(abs(orf_list[i]['start'] - orf_list[i]['stop']))
return sum(x) / len(x)
#################################################
def find_atgc_skew(seq):
seq = seq.upper()
total_at = 0.0
total_gc = 0.0
# A G C T
scores = {
'A': [1.0, 0.0, 0.0, 0.0],
'T': [0.0, 0.0, 0.0, 1.0],
'G': [0.0, 1.0, 0.0, 0.0],
'C': [0.0, 0.0, 1.0, 0.0],
'R': [0.5, 0.5, 0.0, 0.0], #ag
'Y': [0.0, 0.0, 0.5, 0.5], #ct
'S': [0.0, 0.5, 0.5, 0.0], #gc
'W': [0.5, 0.0, 0.0, 0.5], #at
'K': [0.0, 0.5, 0.0, 0.5], #gt
'M': [0.5, 0.0, 0.5, 0.0], #ac
'B': [0.0, 0.3, 0.3, 0.3], #cgt
'D': [0.3, 0.3, 0.0, 0.3], #agt
'H': [0.3, 0.0, 0.3, 0.3], #act
'V': [0.3, 0.3, 0.3, 0.0], #acg
'N': [0.25, 0.25, 0.25, 0.25], #acgt
}
counts = [0, 0, 0, 0]
for base in seq:
if base not in scores:
sys.stderr.write("ERROR: found base " + base + " that is not in the iupac code. Skipped\n")
continue
for i,j in enumerate(scores[base]):
counts[i] += j
total_at = counts[0] + counts[3]
total_gc = counts[1] + counts[2]
if (total_at * total_gc) == 0:
        sys.exit("Error: total A/T or G/C count is zero, cannot compute skew")
return float(counts[0]) / total_at, float(counts[3]) / total_at, float(counts[1]) / total_gc, float(counts[2]) / total_gc
#################################################
def find_avg_atgc_skew(orf_list, mycontig, dna):
a_skew = []
t_skew = []
g_skew = []
c_skew = []
for i in orf_list:
start = orf_list[i]['start']
stop = orf_list[i]['stop']
if start < stop:
bact = dna[mycontig][start - 1:stop]
else:
bact = dna[mycontig][stop - 1:start]
bact = bact[::-1]
bact = complement(bact)
if len(bact) < 3:
continue
xa, xt, xg, xc = find_atgc_skew(bact)
a_skew.append(xa)
t_skew.append(xt)
g_skew.append(xg)
c_skew.append(xc)
a = sum(a_skew) / len(a_skew)
t = sum(t_skew) / len(t_skew)
g = sum(g_skew) / len(g_skew)
c = sum(c_skew) / len(c_skew)
at = math.fabs(a - t)
gc = math.fabs(g - c)
return at, gc
######################################################################################
def make_set_test(organismPath, output_dir, window, INSTALLATION_DIR):
my_shannon_scores = ShannonScore(INSTALLATION_DIR)
all_orf_list = {}
try:
infile = open(organismPath + '/Features/peg/tbl', 'r')
except:
sys.exit('ERROR: Cannot open file ' + organismPath + '/Features/peg/tbl')
dna = read_contig(organismPath)
# open host/bact dna file which has a contig
for line in infile:
temp = re.split('\t', line.strip())
if ',' in temp[1]:
ttemp = re.split(',', temp[1])
temp[1] = ttemp[len(ttemp) - 1]
temp1 = re.split('_', temp[1])
contig = temp[1][:temp[1][:temp[1].rfind('_')].rfind('_')]
start = int(temp1[len(temp1) - 2])
stop = int(temp1[len(temp1) - 1])
# save info for sorting orf
if contig in all_orf_list:
x = len(all_orf_list[contig])
else:
x = 0
all_orf_list[contig] = {}
all_orf_list[contig][x] = {}
all_orf_list[contig][x]['start'] = start
all_orf_list[contig][x]['stop'] = stop
all_orf_list[contig][x]['peg'] = temp[0]
infile.close()
try:
outfile = open(output_dir + 'testSet.txt', 'w')
except:
sys.exit('ERROR: Cannot open file for writing: testSet.txt')
outfile.write('orf_length_med\tshannon_slope\tat_skew\tgc_skew\tmax_direction\n')
for mycontig in all_orf_list:
orf_list = my_sort(all_orf_list[mycontig])
######################
# avg_length = find_avg_length(orf_list)
all_median = find_all_median(orf_list)
avg_at_skew, avg_gc_skew = find_avg_atgc_skew(orf_list, mycontig, dna)
#####################
i = 0
# while i<len(orf_list)-window +1:
while (i < len(orf_list)):
# initialize
my_shannon_scores.reset()
length = []
direction = []
a_skew = []
t_skew = []
g_skew = []
c_skew = []
for j in range(i - int(window / 2), i + int(window / 2)):
if ((j < 0) or (j >= len(orf_list))):
continue
start = orf_list[j]['start']
stop = orf_list[j]['stop']
if start < stop:
bact = dna[mycontig][start - 1:stop]
direction.append(1) # direction
else:
bact = dna[mycontig][stop - 1:start]
bact = bact[::-1]
bact = complement(bact)
direction.append(-1) # direction
if len(bact) < 3:
print('Short Protein Found!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
j += 1
continue
# at skew
xa, xt, xg, xc = find_atgc_skew(bact)
a_skew.append(xa)
t_skew.append(xt)
g_skew.append(xg)
c_skew.append(xc)
# length
length.append(len(bact))
# shannon
my_shannon_scores.addValue(bact)
j += 1
# write in file for one window
mylength = find_median(length) - all_median # find_mean(length)
fileWriteStr = ''
fileWriteStr += str(mylength) + '\t'
fileWriteStr += str(my_shannon_scores.getSlope()) + '\t'
a = sum(a_skew) / len(a_skew)
t = sum(t_skew) / len(t_skew)
c = sum(c_skew) / len(c_skew)
g = sum(g_skew) / len(g_skew)
at = math.fabs(a - t) / avg_at_skew if avg_at_skew else 0
gc = math.fabs(g - c) / avg_gc_skew if avg_gc_skew else 0
fileWriteStr += str(at) + '\t'
fileWriteStr += str(gc) + '\t'
# orf direction
orf = []
x = 0
flag = 0
for ii in direction:
if (ii == 1):
if (flag == 0):
x += 1
else:
orf.append(x)
x = 1
flag = 0
else:
if (flag == 1):
x += 1
else:
if (flag < 1 and x > 0):
orf.append(x)
x = 1
flag = 1
orf.append(x)
orf.sort()
if len(orf) == 1:
fileWriteStr += str(orf[len(orf) - 1]) + '\n'
else:
fileWriteStr += str(orf[len(orf) - 1] + orf[len(orf) - 2]) + '\n'
outfile.write(fileWriteStr)
i += 1
outfile.close()
##################### function call #################################
def call_make_test_set(organismPath, output_dir, INSTALLATION_DIR):
window = 40
make_set_test(organismPath, output_dir, window, INSTALLATION_DIR)
    # Check whether the output file has data. For shorter genomes (less than 40 genes) PhiSpy will not work.
num_lines = sum(1 for line in open(output_dir + 'testSet.txt', 'r'))
if (num_lines > 0):
return 1
else:
return 0
```
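The small sequence helpers in this module are self-contained and easy to check in isolation; a quick sketch with `complement` and `find_median` from above in scope:
```python
print(complement('ATGC'))        # reverse complement -> 'GCAT'
print(find_median([3, 1, 2]))    # odd-length list  -> 2
print(find_median([4, 1, 2, 3])) # even-length list -> 2.5
```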
#### File: PhiSpy/PhiSpy_tools/tab2seed.py
```python
import os
import sys
import argparse
#################################################
def parse_tab(filename, outputdir):
"""
Parse a patric tab file
    :param filename: the file to parse
    :param outputdir: the directory where the minimal SEED-style files are written
    :return: None. Writes Features/peg/tbl, Features/rna/tbl and assigned_functions into outputdir
"""
if not (os.path.exists(os.path.join(outputdir, "Features"))):
os.mkdir(os.path.join(outputdir, "Features"))
if not (os.path.exists(os.path.join(outputdir, "Features/peg"))):
os.mkdir(os.path.join(outputdir, "Features/peg"))
if not (os.path.exists(os.path.join(outputdir, "Features/rna"))):
os.mkdir(os.path.join(outputdir, "Features/rna"))
peg = open(os.path.join(outputdir, "Features/peg/tbl"), 'w')
rna = open(os.path.join(outputdir, "Features/rna/tbl"), 'w')
asf = open(os.path.join(outputdir, "assigned_functions"), 'w')
wrote_genome = False
with open(filename, 'r') as fin:
for l in fin:
if l.startswith('genome_id'):
continue
# genome_id genome_name accession annotation feature_type patric_id refseq_locus_tag alt_locus_tag
# uniprotkb_accession start end strand na_length gene product figfam_id plfam_id pgfam_id
# go ec pathway
l = l.replace("\n", "") # this is a hack because I can't figure out how to do chomp
p = l.split("\t")
if not wrote_genome:
with open(os.path.join(outputdir, "GENOME"), 'w') as gout:
gout.write("{}\n".format(p[1]))
wrote_genome = True
gid, name, acc, who, ftype, fid, refseq_locus, alt, uni, start, stop, strand, length, gene, prod, ffid, plid, pgid, go, ec, pw = p
            if int(start) > int(stop):
(start, stop) = (stop, start)
if "CDS" in p[4]:
peg.write("{}\t{}_{}_{}\n".format(fid, acc, start, stop))
asf.write("{}\t{}\n".format(fid, prod))
elif "rna" in p[4].lower():
rna.write("{}\t{}_{}_{}\n".format(fid, acc, start, stop))
peg.close()
rna.close()
asf.close()
#################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert a patric tab file to a minimal seed directory")
parser.add_argument('-f', help='The patric tab file', required=True)
parser.add_argument('-o', help='output directory', required=True)
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
if not os.path.exists(args.o):
os.mkdir(args.o)
parse_tab(args.f, args.o)
```
|
{
"source": "JFsanchezherrero/TFM_UOC_AMoya",
"score": 3
}
|
#### File: BacDup/scripts/format_checker.py
```python
import os
import sys
import re
from Bio import SeqIO
from BCBio import GFF
from builtins import str
from HCGB.functions.aesthetics_functions import debug_message
################################################################################
def is_fasta(filename, debug):
'''Fasta checker'''
with open(filename, "r") as handle:
        fasta = SeqIO.parse(handle, "fasta")
        is_fasta_file = any(fasta)  # evaluate once: any() consumes the parser generator
        ## debug messages
        if debug:
            debug_message("Fasta file: ")
            print (filename)
            print (is_fasta_file)
        return is_fasta_file
################################################################################
def fasta_seq(filename, debug):
'''Fasta type checker'''
seqs = {"dna": re.compile("^[acgt]*$", re.I),
"protein": re.compile("^[acdefghiklmnpqrstvwy]*$", re.I)}
if is_fasta(filename, debug):
fasta_sequences = SeqIO.parse(open(filename), "fasta")
for fasta in fasta_sequences:
sequences = str(fasta.seq)
dna = seqs["dna"].search(sequences)
protein = seqs["protein"].search(sequences)
## debug messages
if debug:
debug_message("Type of fasta file: ")
                print ('dna: ' + str(dna))
                print ('protein: ' + str(protein))
return (dna, protein)
################################################################################
def is_gbk(filename, debug):
'''Genbank format file checker'''
## debug messages
if debug:
debug_message("genbank file?")
for rec in SeqIO.parse(filename, "genbank"):
if rec.id:
## debug messages
if debug:
print("True")
return (True)
else:
## debug messages
if debug:
print("False")
return (False)
################################################################################
def is_gff(filename, debug):
''' GFF file checker'''
## debug messages
if debug:
debug_message("GFF file?")
try:
with open(filename,"r") as handle:
limit_info = dict(gff_type=["CDS"])
for rec in GFF.parse(handle, limit_info = limit_info):
if rec.id:
## debug messages
if debug:
print("True")
return (True)
except:
## debug messages
if debug:
print("False")
return (False)
################################################################################
def is_format(filename, debug=False):
'''Determines input format'''
if debug:
debug_message("Format for file: " + filename)
## fasta
if is_fasta(filename, debug):
dna, protein = fasta_seq(filename, debug)
## DNA
if dna is not None:
if debug:
debug_message("fasta DNA")
return ("fasta DNA")
## Protein
elif protein is not None:
if debug:
debug_message("fasta protein" )
return ("fasta protein")
## GFF
if is_gff(filename, debug):
if debug:
debug_message("GFF")
return ("gff")
## genbank
if is_gbk(filename, debug):
if debug:
debug_message("GenBank")
return ("gbk")
else:
if debug:
debug_message("Sorry this file has not any recognizable format:")
print(os.path.splitext(filename))
return (False)
################################################################################
if __name__ == "__main__":
if len(sys.argv) != 2:
print (__doc__)
print ("## Usage format_checker")
print ("python %s file\n" %sys.argv[0])
sys.exit()
is_format(*sys.argv[1:], debug=True)
```
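From Python the entry point is `is_format`, which returns one of "fasta DNA", "fasta protein", "gff", "gbk" or False; a short sketch (the file names are hypothetical, and BacDup with its Biopython/BCBio dependencies is assumed to be installed):
```python
from BacDup.scripts.format_checker import is_format
print(is_format("genome.fna"))                  # hypothetical nucleotide FASTA -> "fasta DNA"
print(is_format("annotation.gff", debug=True))  # hypothetical GFF, with debug messages -> "gff"
```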
#### File: BacDup/scripts/gff_parser.py
```python
import sys
import os
import pandas as pd
import numpy as np
import HCGB
from Bio import SeqIO, Seq
from Bio.SeqRecord import SeqRecord
from BCBio import GFF
from BacDup.scripts.functions import columns_annot_table
from HCGB.functions.aesthetics_functions import debug_message
##################################################
def gff_parser_caller(gff_file, ref_file, output_path, debug):
'''This function calls the actual gff parser
It serves as the entry point either from a module or system call
'''
## set output paths
prot_file = os.path.abspath( os.path.join(output_path, 'proteins.fa'))
csv_file = os.path.abspath( os.path.join(output_path, 'annot_df.csv'))
csv_length = os.path.abspath( os.path.join(output_path, 'length_df.csv'))
list_out_files = [prot_file, csv_file, csv_length]
try:
with open (ref_file) as in_handle:
ref_recs = SeqIO.to_dict(SeqIO.parse(in_handle, "fasta"))
## debug messages
if (debug):
debug_message('GenBank record', 'yellow')
print (ref_recs)
## parse
with open(prot_file, "w") as out_handle:
SeqIO.write(protein_recs(gff_file, ref_recs,
list_out_files, debug=debug), out_handle, "fasta")
## return information
return (list_out_files)
except:
return (False)
############################################################
def protein_recs(gff_file, ref_recs, list_out_files, debug=False):
'''GFF parser to retrieve proteins and annotation
'''
#create an empty dataframe.
columns = columns_annot_table() ## get common column names
annot_df = pd.DataFrame(data=None, columns=columns)
genome_length = pd.DataFrame(data=None, columns=["length"])
with open(gff_file) as in_handle:
##parse the output. Generate SeqRecord and SeqFeatures for predictions
##sort by CDS type. Duplicate genes analysis just needs coding regions to proteins.
limit_info = dict(gff_type=["CDS"])
for rec in GFF.parse(in_handle, limit_info = limit_info, base_dict=ref_recs):
#get genome length for BioCircos plotting
ID = rec.id
genome_length.loc[ID,["length"]]=[len(rec.seq)]
## debug messages
if (debug):
debug_message('GFF record', 'yellow')
print(rec)
for feature in rec.features:
## Debug messages
if (debug):
debug_message('feature: ', 'yellow')
print(feature)
## strand
if feature.strand == -1:
strand = "neg"
else:
strand = "pos"
#we create an ID for each entry
protID = feature.type + "_" + rec.id + "_" + str(feature.location.nofuzzy_start) + "_" + str(feature.location.nofuzzy_end) + "_" + strand
annot_df.loc[protID, ["rec_id", "type", "start", "end", "strand"]] = [ID, feature.type, feature.location.nofuzzy_start, feature.location.nofuzzy_end, strand]
qualif = feature.qualifiers
## Debug messages
if (debug):
debug_message('protID: ' + protID, 'yellow')
debug_message('qualif: ', 'yellow')
print (qualif)
## loop
for keys, values in qualif.items():
#fill the dataframe info
if keys == "Note":
continue
annot_df.loc[protID,[keys]] = [values[0]]
## get gene sequence
gene_seq = Seq.Seq(str(rec.seq[feature.location.nofuzzy_start:feature.location.nofuzzy_end]))
## Debug messages
if (debug):
debug_message('gene_seq: ' + protID, 'yellow')
print (gene_seq)
if feature.type == "CDS":
if feature.strand == -1:
gene_seq = gene_seq.reverse_complement()
# translate genome sequence
table_code = feature.qualifiers["transl_table"][0]
protein_seq = gene_seq.translate(table=table_code, to_stop=False)
# delete STOP symbols
# we set gene_seq.translate to include all stop codons to include
# stop codons in pseudogenes. then, we removed last symbol * for
# each sequence
if protein_seq.endswith("*"):
protein_seq = protein_seq[:-1]
yield(SeqRecord(protein_seq, protID, "", ""))
## print additional information
annot_df.to_csv(list_out_files[1], header=True)
genome_length.to_csv(list_out_files[2], header=True)
#get genome length for BioCircos plotting
#genome_length = pd.DataFrame(data=None, columns=["length"])
#ID = rec.id
#length = len(rec.seq)
#genome_length.loc[ID,["length"]]=[length]
#csv_length = "%s/%s_length.csv" % (output_path, rec.id)
#genome_length.to_csv(csv_length, header=True)
## debug messages
if (debug):
debug_message('annot_df: ', 'yellow')
print(annot_df)
## empty return
return()
#################################################################
def main (gff_file, ref_file, output_folder, debug=False):
#get name
base, ext = os.path.splitext(gff_file)
gff_file = os.path.abspath(gff_file)
#create folder
    output_path = HCGB.functions.file_functions.create_folder(output_folder)
if (debug):
print ("## DEBUG:")
print ("base:" , base)
print ("ext:" , ext)
print ()
gff_parser_caller(gff_file, ref_file, output_path, debug)
################################################################################
if __name__ == "__main__":
if len(sys.argv) != 4:
print (__doc__)
print ("## Usage gff_parser")
print ("python %s gff_file ref_fasta_file output_folder\n" %sys.argv[0])
sys.exit()
main(*sys.argv[1:], debug=True)
#main(*sys.argv[1:])
    # the debug variable is not mandatory; it has a default value defined
    # the "=" is used to indicate the default
```
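The parser can also be driven programmatically through `gff_parser_caller`; a sketch with hypothetical input paths (a GFF annotation plus the genome FASTA it refers to), creating the output folder first because the caller writes into it:
```python
import os
from BacDup.scripts.gff_parser import gff_parser_caller
os.makedirs("parsed_output", exist_ok=True)
result = gff_parser_caller("annotation.gff", "genome.fna", "parsed_output", debug=False)
if result:
    prot_file, csv_file, csv_length = result
    print(prot_file)  # proteins.fa with the translated CDS records
    print(csv_file)   # annot_df.csv with one row of annotation per CDS
```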
|
{
"source": "jfsantos/ift6266h14",
"score": 2
}
|
#### File: jfsantos/ift6266h14/gen_phone.py
```python
import cPickle as pickle
import numpy as np
import theano
_mean = 0.0035805809921434142
_std = 542.48824133746177
def gen_phone(mdl, phones, noise_level):
terr_monitor = mdl.monitor.channels['test_objective']
terr = min(terr_monitor.val_record)
X = theano.tensor.dmatrix('X')
P = theano.tensor.dmatrix('P')
y = mdl.fprop([X,P])
predict = theano.function([X, P], y)
    # Let's start with an all-zero vector, then use the prediction to populate the next sample
duration = 3
fs = 16000
frame_length = mdl.input_space.components[0].dim
x0 = np.asmatrix(np.zeros((1,duration*fs)))
# phones = np.load('test_phones.npy')
phone_code = np.asmatrix(np.zeros((duration*fs,3*62)))
for pi, p in enumerate(phones):
phone_code[pi,[p, p+62, p+2*62]] = 1 # code for 'aw'
for k in np.arange(frame_length,duration*fs):
frame = x0[:,k-frame_length:k]
x0[0,k] = np.random.normal(predict(frame + np.random.normal(0, noise_level[0]*np.sqrt(terr), frame.shape), phone_code[k]), noise_level[1]*np.sqrt(terr))
x0 = x0.T
x0_scaled = x0*_std + _mean
x0a = np.asarray(x0_scaled, dtype=np.int16)
return x0a
if __name__ == "__main__":
from sys import argv
from scipy.io import wavfile
mdl = pickle.load(open(argv[1]))
x0a = gen_phone(mdl)
wavfile.write(argv[2], 16000, x0a)
```
#### File: jfsantos/ift6266h14/mlp_with_source.py
```python
from pylearn2.models.mlp import MLP, CompositeLayer
from pylearn2.space import CompositeSpace
from theano.compat.python2x import OrderedDict
class MLPWithSource(MLP):
def __init__(self, *args, **kwargs):
self.input_source = kwargs.pop('input_source', 'features')
self.target_source = kwargs.pop('target_source', 'targets')
super(MLPWithSource, self).__init__(*args, **kwargs)
def get_input_source(self):
return self.input_source
def get_target_source(self):
return self.target_source
class CompositeLayerWithSource(CompositeLayer):
def get_input_source(self):
return tuple([layer.get_input_source() for layer in self.layers])
def get_target_source(self):
return tuple([layer.get_target_source() for layer in self.layers])
def set_input_space(self, space):
self.input_space = space
for layer, component in zip(self.layers, space.components):
layer.set_input_space(component)
self.output_space = CompositeSpace(tuple(layer.get_output_space()
for layer in self.layers))
def fprop(self, state_below):
return tuple(layer.fprop(component_state) for
layer, component_state in zip(self.layers, state_below))
def get_monitoring_channels(self):
return OrderedDict()
```
|
{
"source": "jfsantos/irasl2018",
"score": 2
}
|
#### File: jfsantos/irasl2018/main.py
```python
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(42) # for reproducibility
from tqdm import tqdm
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from stft_dataset import STFTDataset
from residual import ResidualModel
from highway import HighwayModel
from masking import MaskingModel
from baseline import BaselineModel
from pytorch_utils import TrainLoop, load_checkpoint
def load_data(window_size, step_size, use_log):
print("Loading data...")
G_train = STFTDataset(window=window_size, step=step_size, use_log=use_log)
G_train.load_metadata_from_desc_file('ieee_reverb_only_train.json')
G_train.fit_stats()
G_val = STFTDataset(window=window_size, step=step_size, use_log=use_log)
G_val.load_metadata_from_desc_file('ieee_reverb_only_valid.json')
G_val.feats_mean = G_train.feats_mean
G_val.feats_std = G_train.feats_std
return G_train, G_val
def load_noisy_data(window_size, overlap, use_log):
print("Loading data...")
G_train = STFTDataset(window=window_size, step=overlap, use_log=use_log)
G_train.load_metadata_from_desc_file('ieee_noisy_train.json')
G_train.fit_stats()
G_val = STFTDataset(window=window_size, step=overlap, use_log=use_log)
G_val.load_metadata_from_desc_file('ieee_noisy_valid.json')
G_val.feats_mean = G_train.feats_mean
G_val.feats_std = G_train.feats_std
return G_train, G_val
def load_noisy_timit(window_size, overlap, use_log):
print("Loading data...")
G_train = STFTDataset(window=window_size, step=overlap, use_log=use_log)
G_train.load_metadata_from_desc_file('timit_noisy_train.json')
G_train.fit_stats()
G_val = STFTDataset(window=window_size, step=overlap, use_log=use_log)
G_val.load_metadata_from_desc_file('timit_noisy_valid.json')
G_val.feats_mean = G_train.feats_mean
G_val.feats_std = G_train.feats_std
return G_train, G_val
def load_reverb_timit(window_size, overlap, use_log):
print("Loading data...")
G_train = STFTDataset(window=window_size, step=overlap, use_log=use_log)
G_train.load_metadata_from_desc_file('timit_reverb_only_train.json')
G_train.fit_stats()
G_val = STFTDataset(window=window_size, step=overlap, use_log=use_log)
G_val.load_metadata_from_desc_file('timit_reverb_only_valid.json')
G_val.feats_mean = G_train.feats_mean
G_val.feats_std = G_train.feats_std
return G_train, G_val
def train_fn(model, optimizer, criterion, batch):
x, y, lengths = batch
x = Variable(x.cuda())
y = Variable(y.cuda(), requires_grad=False)
mask = Variable(torch.ByteTensor(x.size()).fill_(1).cuda(),
requires_grad=False)
for k, l in enumerate(lengths):
mask[:l, k, :] = 0
y_hat = model(x)
# Apply mask
y_hat.masked_fill_(mask, 0.0)
y.masked_fill_(mask, 0.0)
loss = criterion(y_hat, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.data.item()
def valid_fn(model, criterion, batch):
x, y, lengths = batch
x = Variable(x.cuda(), volatile=True)
y = Variable(y.cuda(), requires_grad=False)
mask = Variable(torch.ByteTensor(x.size()).fill_(1).cuda(),
requires_grad=False)
for k, l in enumerate(lengths):
mask[:l, k, :] = 0
y_hat = model(x)
# Apply mask
y_hat.masked_fill_(mask, 0.0)
y.masked_fill_(mask, 0.0)
val_loss = criterion(y_hat, y).data.item()
return val_loss
if __name__ == '__main__':
from argparse import ArgumentParser
import os
from glob import glob
parser = ArgumentParser()
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=100)
parser.add_argument('--num_hidden', type=int, default=256)
parser.add_argument('--num_blocks', type=int, default=3)
parser.add_argument('--num_layers_per_block', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--model_type', default='residual')
parser.add_argument('--window_size', type=int, default=32)
parser.add_argument('--step_size', type=int, default=16)
parser.add_argument('--data_type', default='reverb')
parser.add_argument('--use_log', action='store_true')
parser.add_argument('checkpoint_path')
args = parser.parse_args()
try:
train_loop = load_checkpoint(args.checkpoint_path)
except ValueError:
print('No checkpoints, initializing a model from scratch...')
window_size = args.window_size # in ms
step_size = args.step_size
n_input = int(1e-3*window_size*16000/2 + 1)
n_output = n_input
if args.model_type == 'residual':
model = ResidualModel(n_input,
args.num_blocks,
args.num_hidden,
args.num_layers_per_block).cuda()
elif args.model_type == 'highway':
model = HighwayModel(n_input,
args.num_blocks,
args.num_hidden,
args.num_layers_per_block).cuda()
elif args.model_type == 'masking':
model = MaskingModel(n_input,
args.num_blocks,
args.num_hidden,
args.num_layers_per_block).cuda()
elif args.model_type == 'baseline':
model = BaselineModel(n_input,
args.num_hidden,
args.num_layers_per_block).cuda()
else:
            raise ValueError('model_type has to be one of "residual", "highway", "masking", or "baseline"')
print(model)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
if args.data_type == 'reverb':
print('Loading reverb dataset')
G_train, G_val = load_data(window_size, step_size, args.use_log)
elif args.data_type == 'noisy':
print('Loading noisy dataset')
G_train, G_val = load_noisy_data(window_size, step_size, args.use_log)
elif args.data_type == 'noisy_timit':
print('Loading noisy_timit dataset')
G_train, G_val = load_noisy_timit(window_size, step_size, args.use_log)
elif args.data_type == 'reverb_timit':
G_train, G_val = load_reverb_timit(window_size, step_size, args.use_log)
else:
            raise ValueError('data_type has to be one of "reverb", "noisy", "noisy_timit", or "reverb_timit"')
train_loader = DataLoader(G_train, batch_size=args.batch_size,
collate_fn=G_train.collate_samples,
num_workers=8, shuffle=True)
valid_loader = DataLoader(G_val, batch_size=args.batch_size,
collate_fn=G_train.collate_samples,
num_workers=4)
train_loop = TrainLoop(model,
optimizer, criterion,
train_fn, train_loader,
valid_fn, valid_loader,
checkpoint_path=args.checkpoint_path)
train_loop.train(args.max_epochs)
```
|
{
"source": "jfsantos/maracas",
"score": 2
}
|
#### File: maracas/tests/test_asl_meter.py
```python
from maracas import asl_meter
from maracas.utils import wavread
import numpy as np
def test_asl_meter():
x, fs = wavread('tests/sp10.wav')
assert np.isclose(asl_meter(x, fs), -25.631381743520010)
if __name__ == '__main__':
test_asl_meter()
```
|
{
"source": "jfsantos/stable-baselines",
"score": 3
}
|
#### File: stable_baselines/common/input.py
```python
import tensorflow as tf
from gym.spaces import Discrete, Box
def observation_input(ob_space, batch_size=None, name='Ob'):
"""
Build observation input with encoding depending on the observation space type
:param ob_space: (Gym Space) The observation space
:param batch_size: (int) batch size for input
(default is None, so that resulting input placeholder can take tensors with any batch size)
:param name: (str) tensorflow variable name for input placeholder
:return: (TensorFlow Tensor, TensorFlow Tensor) input_placeholder, processed_input_tensor
"""
if isinstance(ob_space, Discrete):
input_x = tf.placeholder(shape=(batch_size,), dtype=tf.int32, name=name)
processed_x = tf.to_float(tf.one_hot(input_x, ob_space.n))
return input_x, processed_x
elif isinstance(ob_space, Box):
input_shape = (batch_size,) + ob_space.shape
input_x = tf.placeholder(shape=input_shape, dtype=ob_space.dtype, name=name)
processed_x = tf.to_float(input_x)
return input_x, processed_x
else:
raise NotImplementedError("Error: the model does not support input space of type {}".format(
type(ob_space).__name__))
```
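A short usage sketch for `observation_input` with a `Box` space, using the TensorFlow 1.x graph-mode API that stable-baselines targets (gym and TF 1.x are assumed to be installed):
```python
import numpy as np
import tensorflow as tf
from gym.spaces import Box
from stable_baselines.common.input import observation_input
ob_space = Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
input_ph, processed = observation_input(ob_space, batch_size=None, name='Ob')
with tf.Session() as sess:
    obs = np.zeros((2, 4), dtype=np.float32)  # a batch of 2 observations
    print(sess.run(processed, feed_dict={input_ph: obs}).shape)  # (2, 4)
```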
|
{
"source": "jfschaefer/GLIFcore",
"score": 2
}
|
#### File: GLIFcore/glif/Glif.py
```python
from typing import Optional
from distutils.spawn import find_executable
import glif.gf as gf
import glif.commands as cmd
import glif.parsing as parsing
import glif.mmt as mmt
import glif.utils as utils
from glif.utils import Result
import os
import shutil
DEFAULT_ARCHIVE = 'tmpGLIF/default'
class Glif(object):
def __init__(self):
# GF
self._gfshell: Optional[gf.GFShellRaw] = None
self._gfshellFailedLogs: Optional[str] = None
# MMT and MathHub
self.mmtjar: Optional[str] = None
self.mh: Optional[mmt.MathHub] = None
self._mmt: Optional[mmt.MMTInterface] = None
self._findMMTlogs: list[str] = []
self._mmtFailedStartupLogs: list[str] = []
self._mmtFailedStartupMessage: Optional[str] = None
self._initMMTLocation()
self.defaultview : Optional[str] = None
# ELPI
self.defaultelpi : Optional[str] = None
self._typecheckelpi : bool = False
self._archive: Optional[str] = None
self._subdir: Optional[str] = None
if self.mh:
if DEFAULT_ARCHIVE not in self.mh.archives:
assert self.mh.makeArchive(DEFAULT_ARCHIVE).success
self.cwd = os.path.join(self.mh.archives[DEFAULT_ARCHIVE], 'source')
self._archive = DEFAULT_ARCHIVE
else:
self.cwd = os.getcwd()
self._commands: dict[str, cmd.CommandType] = {} # command name -> command type
self._loadInitialCommands()
def setArchive(self, archive: str, subdir: Optional[str], create: bool = False) -> Result[str]:
if not self.mh:
return Result(False, None, 'Error: MathHub folder not found\nLogs:' + parsing.indent('\n'.join(self._findMMTlogs)))
logs = []
newArchiveCreated = False
if archive not in self.mh.archives:
if create:
r = self.mh.makeArchive(archive)
if not r.success:
return Result(False, None, f'Error: Failed to create archive {archive}:' + parsing.indent(r.logs))
logs.append(f'Successfully created archive {archive}')
newArchiveCreated = True
else:
return Result(False, None, f'Error: Archive {archive} doesn\'t exist')
if subdir and not self.mh.existsSubdir(archive, subdir):
if create:
assert self.mh.makeSubdir(archive, subdir).success
logs.append(f'Successfully created directory {subdir} in archive {archive}')
else:
return Result(False, None, f'Error: Archive {archive} doesn\'t have a directory {subdir}')
self._archive = archive
self._subdir = subdir
if self._subdir:
self.cwd = os.path.join(self.mh.archives[self._archive], 'source', self._subdir)
else:
self.cwd = os.path.join(self.mh.archives[self._archive], 'source')
if newArchiveCreated and self._mmt:
self._mmt.do_shutdown()
self._mmt = None
self._mmtFailedStartupLogs = []
self._mmtFailedStartupMessage = None
logs.append('MMT will be reloaded')
if self._gfshell:
self._gfshell.do_shutdown()
self._gfshell = None
logs.append('GF shell will be reloaded')
return Result(True, '\n'.join(logs))
def getArchiveSubdir(self) -> Result[tuple[str,Optional[str]]]:
if self._archive:
return Result(True, (self._archive, self._subdir))
return Result(False, None, 'No MMT archive selected. This is probably due to problems during the initialization of MMT. Here are the logs:\n' + parsing.indent("\n".join(self._findMMTlogs)))
def _initMMTLocation(self):
# JAR
mmtjar = utils.find_mmt_jar()
self._findMMTlogs.append('Finding mmt.jar: "' + mmtjar.logs + '"')
if not mmtjar.success:
return
assert mmtjar.value
self._findMMTlogs.append('Location: ' + mmtjar.value)
self.mmtjar = mmtjar.value
# MH
mhdir = utils.find_mathhub_dir(self.mmtjar)
self._findMMTlogs.append('Finding MathHub: "' + mhdir.logs + '"')
if not mhdir.success:
return
assert mhdir.value
self._findMMTlogs.append('Location: ' + mhdir.value)
self.mh = mmt.MathHub(mhdir.value)
def getMMT(self) -> Result[mmt.MMTInterface]:
if self._mmt:
return Result(True, self._mmt)
if not (self.mmtjar and self.mh):
return Result(False, logs = '\n'.join(self._findMMTlogs))
assert self.mmtjar
assert self.mh
try:
self._mmt = mmt.MMTInterface(self.mmtjar, self.mh)
except mmt.MMTStartupException as ex:
self._mmtFailedStartupLogs = ex.logs
self._mmtFailedStartupMessage = ex.message
return Result(False, logs=ex.message)
return Result(True, self._mmt)
def _loadInitialCommands(self):
# load GF commands
for ct in cmd.GF_COMMAND_TYPES + cmd.GLIF_COMMAND_TYPES:
for name in ct.names:
self._commands[name] = ct
def executeCell(self, code: str) -> list[Result[cmd.Items]]:
fileR = parsing.identifyFile(code)
if fileR.success:
assert fileR.value
type_ = fileR.value[0]
name = fileR.value[1]
ending = type_.split('-')[0] # should be one in 'mmt', 'gf', 'elpi'
archiveresult = self.getArchiveSubdir()
if ending == 'mmt' and not archiveresult.success:
return [Result(False, None, archiveresult.logs)]
with open(os.path.join(self.cwd, f'{name}.{ending}'), 'w', encoding='utf8') as fp:
if type_ in ['mmt-view', 'mmt-theory']:
assert archiveresult.value
archive, subdir = archiveresult.value
fp.write(f'namespace http://mathhub.info/{archive}{"/" + subdir if subdir else ""} ❚')
elif type_ in ['elpi', 'elpi-notc']:
fp.write('accumulate glif. ')
fp.write(fileR.value[2])
if type_ in ['elpi', 'elpi-notc']:
fp.write('\n\nnamespace glifutil { type success (list string) -> prop. success _. }\n')
try:
if type_ == 'elpi':
self._typecheckelpi = True
result = self.executeCommand(f'import "{name}.{ending}"')
finally:
self._typecheckelpi = False
if result.success and type_ == 'mmt-view' and self.defaultview != name:
if result.logs:
result.logs += '\n'
result.logs += f'"{name}" is the new default view'
self.defaultview = name
return [result]
# TODO: comments and multiple commands
return self.executeCommands(code)
def executeCommands(self, code: str) -> list[Result[cmd.Items]]:
results = []
currentcommand = ''
for line in code.splitlines():
line = line.strip()
if line.startswith('"') or currentcommand.endswith('|'):
currentcommand += '\n' + line
continue
if line.startswith('--') or line.startswith('//') or line.startswith('#'):
continue
if line == '':
continue
if currentcommand.strip():
results.append(self.executeCommand(currentcommand))
currentcommand = line
if currentcommand.strip():
results.append(self.executeCommand(currentcommand))
if not results:
return [Result(False, logs='No command given')]
return results
def executeCommand(self, command: str) -> Result[cmd.Items]:
items = None
rest = command.strip()
while rest:
if ' ' in rest:
name = rest[:rest.find(' ')]
else:
name = rest
if name not in self._commands:
return Result(False, logs=f'Unknown command "{name}"')
r = self._commands[name].fromString(rest)
if not r.success:
return Result(False, logs=r.logs)
assert r.value
cmd, rest = r.value
if items:
items = cmd.apply(self, items)
else:
items = cmd.execute(self)
rest = rest.strip()
if not items:
return Result(False, logs='No command given')
return Result(True, value=items)
def importGFfile(self, filename: str) -> Result[None]:
success = True
logs = []
gfresult = self.getGfShell()
if gfresult.success:
gf = gfresult.value
assert gf
r = gf.handle_command(f'import {filename}').strip()
if r and not r.startswith('Abstract changed'): # Failure
success = False
logs.append(f'GF import failed:\n{parsing.indent(r)}')
else:
success = False
logs.append(f'GF import failed:\n{parsing.indent(gfresult.logs)}')
mmtresult = self.getMMT()
if mmtresult.success:
mmt = mmtresult.value
assert mmt
assert self._archive
rr = mmt.buildFile(self._archive, self._subdir, filename)
if not rr.success and rr.logs: # We get failures (without logs) for concrete syntaxes
# TODO: Find a better solution!
logs.append(f'MMT import failed:\n{parsing.indent(rr.logs)}')
success = False
if rr.success:
rrr = mmt.elpigen('types', self._archive, self._subdir, filename + '/' + os.path.splitext(os.path.basename(filename))[0])
if not rrr.success:
logs.append(f'ELPI export failed:\n{parsing.indent(rrr.logs)}')
success = False
else:
assert rrr.value
with open(os.path.join(self.cwd, os.path.splitext(filename)[0]+'.elpi'), 'w', encoding='utf8') as fp:
fp.write(rrr.value)
else:
success = False
logs.append(f'MMT import failed:\n{parsing.indent(mmtresult.logs)}')
return Result(success, logs='\n'.join(logs))
def importMMTfile(self, filename: str) -> Result[None]:
mmtresult = self.getMMT()
if mmtresult.success:
mmt = mmtresult.value
assert mmt
assert self._archive
rr = mmt.buildFile(self._archive, self._subdir, filename)
if not rr.success:
return Result(False, logs=rr.logs)
else:
return Result(False, logs=f'MMT import failed:\n{parsing.indent(mmtresult.logs)}')
return Result(True)
def importELPIfile(self, filename: str) -> Result[None]:
fullpath = os.path.join(self.cwd, filename)
# using f'elpi -I {__file__}' instead
# shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'glif.elpi'),
# os.path.join(os.path.dirname(fullpath), 'glif.elpi'))
if self._typecheckelpi:
er = utils.runelpi(self.cwd, fullpath, 'glifutil.success')
if not er.success:
return Result(False, logs=er.logs)
assert er.value
warning = er.value[0].strip() # stdout should be empty
if warning:
return Result(False, logs=warning)
self.defaultelpi = fullpath
r: Result[None] = Result(True)
r.logs = f'{filename} is the new default file for ELPI commands'
return r
def getGfShell(self) -> Result[gf.GFShellRaw]:
if not self._gfshell and self._gfshellFailedLogs is None:
place = find_executable('gf')
if place:
self._gfshell = gf.GFShellRaw(place, cwd = self.cwd)
else:
self._gfshellFailedLogs = 'Failed to locate executable "gf"'
if self._gfshell:
return Result(True, self._gfshell)
else:
assert self._gfshellFailedLogs
return Result(False, logs=self._gfshellFailedLogs)
def do_shutdown(self):
if self._gfshell:
self._gfshell.do_shutdown()
if self._mmt:
self._mmt.do_shutdown()
```
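For orientation, here is a minimal usage sketch of the `Glif` class defined above, mirroring the calls exercised in `test_glif.py` further below. It assumes GF, `mmt.jar` and a MathHub folder are available on the machine, since `Glif()` locates them at startup.
```python
# a minimal sketch, assuming GF, mmt.jar and a MathHub folder can be found on this machine
from glif import Glif

glif = Glif()
result = glif.executeCommand('ps "Hello World" | ps -unchars')
if result.success:
    print(result.value)      # the piped items, e.g. "HelloWorld"
else:
    print(result.logs)
glif.do_shutdown()
```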
#### File: GLIFcore/glif/parsing.py
```python
from typing import Optional, Union
from glif.utils import Result
# COMMAND PARSING
def parseCommandName(s: str) -> tuple[str, str]:
s = s.strip()
assert s
i = s.find(' ')
if i > 0: # space was found
return (s[:i], s[i+1:].strip())
else: # command is only command name
return (s, '')
class CommandArgument(object):
def __init__(self, key: str, value: str = ''):
self.key = key
self.value = value
def __eq__(self, r):
return self.key == r.key and self.value == r.value
def __str__(self):
if self.value:
return f'-{self.key}={argformat(self.value)}'
return f'-{self.key}'
def parseCommandArg(s0: str) -> Result[tuple[CommandArgument, str]]:
s = s0.strip()
# Deal with leading "-" or "--"
if not s or not s[0] == '-' or len(s) == 1:
return Result(success = False, logs = f'Expected argument starting with "-", found "{s}"')
if s[1] == '-':
s = s[2:]
else:
s = s[1:]
argname, s = parseIdentifier(s, allowMinus = True)
if not s:
return Result(True, (CommandArgument(argname), ''))
if s[0] == ' ':
return Result(True, (CommandArgument(argname), s[1:]))
if s[0] != '=':
return Result(success = False, logs = f'Unexpected character "{s[0]}" when parsing "{s0}"')
s = s[1:]
if not s:
return Result(success = False, logs = f'Missing argument value in "{s0}"')
if s[0] == '"':
res = parseString(s)
if res.success:
assert res.value
argval, s = res.value
return Result(success = True, value=(CommandArgument(argname, argval), s))
else:
return Result(success = False, logs=res.logs)
# r = parseString(s)
elif s[0].isidentifier() or s[0].isalnum() or s[0] in {'.', '/'}:
argval, s = parseUntilSpace(s)
return Result(success = True, value=(CommandArgument(argname, argval), s))
else:
return Result(success = False, logs=f'Unexpected argument value in "{s0}"')
def parseString(s: str) -> Result[tuple[str, str]]:
assert s[0] == '"'
i = 1
lastWasBackslash = False
res = ''
while i < len(s):
if lastWasBackslash:
if s[i] in ['"', '\\']:
res += s[i]
else: # assume backslash wasn't used for escaping
res += '\\' + s[i]
lastWasBackslash = False
elif s[i] == '\\':
lastWasBackslash = True
elif s[i] == '"': # end of string
return Result(True, (res, s[i+1:]))
else:
res += s[i]
i += 1
return Result(False, logs = f'String not closed: "{s}"')
def parseUntilSpace(s: str) -> tuple[str, str]:
assert s
result = s[0]
i = 1
while i < len(s):
if s[i].isspace():
return (result, s[i:])
else:
result += s[i]
i += 1
return (result, '')
def parseIdentifier(s: str, canbenum: bool = False, allowMinus: bool = False) -> tuple[str, str]:
assert s
assert s[0].isidentifier() or (canbenum and s[0].isalnum()) or s[0] == '?' # ? for user-defined macros
identifier = s[0]
i = 1
while i < len(s):
if s[i].isalnum() or s[i].isidentifier() or (allowMinus and s[i] == '-'): # not '7'.isidentifier()
identifier += s[i]
i += 1
else:
return (identifier, s[i:])
return (identifier, '')
class BasicCommand(object):
def __init__(self, name: str, args: list[CommandArgument], mainargs: list[str]):
self.name = name
self.args = args
self.mainargs = mainargs
def gfFormat(self, mainarg: Optional[str], mainargIsStr: bool = False):
head = f'{self.name} {" ".join([str(a) for a in self.args])}'
if mainarg:
return f'{head} {strformat(mainarg) if mainargIsStr else mainarg}'
else:
return head
def getValOrDefault(self, variants: set[str], default: str) -> str:
for c in self.args:
if c.key in variants:
return c.value
return default
def argformat(s : str) -> str:
if s.isidentifier() or s.isalnum():
return s
return strformat(s)
def strformat(s : str) -> str:
return '"' + s.replace('\\', '\\\\').replace('"', '\\"') + '"'
def parseBasicCommand(string: str, splitMainArgAtSpace = False) -> Result[tuple[BasicCommand, str]]:
string = string.strip()
commandname, rest = parseCommandName(string.strip())
command = BasicCommand(commandname, [], [])
# Args
while True:
rest = rest.strip()
if not rest:
break
if rest[0] == '-':
r = parseCommandArg(rest)
if not r.success:
return Result(False, logs=r.logs)
assert r.value
arg, rest = r.value
command.args.append(arg)
else:
break
rest = rest.strip()
if not rest:
return Result(True, (command, ''))
if rest[0] == '|':
return Result(True, (command, rest[1:]))
# Find next pipe
mainarg = '' # Record main argument
i = 0
while i < len(rest):
if rest[i] == '|':
# Done :)
mainarg = mainarg.strip()
if mainarg:
command.mainargs.append(mainarg)
return Result(True, (command, rest[i+1:]))
elif rest[i] == '"':
rr = parseString(rest[i:])
if not rr.success:
return Result(False, None, logs=rr.logs)
assert rr.value
rest = rr.value[1]
command.mainargs.append(rr.value[0])
i = 0
elif splitMainArgAtSpace and rest[i].isspace():
mainarg = mainarg.strip()
if mainarg:
command.mainargs.append(mainarg)
mainarg = ''
i += 1
else:
i += 1
mainarg += rest[i-1]
mainarg = mainarg.strip()
if mainarg:
command.mainargs.append(mainarg)
return Result(True, (command, ''))
# OTHER USEFUL THINGS
def _nextup(s, i, s2):
if len(s)-i < len(s2):
return 0 # failure
if s[i:i+len(s2)] == s2:
return i+len(s2) # > 0 (assuming s2 != '')
return 0 # failure
def _skipto(s, i, s2):
while i < len(s):
n = _nextup(s, i, s2)
if n:
return n
i += 1
return 0
def identifyFile(s: str) -> Result[tuple[str, str, str]]: # (type, name, content)
i = 0
while True:
if i >= len(s):
return Result(False)
elif s[i].isspace():
i += 1
elif _nextup(s, i, '//'): # mmt comment
i = _skipto(s, i, '❚')
elif _nextup(s, i, '--'): # gf comment
i = _skipto(s, i, '\n')
elif _nextup(s, i, '{-'): # gf block comment
i = _skipto(s, i, '-}')
elif _nextup(s, i, '%'): # elpi comment
i = _skipto(s, i, '\n')
elif _nextup(s, i, '/*'): # elpi block comment
i = _skipto(s, i, '*/')
elif _nextup(s, i, 'namespace'):
i = _skipto(s, i, '❚')
else:
for (k, t) in [('theory', 'mmt-theory'), ('view', 'mmt-view'),
('abstract', 'gf-abstract'), ('concrete', 'gf-concrete'), ('resource', 'gf-resource'),
('interface', 'gf-interface'), ('instance', 'gf-instance'),
('incomplete concrete', 'gf-incomplete concrete'),
('mmt:', 'mmt'), ('elpi:', 'elpi'), ('elpi-notc:', 'elpi-notc'), ('gf:', 'gf'),
('MMT:', 'mmt'), ('ELPI:', 'elpi'), ('ELPI-NOTC:', 'elpi-notc'), ('GF:', 'gf'),
('kind', 'elpi'), ('type', 'elpi')]:
n = _nextup(s, i, k)
if n:
s2 = s[n:].strip()
if not s2 or not s2[0].isidentifier():
return Result(False, None, f'Expected identifier after "{k}"')
pi = parseIdentifier(s2)
return Result(True, (t, pi[0], pi[1] if k.endswith(':') else s))
return Result(False)
return Result(False)
def indent(s: str, level: int = 4) -> str:
return '\n'.join([' '*level + l for l in s.splitlines()])
```
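A short usage sketch of `parseBasicCommand`, showing how option flags, a quoted main argument and a pipe are split. The command string is made up for illustration and assumes the `glif` package above is importable.
```python
# a minimal sketch; the command string is hypothetical
from glif.parsing import parseBasicCommand

r = parseBasicCommand('parse -cat=S "someone loves someone" | ps -unchars')
assert r.success and r.value
command, rest = r.value
print(command.name)                                # parse
print(command.args[0].key, command.args[0].value)  # cat S
print(command.mainargs)                            # ['someone loves someone']
print(rest)                                        # ps -unchars  (everything after the pipe)
```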
#### File: GLIFcore/test/test_glif.py
```python
from glif import Glif
from typing import Optional
import unittest
import os
TEST_ARCHIVE = 'tmpGLIF/test'
class TestGlif(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.glif = Glif()
# copy files
cls.glif.mh.makeSubdir(TEST_ARCHIVE, 'mini')
for name in ['MiniGrammar.gf', 'MiniGrammarEng.gf', 'FOL.mmt', 'MiniGrammarDDT.mmt', 'MiniGrammarSemantics.mmt']:
source = os.path.join(os.path.dirname(__file__), 'resources', 'mmt' if name.endswith('.mmt') else 'gf', name)
with open(source, 'r') as fp:
with open(cls.glif.mh.getFilePath(TEST_ARCHIVE, 'mini', name), 'w') as fp2:
fp2.write(fp.read())
@classmethod
def tearDownClass(cls):
cls.glif.do_shutdown()
def command_test(self, cmdstr, success = True, output = None):
r = self.glif.executeCommand(cmdstr)
if success:
self.assertTrue(r.success)
else:
self.assertFalse(r.success)
if output is not None:
self.assertEqual(str(r.value), output)
def test_basic(self):
self.command_test(f'archive {TEST_ARCHIVE} mini')
self.command_test('import MiniGrammar.gf MiniGrammarEng.gf')
self.command_test('i MiniGrammarSemantics.mmt')
self.command_test('parse -cat=S "someone loves someone"', output='s someone (love someone)')
self.command_test('generate_random') # tests GF execute command
r = self.glif.executeCommand('ps "Hello World" | ps -unchars')
self.assertTrue(r.success)
assert r.value
self.assertEqual(str(r.value), 'HelloWorld')
def test_gf_multiple_output(self):
self.command_test(f'archive {TEST_ARCHIVE} mini')
self.command_test('import MiniGrammar.gf MiniGrammarEng.gf')
r = self.glif.executeCommand('parse -cat=S "someone loves someone and someone loves everyone and everyone loves someone"')
self.assertTrue(r.success)
self.assertEqual(len(r.value.items), 2)
strs = [str(item) for item in r.value.items]
self.assertIn('and (s someone (love someone)) (and (s someone (love everyone)) (s everyone (love someone)))', strs)
self.assertIn('and (and (s someone (love someone)) (s someone (love everyone))) (s everyone (love someone))', strs)
def elpi_codecell_test(self, content, success):
rs = self.glif.executeCell(content)
self.assertEqual(len(rs), 1)
self.assertTrue(rs[0].success) # even if the content has errors, the command should have been executed successfully
assert rs[0].value
if success:
self.assertFalse(bool(rs[0].value.errors))
else:
self.assertTrue(bool(rs[0].value.errors))
def test_elpi_codecell(self):
self.elpi_codecell_test('type h prop.', True)
self.elpi_codecell_test('elpi: test1\ntype h prop.\nh.', True)
self.elpi_codecell_test('elpi-notc: test2.\nh _.', True)
self.elpi_codecell_test('elpi: test3\ntype h prop.\nh _.', False)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jfsehuanes/BATSpy",
"score": 2
}
|
#### File: jfsehuanes/BATSpy/bats.py
```python
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pandas as pd
from sklearn.linear_model import LinearRegression as linreg
from thunderfish.dataloader import load_data
from thunderfish.powerspectrum import spectrogram, decibel
from thunderfish.eventdetection import detect_peaks, percentile_threshold
from thunderfish.harmonics import harmonic_groups
from thunderfish.powerspectrum import psd
from call_parameters import call_window
from IPython import embed
class Batspy:
def __init__(self, file_path, f_resolution=2**9, overlap_frac=0.7, dynamic_range=50, pcTape_rec=False,
multiCH=False):
self.file_path = file_path
self.file_name = file_path.split('/')[-1]
self.freq_resolution = f_resolution # in Hz
self.overlap_frac = overlap_frac # Overlap fraction of the FFT windows
self.dynamic_range = dynamic_range # in dB
self.pcTape_rec = pcTape_rec # Was this file recorded by PC-Tape?
self.multiCH = multiCH # Was this file recorded together with other channels simultaneously?
# Flow control booleans
self.data_loaded = False
self.spectrogram_computed = False
self.spectrogram_plotted = False
def load_data(self):
dat, sr, u = load_data(self.file_path)
self.recording_trace = dat.squeeze()
if self.pcTape_rec: # This fixes PC-Tape's bug that writes 1/10 of the samplingrate in the header of the .wav file
self.sampling_rate = sr * 10.
else:
self.sampling_rate = sr
self.data_loaded = True
pass
def compute_spectrogram(self):
if not self.data_loaded:
self.load_data()
from thunderfish.powerspectrum import nfft
n_nfft = nfft(self.sampling_rate, self.freq_resolution)
self.spec_mat, self.f, self.t = mlab.specgram(self.recording_trace, NFFT=n_nfft, Fs=self.sampling_rate,
noverlap=int(n_nfft * self.overlap_frac))
self.spectrogram_computed = True
pass
def plot_spectrogram(self, dec_mat=None, spec_mat=None, f_arr=None, t_arr=None, in_kHz=True, adjust_to_max_db=True,
ret_fig_and_ax=False, fig_input=None, showit=True):
if spec_mat is None and dec_mat is None:
spec_mat = self.spec_mat
dec_mat = decibel(spec_mat)
elif spec_mat is not None and dec_mat is None:
dec_mat = decibel(spec_mat)
elif not self.spectrogram_computed:
self.compute_spectrogram()
if adjust_to_max_db:
# set dynamic range
ampl_max = np.nanmax(
dec_mat) # define maximum; use nanmax, because decibel function may contain NaN values
dec_mat -= ampl_max + 1e-20 # subtract maximum so that the maximum value is set to lim x--> -0
dec_mat[dec_mat < -self.dynamic_range] = - self.dynamic_range
# Fix NaNs issue
if np.isnan(dec_mat).any():
dec_mat[np.isnan(dec_mat)] = - self.dynamic_range
else:
dec_mat = spec_mat
if f_arr is None:
f_arr = self.f
if t_arr is None:
t_arr = self.t
if in_kHz:
hz_fac = 1000
else:
hz_fac = 1
inch_factor = 2.54
fs = 20
if fig_input is None:
fig = plt.figure(constrained_layout=True, figsize=(56. / inch_factor, 30. / inch_factor))
else:
fig = fig_input
gs = fig.add_gridspec(2, 3, height_ratios=(4, 1), width_ratios=(4.85, 4.85, .3))
ax0 = fig.add_subplot(gs[0, :-1])
ax1 = fig.add_subplot(gs[1, :-1])
ax2 = fig.add_subplot(gs[0:-1, -1])
im = ax0.imshow(dec_mat, cmap='jet',
extent=[t_arr[0], t_arr[-1],
int(f_arr[0])/hz_fac, int(f_arr[-1])/hz_fac], # divide by 1000 for kHz
aspect='auto', interpolation='hanning', origin='lower', alpha=0.7, vmin=-self.dynamic_range,
vmax=0.)
cb = fig.colorbar(im, cax=ax2)
cb.set_label('dB', fontsize=fs)
ax0.set_ylabel('Frequency [kHz]', fontsize=fs+1)
ax1.set_ylabel('Amplitude [a.u.]', fontsize=fs+1)
ax1.set_xlabel('Time [sec]', fontsize=fs+1)
for c_ax in [ax0, ax1, ax2]:
c_ax.tick_params(labelsize=fs)
# Plot the soundwave underneath the spectrogram
ax1.set_facecolor('black')
time_arr = np.arange(0, len(self.recording_trace)/self.sampling_rate, 1/self.sampling_rate)
ax1.plot(time_arr, self.recording_trace, color='yellow', lw=2, rasterized=True)
# Share the time axis of spectrogram and raw sound trace
ax0.get_shared_x_axes().join(ax0, ax1)
ax1.set_xlim(0, time_arr[-1])
# Remove time xticks of the spectrogram
ax0.xaxis.set_major_locator(plt.NullLocator())
self.spectrogram_plotted = True
if ret_fig_and_ax:
return fig, (ax0, ax1)
else:
pass
if showit:
plt.show()
else:
pass
def detect_calls(self, det_range=(50000, 180000), th_between_calls=0.004, plot_debug=False,
plot_in_spec=False, save_spec_w_calls=False):
# Get an average over all frequency channels within detection range
av_power = np.mean(self.spec_mat[np.logical_and(self.f > det_range[0], self.f < det_range[1])], axis=0)
th = np.min(av_power) # THIS THRESHOLD ROCKS YOUR PANTS! for more detections, increase f_res. 2^7 or 2^8
if th <= 0: # Fix cases where th <= 0
th = np.mean(av_power)
peaks, _ = detect_peaks(av_power, th) # Use thunderfish's peak-trough algorithm
# clean pks that might be echoes
below_t_th = np.diff(self.t[peaks]) < th_between_calls
if len(np.where(below_t_th)[0]) == 0:
cleaned_peaks = peaks
else:
cleaned_peaks = np.delete(peaks, np.where(below_t_th)[0])
if plot_debug:
fig, ax = plt.subplots()
ax.plot(self.t, av_power)
ax.plot(self.t[cleaned_peaks], np.ones(len(cleaned_peaks)) * np.max(av_power), 'o', ms=10, color='darkred',
alpha=.8, mec='k', mew=1.5)
ax.plot([self.t[0], self.t[-1]], [th, th], '--k', lw=2.5)
# plt.show()
if plot_in_spec:
spec_fig, spec_ax = self.plot_spectrogram(spec_mat=self.spec_mat, f_arr=self.f, t_arr=self.t,
ret_fig_and_ax=True, showit=False)
spec_ax = spec_ax[0]
spec_ax.plot(self.t[cleaned_peaks], np.ones(len(cleaned_peaks))*80, 'o', ms=10, # plots the detection at 80kHz
color='darkred', alpha=.8, mec='k', mew=1.5)
spec_fig.suptitle(self.file_name.split('.')[0])
if save_spec_w_calls:
spec_fig.savefig('test_result/detected_calls/' + self.file_name.split('.')[0] + '.pdf')
return av_power, cleaned_peaks
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print("ERROR\nPlease tell me the FilePath of the recording you wish to analyze as 1st argument and if it is"
" a single recording ('s') or part of a multi-channel ('m') recording as second argument")
quit()
recording = sys.argv[1]
rec_type = sys.argv[2]
# Analyze MultiChannel
if rec_type == 'm':
from multiCH import get_all_ch, get_calls_across_channels
from helper_functions import manualCallDetectionAdjustment
# Get all the channels corresponding to the input file
all_recs = get_all_ch(recording)
# Get the calls
calls, chOfCall = get_calls_across_channels(all_recs, run_window_width=0.05, step_quotient=10, f_res=2**9,
overlap=0.7, dr=70, plot_spec=True)
chOfCall += 1 # set the channel name same as the filename
# Here to switch on the interactive window for detecting the calls and add them to a csv file
specFig = plt.gcf() # plot_spec needs to be True in get_calls_across_channels() function.
manualCallDetectionAdjustment(specFig, calls, recording)
# Here for individual call parameter extraction
rec_dict = {enu+1: Batspy(rec, f_resolution=2**9, overlap_frac=.70, dynamic_range=70)
for enu, rec in enumerate(all_recs)}
[rec_dict[e].load_data() for e in rec_dict.keys()] # load the data in all channels
# Goal now is to create small windows for each call
# make a time array with the sampling rate
time = np.arange(0, len(rec_dict[1].recording_trace) /
rec_dict[1].sampling_rate, 1/rec_dict[1].sampling_rate)
window_width = 0.010 # in seconds
call_dict = {'cb': [], 'ce': [], 'fb': [], 'fe': [], 'pf': [], 'call_number': []}
print('\nCalls extracted, proceeding to loop through %i detected calls...\n' % len(calls))
callDur = np.zeros(len(calls))
freqBeg = np.zeros(len(calls))
freqEnd = np.zeros(len(calls))
peakFreq = np.zeros(len(calls))
callsMask = np.zeros(len(calls))
enu = 0
for channel in set(chOfCall):
# load data
dat, sr, u = load_data(all_recs[channel-1])
dat = np.hstack(dat)
for callT in calls[chOfCall == channel]:
print('analyzing call %i' % (enu+1)) # calls are not analyzed in order ;)
# compute a high res spectrogram of a defined window length
dur, fb, fe, pf = call_window(dat, sr, callT, plotDebug=True)
# save the parameters
callDur[enu] = dur
freqBeg[enu] = fb
freqEnd[enu] = fe
peakFreq[enu] = pf
callsMask[enu] = callT
# save the debug figure
fig = plt.gcf()
fig.suptitle('__CALL#' + '{:03}'.format(enu + 1), fontsize=14)
fig.savefig(
'tmp/plots/' + 'CALL#' + '{:03}'.format(enu + 1) + '.pdf')
plt.close(fig)
enu += 1
# Reorder the arrays and create a csv
path = 'tmp/call_params/'
sortedInxs = np.argsort(callsMask)
paramsdf = pd.DataFrame({'callTime': callsMask[sortedInxs], 'bch': chOfCall[sortedInxs],
'callDur': callDur[sortedInxs], 'fBeg': freqBeg[sortedInxs],
'fEnd': freqEnd[sortedInxs], 'pkfreq': peakFreq[sortedInxs]})
paramsdf.to_csv(path_or_buf=path + '__'.join(recording.split('/')[2:]) + '.csv', index=False)
print('Done!')
exit()
# Dictionary with call parameters should be filled here
call_dict = {e: np.array(call_dict[e]) for e in call_dict.keys()}
from multiCH import plot_call_parameter_distributions
plot_call_parameter_distributions(call_dict, showit=True)
plt.show()
quit()
# from helper_functions import save_pis_and_call_parameters
# save_pis_and_call_parameters(all_diffs, call_dict, '../phd_figures/call_parameter_arrays/')
quit()
# Analyze SingleChannel
elif rec_type == 's':
from call_intervals import extract_pulse_sequence, save_ipi_sequence
bat = Batspy(recording, f_resolution=2**10, overlap_frac=.90, dynamic_range=70, pcTape_rec=False) # 2^7 = 128
bat.compute_spectrogram()
# bat.plot_spectrogram(showit=False)
pows, pks = bat.detect_calls(det_range=(50000, 180000), plot_in_spec=True, plot_debug=False)
# create the header for the csv
r = '/'.join(recording.split('/')[-3:])
shortHeader = '_'.join([r[5:22], 'ch', r[-19], 'rec', r[-6:-4]])
plt.show()
embed()
quit()
# save pulse intervals
pulse_times = extract_pulse_sequence(bat.t[pks], (1.43, 2.92), to_add=[1.5286])
save_ipi_sequence(pulse_times, 'approach', shortHeader)
ipis = np.diff(pulse_times)
embed()
quit()
# bat.plot_spectrogram()
plt.show()
quit()
# ToDo: Need to improve the basic call detection algorithm!
average_power, peaks = bat.detect_calls(det_range=(100000, 150000), plot_in_spec=False, plot_debug=False)
# Goal now is to create small windows for each call
# make a time array with the sampling rate
time = np.arange(0, len(bat.recording_trace) / bat.sampling_rate, 1/bat.sampling_rate)
window_width = 0.010 # in seconds
# now the call windows
call_windows = [bat.recording_trace[np.logical_and(time >= bat.t[e]-window_width/2.,
time <= bat.t[e]+window_width/2.)]
for e in peaks]
call_dict = {'cb': [], 'ce': [], 'fb': [], 'fe': [], 'pf': [], 'call_number': []}
for c_call in range(len(call_windows)): # loop through the windows
nfft = 2 ** 8
s, f, t = mlab.specgram(call_windows[c_call], Fs=bat.sampling_rate,
NFFT=nfft, noverlap=int(0.8 * nfft)) # Compute a high-res spectrogram of the window
dec_spec = decibel(s)
call_freq_range = (50000, 250000)
filtered_spec = dec_spec[np.logical_and(f > call_freq_range[0], f < call_freq_range[1])]
freqs_of_filtspec = np.linspace(call_freq_range[0], call_freq_range[-1], np.shape(filtered_spec)[0])
# measure noise floor
noiseEdge = int(np.floor(0.002 / np.diff(t)[0]))
noise_floor = np.max(np.hstack((filtered_spec[:, :noiseEdge], filtered_spec[:, -noiseEdge:]))) + 2
lowest_decibel = noise_floor
# get peak frequency
peak_f_idx = np.unravel_index(filtered_spec.argmax(),
filtered_spec.shape)
# ToDo: Make a function out of this in order to avoid code copy-paste
left_from_peak = np.arange(peak_f_idx[1]-1, -1, -1, dtype=int)
right_from_pk = np.arange(peak_f_idx[1]+1, len(t), dtype=int)
mainHarmonicTrace = []
db_th = 15.0
f_tol_th = 40000 # in Hz
t_tol_th = 0.0012 # in s
freq_tolerance = np.where(np.cumsum(np.diff(freqs_of_filtspec)) > f_tol_th)[0][0]
time_tolerance = np.where(np.cumsum(np.diff(t)) > t_tol_th)[0][0]
# first start from peak to right
f_ref = peak_f_idx[0]
t_ref = peak_f_idx[1]
mainHarmonicTrace.append([peak_f_idx[0], peak_f_idx[1]])
for ri in right_from_pk:
pi, _ = detect_peaks(filtered_spec[:, ri], db_th)
pi = pi[filtered_spec[pi, ri] > lowest_decibel]
if len(pi) > 0:
curr_f = pi[np.argmin(np.abs(f_ref - pi))]
if np.abs(ri - t_ref) > time_tolerance or np.abs(curr_f - f_ref) > freq_tolerance \
or f_ref - curr_f < 0:
continue
else:
mainHarmonicTrace.append([curr_f, ri])
f_ref = curr_f
t_ref = ri
else:
continue
# Now from peak to left
f_ref = peak_f_idx[0]
t_ref = peak_f_idx[1]
for li in left_from_peak:
pi, _ = detect_peaks(filtered_spec[:, li], db_th)
pi = pi[filtered_spec[pi, li] > lowest_decibel]
if len(pi) > 0:
curr_f = pi[np.argmin(np.abs(f_ref - pi))]
if np.abs(li - t_ref) > time_tolerance or np.abs(curr_f - f_ref) > freq_tolerance\
or curr_f - f_ref < 0:
continue
else:
mainHarmonicTrace.insert(0, [curr_f, li])
f_ref = curr_f
t_ref = li
else:
continue
mainHarmonicTrace = np.array(mainHarmonicTrace)
if np.abs(noise_floor - filtered_spec[peak_f_idx]) > db_th and \
(t[mainHarmonicTrace[-1][1]] - t[mainHarmonicTrace[0][1]]) * 1000. > 1.2:
call_dict['call_number'].append(c_call)
call_dict['cb'].append(t[mainHarmonicTrace[0][1]])
call_dict['ce'].append(t[mainHarmonicTrace[-1][1]])
call_dict['fb'].append(freqs_of_filtspec[mainHarmonicTrace[0][0]])
call_dict['fe'].append(freqs_of_filtspec[mainHarmonicTrace[-1][0]])
call_dict['pf'].append(freqs_of_filtspec[peak_f_idx[0]])
# if (t[mainHarmonicTrace[-1][1]] - t[mainHarmonicTrace[0][1]]) * 1000. > 2.5: # filter for calls longer than 2.5s
# fig, ax = bat.plot_spectrogram(dec_mat=filtered_spec, f_arr=freqs_of_filtspec, t_arr=t, ret_fig_and_ax=True)
# ax.plot(t[mainHarmonicTrace[:, 1]], freqs_of_filtspec[mainHarmonicTrace[:, 0]]/1000.,
# 'o', ms=12, color='None', mew=3, mec='k', alpha=0.7)
# ax.plot(t[peak_f_idx[1]], freqs_of_filtspec[peak_f_idx[0]] / 1000, 'o', ms=15, color='None', mew=4, mec='purple', alpha=0.8)
# ax.set_title('call # %i' % c_call, fontsize=20)
#
# import os
# save_path = '../../data/temp_batspy/' + '/'.join(bat.file_path.split('/')[5:-1]) +\
# '/' + bat.file_name.split('.')[0] + '/'
# if not os.path.exists(save_path):
# os.makedirs(save_path)
#
# fig.savefig(save_path + 'fig_' + str(c_call).zfill(4) + '.pdf')
# plt.close(fig)
# Create figure of call parameters
call_dict = {e: np.array(call_dict[e]) for e in call_dict.keys()}
from multiCH import plot_call_parameter_distributions
plot_call_parameter_distributions(call_dict)
# embed()
plt.show()
quit()
print('\nDONE!')
```
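To make the detection idea in `Batspy.detect_calls` easier to follow (average the spectrogram power inside the detection band, then run thunderfish's peak detector on that trace), here is a small sketch on synthetic data. It assumes thunderfish is installed, as it already is for `bats.py`; the spectrogram values are made up.
```python
# a minimal sketch of band-averaged call detection on a synthetic spectrogram
import numpy as np
from thunderfish.eventdetection import detect_peaks

f = np.linspace(0, 250000, 512)          # frequency axis in Hz
t = np.linspace(0, 1.0, 2000)            # time axis in s
spec = np.full((512, 2000), 1e-6)        # fake power spectrogram (freq x time)
spec[:, 500] += 1e-3                     # inject a "call" at t = 0.25 s

det_range = (50000, 180000)
band = np.logical_and(f > det_range[0], f < det_range[1])
av_power = np.mean(spec[band], axis=0)   # average power within the detection band

th = np.min(av_power)                    # same ad-hoc threshold as in detect_calls
if th <= 0:
    th = np.mean(av_power)
peaks, _ = detect_peaks(av_power, th)
print('detected call(s) at t =', t[peaks])
```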
#### File: jfsehuanes/BATSpy/gui.py
```python
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
import sys
from PyQt5.QtWidgets import QWidget, QMainWindow, QApplication, QAction, QDesktopWidget, QFileDialog, QPushButton, QToolTip,\
QVBoxLayout, QHBoxLayout
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from bats import Batspy
from multiCH import get_all_ch, plot_multiCH_spectrogram
from IPython import embed
class MainWindow(QMainWindow):
def __init__(self, verbose=3):
super().__init__()
self._main = QWidget()
self.setCentralWidget(self._main)
self.verbose = verbose
# Initialize function
self.InitFunc()
def open(self):
openObj = QAction('&Open', self)
openObj.setShortcut('Ctrl+O')
openObj.setStatusTip('Open a file browser dialog')
openObj.triggered.connect(self.click_open)
return openObj
def click_open(self):
fd = QFileDialog()
self.fname = fd.getOpenFileName(self, 'Select File', '~/', 'Please select .wav files only (*.wav )')[0]
if self.verbose == 3:
print('opening file %s' %self.fname)
if len(self.fname) > 0:
self.fname_selected = True
self.statusBar().showMessage("%s selected... awaiting orders!" % ('.../' + '/'.join(self.fname.split('/')[-3:])))
def load_single_Ch(self):
loadSObj = QAction('&Load Single Channel', self)
loadSObj.setShortcut('Ctrl+N')
loadSObj.setStatusTip('Loads a single channel file')
loadSObj.triggered.connect(self.click_singleCH)
return loadSObj
def click_singleCH(self):
if not self.fname_selected:
self.click_open()
# close previous figure if plotted
if self.multiCH_loaded or self.singleCH_loaded:
self.figure.clear()
bat = Batspy(self.fname, f_resolution=2 ** 9, overlap_frac=.70, dynamic_range=50, pcTape_rec=False)
bat.compute_spectrogram()
_, ax = bat.plot_spectrogram(ret_fig_and_ax=True, fig_input=self.figure, showit=False)
# refresh canvas
self.figure.tight_layout()
self.canvas.draw()
self.singleCH_loaded = True
self.statusBar().showMessage("single channel: %s loaded" % ('.../' + '/'.join(self.fname.split('/')[-3:])))
pass
def click_multiCH(self):
if not self.fname_selected:
self.click_open()
# close previous figure if plotted
if self.multiCH_loaded or self.singleCH_loaded:
self.figure.clear()
all_fnames = get_all_ch(self.fname)
# ToDo: Remake multiCH.py so that it is compatible with just plotting in the gui
def quit(self):
quitObj = QAction('&Quit', self)
quitObj.setShortcut('Ctrl+Q')
quitObj.setStatusTip('Quit BATSpy')
quitObj.triggered.connect(self.close)
return quitObj
def InitFunc(self):
# boolean flow control
self.fname_selected = False
self.singleCH_loaded = False
self.multiCH_loaded = False
self.fig = None
# status bar
self.statusBar().showMessage('Welcome to BATSpy!')
# menu bar
# File Submenu
menubar = self.menuBar()
file = menubar.addMenu("&File")
file.addAction(self.open())
file.addAction(self.quit())
file.addAction(self.load_single_Ch())
# View Submenu
view = menubar.addMenu('&View')
# Data Explorer Submenu
dataex = menubar.addMenu('&Data Explorer')
# Calls Submenu
calls = menubar.addMenu('&Calls')
# get current screen resolution
sSize = QDesktopWidget().screenGeometry(-1)
sWidth = sSize.width()
sHeight = sSize.height()
mwLength = 1800 # in Pixel
# Establish main window size and title
self.setGeometry(sWidth/2 - mwLength/2, sHeight/2 - mwLength/2, mwLength, mwLength)
self.setWindowTitle('BATSpy')
# Draw the main Canvas
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
# Create the Navigation Toolbar
self.navToolbar = NavigationToolbar(self.canvas, self)
# Select File button
selFile = QPushButton('Select File (Ctrl+O)', self)
selFile.released.connect(self.click_open)
selFile.setToolTip('Select a File!')
# Load singleCH button
loadSCH = QPushButton('Load Single Channel (Ctrl+N)', self)
loadSCH.released.connect(self.click_singleCH)
# Set tool tip
loadSCH.setToolTip('Analyze file as a <b>single channel</b>')
# Load multiCH button
loadMCH = QPushButton('Load Multi Channel (Ctrl+M)', self)
loadMCH.released.connect(self.click_multiCH)
loadMCH.setToolTip('Analyze file as a <b>multi channel</b>')
# Set a horizontal box where the buttons will be placed
hbox = QHBoxLayout()
hbox.addWidget(selFile)
hbox.addWidget(loadSCH)
hbox.addWidget(loadMCH)
layout = QVBoxLayout(self._main)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.canvas)
layout.addWidget(self.navToolbar)
layout.addLayout(hbox)
self.show()
if __name__ == '__main__':
spygui = QApplication(sys.argv)
main_window = MainWindow()
sys.exit(spygui.exec_())
```
|
{
"source": "jfsolarte/python_clean_architecture",
"score": 3
}
|
#### File: python_clean_architecture/domain/orderdata.py
```python
from python_clean_architecture.shared.domain_model import DomainModel
class OrderData(object):
def __init__(self, items):
self.items = items
@classmethod
def getdata(self):
return [1,2,3,4,5,6,7,8]
DomainModel.register(OrderData)
```
#### File: python_clean_architecture/repository/memrepo.py
```python
from python_clean_architecture.domain import orderdata as sr
import sys
class MemRepo:
def __init__(self, entries=None):
self._entries = []
if entries:
self._entries.extend(entries)
def order(self, items=None):
listOrder = []
self.extractor(items,listOrder)
return {'result':listOrder}
def extractor(self,items, listOrder):
for x in items:
if isinstance(x, list):
self.extractor(x,listOrder)
else:
listOrder.append(x)
```
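A quick usage sketch of `MemRepo.order`, which recursively flattens nested item lists; the input list is made up for illustration.
```python
# a minimal sketch of the recursive flattening done by MemRepo.extractor
from python_clean_architecture.repository.memrepo import MemRepo

repo = MemRepo()
print(repo.order(items=[1, [2, 3], [4, [5, 6]]]))
# -> {'result': [1, 2, 3, 4, 5, 6]}
```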
#### File: python_clean_architecture/serializers/orderdata_serializer.py
```python
import json
import sys
class OrderDataEncoder(json.JSONEncoder):
def default(self, o):
try:
to_serialize = {
'items': json.dumps(o.items),
}
return to_serialize
except AttributeError:
return super().default(o)
```
#### File: python_clean_architecture/use_cases/orderdata_use_case.py
```python
from python_clean_architecture.shared import use_case as uc
from python_clean_architecture.shared import response_object as res
class OrderDataGetUseCase(uc.UseCase):
def __init__(self, repo):
self.repo = repo
def execute(self, request_object):
#if not request_object:
#return res.ResponseFailure.build_from_invalid_request_object(request_object)
storage_rooms = self.repo.order(items=request_object.items)
return res.ResponseSuccess(storage_rooms)
```
|
{
"source": "jfsScience/jfsOtterVIS",
"score": 2
}
|
#### File: jfsScience/jfsOtterVIS/CCDpanelsetup.py
```python
#
# Author: <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#python imports
import tkinter as tk
from tkinter import ttk
import numpy as np
import serial
#application imports
import config
from CCDhelp import *
import CCDserial
import CCDfiles
#jfs
from JFSphoto import *
import time
from JFShelp import *
class buildpanel(tk.Frame):
def __init__(self, master, CCDplot, SerQueue,JFSphoto):
#geometry-rows for packing the grid
device_row = 10
shicg_row = 20
continuous_row = 30
avg_row = 40
collect_row = 50
plotmode_row = 60 #60
save_row = 70 #70
update_row = 80 #80
progress_var = tk.IntVar()
tk.Frame.__init__(self, master=None)
#Create all widgets and space between them
self.devicefields(device_row)
#insert vertical space
self.grid_rowconfigure(device_row+3, minsize=20) #30
self.CCDparamfields(shicg_row)
#insert vertical space
self.grid_rowconfigure(shicg_row+4, minsize=20) #30
self.collectmodefields(continuous_row)
self.avgfields(avg_row)
#insert vertical space
self.grid_rowconfigure(avg_row+2, minsize=10) #30
self.collectfields(collect_row, SerQueue, progress_var)
#vertical space
self.grid_rowconfigure(collect_row+2, minsize=10) #30
self.plotmodefields(plotmode_row, CCDplot)
self.saveopenfields(save_row, CCDplot)
self.updateplotfields(update_row, CCDplot)
#
self.jf = JFSphoto
self.jfsAddOns(CCDplot)
def devicefields(self, device_row):
#device setup - variables, widgets and traces associated with the device entrybox
#variables
self.device_address = tk.StringVar()
self.device_status = tk.StringVar()
self.device_statuscolor = tk.StringVar()
#widgets
self.ldevice = tk.Label(self, text="COM-device:")
self.ldevice.grid(column=0, row=device_row)
self.edevice = tk.Entry(self, textvariable=self.device_address, justify='left')
self.edevice.grid(column=1, row=device_row)
self.ldevicestatus = tk.Label(self, textvariable=self.device_status, fg="green")
#setup trace to check if the device exists
self.device_address.trace("w", lambda name, index, mode, Device=self.device_address, status=self.device_status, colr=self.ldevicestatus: self.DEVcallback(name, index, mode, Device, status, colr))
self.device_address.set(config.port)
self.ldevicestatus.grid(columnspan=2, row=device_row+1)
#help button
self.bhdev = tk.Button(self, text="?", command=lambda helpfor=0: helpme(helpfor))
self.bhdev.grid(row=device_row, column=3)
def CCDparamfields(self, shicg_row):
#CCD parameters - variables, widgets and traces associated with setting ICG and SH for the CCD
self.SHvalue = tk.StringVar()
self.SHvalue.set("200")
self.ICGvalue = tk.StringVar()
self.ICGvalue.set("100000")
self.tint = tk.StringVar()
self.tint.set("Integration time is 0.1 ms")
self.ICGSHstatus = tk.StringVar()
self.ICGSHstatus.set("Correct CCD pulse timing.")
self.ICGSHstatuscolor = tk.StringVar()
#pulse timing tip
self.ltipSHICG = tk.Label(self, text="ICG = n·SH")
self.ltipSHICG.grid(columnspan=2, row=shicg_row-1)
#setup SH-entry
self.lSH = tk.Label(self, text="SH-period:")
self.lSH.grid(column=0, row=shicg_row)
self.eSH = tk.Entry(self, textvariable=self.SHvalue, justify='right')
self.eSH.grid(column=1, row=shicg_row)
#setup ICG-entry
self.lICG = tk.Label(self, text="ICG-period:")
self.lICG.grid(column=0, row=shicg_row+1)
self.eICG = tk.Entry(self, textvariable=self.ICGvalue, justify='right')
self.eICG.grid(column=1, row=shicg_row+1)
#setup ICGSH-status label
self.lICGSH = tk.Label(self, textvariable=self.ICGSHstatus, fg="green")
self.lICGSH.grid(columnspan=2, row=shicg_row+2)
#integration time label
self.ltint = tk.Label(self, textvariable=self.tint)
self.ltint.grid(columnspan=2, row=shicg_row+3)
#help button
self.bhtiming = tk.Button(self, text="?", command=lambda helpfor=1: helpme(helpfor))
self.bhtiming.grid(row=shicg_row, rowspan=2, column=3)
#setup traces to update tx-data
self.SHvalue.trace("w", lambda name, index, mode, status=self.ICGSHstatus, tint=self.tint, colr=self.lICGSH, SH=self.SHvalue, ICG=self.ICGvalue: self.ICGSHcallback(name, index, mode, status, tint, colr, SH, ICG))
self.ICGvalue.trace("w", lambda name, index, mode, status=self.ICGSHstatus, tint=self.tint, colr=self.lICGSH, SH=self.SHvalue, ICG=self.ICGvalue: self.ICGSHcallback(name, index, mode, status, tint, colr, SH, ICG))
def collectmodefields(self, continuous_row):
#setup continuous vs one-shot
self.collectmode_frame = tk.Frame(self)
self.collectmode_frame.grid(row=continuous_row, columnspan=2)
self.CONTvar = tk.IntVar()
self.rcontinuous = tk.Radiobutton(self.collectmode_frame, text="Continuous", variable=self.CONTvar, value=1, command=lambda CONTvar=self.CONTvar: self.modeset(CONTvar))
self.rcontinuous.grid(row=0, column=2, sticky="W")
self.roneshot = tk.Radiobutton(self.collectmode_frame, text="Single", variable=self.CONTvar, value=0, command=lambda CONTvar=self.CONTvar: self.modeset(CONTvar))
self.roneshot.grid(row=0, column=1, sticky="W")
#help button
self.bhcollectmode = tk.Button(self, text="?", command=lambda helpfor=6: helpme(helpfor))
self.bhcollectmode.grid(row=continuous_row, column=3)
def avgfields(self, avg_row):
#setup AVG entry
self.lAVG = tk.Label(self, text="Average:")
self.lAVG.grid(column=0, row=avg_row)
self.AVGscale = tk.Scale(self, orient='horizontal', from_=1, to=15)
self.AVGscale.configure(command=self.AVGcallback)
self.AVGscale.grid(column=1, row=avg_row, sticky="we")
#help button
self.bhavg = tk.Button(self, text="?", command=lambda helpfor=2: helpme(helpfor))
self.bhavg.grid(row=avg_row, column=3)
def collectfields(self, collect_row, SerQueue, progress_var):
#setup collect and stop buttons
self.progress = ttk.Progressbar(self, orient="horizontal", maximum=10, mode="determinate", var=progress_var)
self.bcollect = tk.Button(self, text="Collect", command=lambda panel=self, SerQueue=SerQueue, progress_var=progress_var: CCDserial.rxtx(panel, SerQueue, progress_var))
self.bcollect.event_generate('<ButtonPress>', when='tail')
self.bcollect.grid(row=collect_row, columnspan=3, sticky="EW", padx=5)
self.bstop = tk.Button(self, text="Stop", state=tk.DISABLED, command=lambda queue=SerQueue: CCDserial.rxtxcancel(queue))
self.bstop.grid(row=collect_row, column=3)
self.progress.grid(row=collect_row+1, columnspan=3, sticky="EW", padx=5)
def plotmodefields(self, plotmode_row, CCDplot):
#setup plot mode checkbuttons
self.plotmode_frame = tk.Frame(self)
self.plotmode_frame.grid(row=plotmode_row, columnspan=2)
self.balance_var = tk.IntVar()
self.rawplot_var = tk.IntVar()
self.cinvert = tk.Checkbutton(self.plotmode_frame, text="Plot raw data", variable=self.rawplot_var, offvalue=1, onvalue=0)#, state=tk.ACTIVE)
self.cinvert.deselect()
self.cinvert.grid(row=0, column=1, sticky="W")
self.cbalance = tk.Checkbutton(self.plotmode_frame, text="Balance output", variable=self.balance_var, offvalue=0, onvalue=1)#, state=tk.ACTIVE)
self.cbalance.select()
self.cbalance.grid(row=0, column=2, sticky="W")
self.grid_rowconfigure(plotmode_row+2, minsize=50)
#help button
self.bhinv = tk.Button(self, text="?", command=lambda helpfor=3: helpme(helpfor))
self.bhinv.grid(row=plotmode_row, column=3)
#setup traces
self.rawplot_var.trace("w", lambda name, index, mode, invert=self.rawplot_var, plot=CCDplot: self.RAWcallback(name, index, mode, invert, plot))
self.balance_var.trace("w", lambda name, index, mode, balance=self.balance_var, plot=CCDplot: self.BALcallback(name, index, mode, balance, plot))
def saveopenfields(self, save_row, CCDplot):
#setup save/open buttons
self.fileframe = tk.Frame(self)
self.fileframe.grid(row=save_row, columnspan=2)
self.bopen = tk.Button(self.fileframe, text="Open", width=11, command=lambda self=self, CCDplot=CCDplot: CCDfiles.openfile(self, CCDplot))
self.bsave = tk.Button(self.fileframe, text="Save", width=11, state=tk.DISABLED,command=lambda self=self: CCDfiles.savefile(self))
self.bopen.pack(side=tk.LEFT)
self.bsave.pack(side=tk.LEFT)
#help button
self.bhsav = tk.Button(self, text="?", command=lambda helpfor=5: helpme(helpfor))
self.bhsav.grid(row=save_row, column=3)
def updateplotfields(self, update_row, CCDplot):
self.bupdate = tk.Button(self, text="Update plot", command=lambda CCDplot=CCDplot: self.updateplot(CCDplot))
#setup an event on the invisible update-plot button with a callback this thread can invoke in the mainloop
self.bupdate.event_generate('<ButtonPress>', when='tail')
#commented out, it's needed to inject an event into the tk.mainloop for updating the plot from the 'checkfordata' thread
#self.bupdate.grid(row=update_row, columnspan=3, sticky="EW", padx=5)
#
#
def jfsAddOns(self,CCDplot):
device_row = 10
shicg_row = 20
con_row = 30
avg_row = 40
col_row = 50
plt_row = 60
save_row = 70
upd_row = 80
kin_delta = tk.IntVar()
kin_delta.set(2)
kin_repeats = tk.IntVar()
kin_repeats.set(10)
kin_time = tk.IntVar()
kin_time.set(0)
def darkline():
if (self.jf.do_save_darkline(config.rxData16)==1):
self.jfsdark_check.config(state=tk.NORMAL)
def baseline():
if (self.jf.get_darkline_checked() == 1):
base = self.jf.darkData16-config.rxData16
if (self.jf.do_save_baseline(base)==1):
self.jfsbase_check.config(state=tk.NORMAL)
self.jfsbase_transmission.config(state=tk.NORMAL)
self.jfsbase_absorption.config(state=tk.NORMAL)
def loaddata():
self.jf.load_pandas()
config.photometer=1
self.updateplot(CCDplot)
self.jfsdark_check.config(state=tk.NORMAL)
self.jfsbase_check.config(state=tk.NORMAL)
self.jfsbase_transmission.config(state=tk.NORMAL)
self.jfsbase_absorption.config(state=tk.NORMAL)
def start_kinetics():
if self.device_status.get() == "Device exist":
self.bcollect.invoke()
self.jf.start_kinetic()
kinetics()
else:
messagebox.showerror("By the great otter!"," Sorry No Device")
def measure():
kin_time.set(kin_time.get()+1)
self.bcollect.invoke()
self.jf.add_kinetic(str(kin_delta.get()*kin_time.get()))
kin_repeats.set(kin_repeats.get()-1)
kinetics()
def kinetics():
if kin_repeats.get() > 0 :
self.after(kin_delta.get()*1000, measure)
else:
print('Kinetics finished')
kin_time.set(0)
self.jf.df['baseline']=self.jf.baseData16
self.jf.df['darkline']=self.jf.darkData16
self.jf.df['nmscale']=self.jf.nmData16
self.jf.do_math(self)
self.jfstitel = tk.Label(self, text=' Photometer ',fg="#6A9662")
#self.jfstitel.config(font=("Courier",0))
self.jfstitel.grid(row=device_row,column=4,columnspan=2,sticky='w')
self.jfs4cal = tk.Button(self,text='Calibration',fg="blue", command=self.jf.do_calibrate)
self.jfs4cal.grid(row=device_row+1,column=4,sticky='e',padx=4)
self.jfs4calhp = tk.Button(self,text='?',command=lambda roots=self, helpfor=1: jfshelpme(roots,helpfor))
self.jfs4calhp.grid(row=device_row+1,column=5,sticky='e',padx=4)
self.jfs4nm_check = tk.Checkbutton(self,text="[nm] scale on/off",variable=self.jf.nm_checked,command=lambda CCDplot=CCDplot: self.updateplot(CCDplot))
self.jfs4nm_check.grid(row=shicg_row-1,column=4,sticky='w',padx=4)
self.jfsdark = tk.Button(self,text='save Dark',image=self.jf.bulbOff,compound=tk.LEFT,fg="blue", command= darkline)
self.jfsdark.grid(row=shicg_row,column=4,sticky='e',padx=4)
self.jfsdarkhp = tk.Button(self,text='?',command=lambda roots=self, helpfor=2: jfshelpme(roots,helpfor))
self.jfsdarkhp.grid(row=shicg_row,column=5,sticky='e',padx=4)
self.jfsdark_check = tk.Checkbutton(self,text="Darkline on/off",variable=self.jf.darkline_checked,state=tk.DISABLED,command=lambda CCDplot=CCDplot: self.updateplot(CCDplot))
self.jfsdark_check.grid(row=shicg_row+1,column=4,sticky='w')
self.jfsbase = tk.Button(self,text='save Base',image=self.jf.bulbOn,compound=tk.LEFT,fg="blue", command= baseline)
self.jfsbase.grid(row=shicg_row+2,column=4,sticky='e',padx=4)
self.jfsdarkhp = tk.Button(self,text='?',command=lambda roots=self, helpfor=3: jfshelpme(roots,helpfor))
self.jfsdarkhp.grid(row=shicg_row+2,column=5,sticky='e',padx=4)
self.jfsbase_check = tk.Checkbutton(self,text="Baseline on/off",variable=self.jf.baseline_checked,state=tk.DISABLED,command=lambda CCDplot=CCDplot: self.updateplot(CCDplot))
self.jfsbase_check.grid(row=shicg_row+3,column=4,sticky='w')
self.jfsbase_absorption = tk.Radiobutton(self,text="Absorbance [E]",variable=self.jf.abs_trans,value=0,state=tk.DISABLED,command=lambda CCDplot=CCDplot: self.updateplot(CCDplot))
self.jfsbase_absorption.grid(row=con_row,column=4,sticky='es')
self.jfsbase_transmission = tk.Radiobutton(self,text="Transmission",variable=self.jf.abs_trans,value=1,state=tk.DISABLED,command=lambda CCDplot=CCDplot: self.updateplot(CCDplot))
self.jfsbase_transmission.grid(row=avg_row,column=4,sticky='e')
self.jfspdsave = tk.Button(self,text='save Data',fg="blue", command=self.jf.save_pandas)
self.jfspdsave.grid(row=col_row,column=4,sticky='e',padx=4)
self.jfssavehp = tk.Button(self,text='?',command=lambda roots=self, helpfor=4: jfshelpme(roots,helpfor))
self.jfssavehp.grid(row=col_row,column=5,sticky='e',padx=4)
self.jfspdload = tk.Button(self,text='load Data',fg="blue", command=loaddata)
self.jfspdload.grid(row=col_row+1,column=4,sticky='e',padx=4)
#### LabelFrame
self.jfslf1 = tk.LabelFrame(self,text= 'kinetic')
self.jfslf1.grid(row=plt_row,column=4,sticky='e',padx=4)
self.jfskinb = tk.Button(self.jfslf1,text='start Kinetic',fg="blue", command=start_kinetics)
self.jfskinb.grid(row=2,column=1,sticky='e')
self.jfskinl1 = tk.Label(self.jfslf1,text='Interval [s]',fg="blue")
self.jfskinl1.grid(row=0,column=0,sticky='w')
self.jfskine1 = tk.Entry(self.jfslf1,textvariable=kin_delta,width=4)
self.jfskine1.grid(row=0,column=1,sticky='e')
self.jfskinl2 = tk.Label(self.jfslf1,text='Repetitions',fg="blue")
self.jfskinl2.grid(row=1,column=0,sticky='w')
self.jfspkine2 = tk.Entry(self.jfslf1,textvariable=kin_repeats,width=4)
self.jfspkine2.grid(row=1,column=1,sticky='e')
self.jfskinhp = tk.Button(self,text='?',command=lambda roots=self, helpfor=5: jfshelpme(roots,helpfor))
self.jfskinhp.grid(row=plt_row,column=5,sticky='e',padx=4)
self.jfspdmath = tk.Button(self.jfslf1,text='Math',fg="blue", command=lambda JFSphoto=Jfsphoto: self.jf.do_math(self))
self.jfspdmath.grid(row=2,column=0,sticky='w')
self.jfspdmeth = tk.Button(self,text='Methods',fg="blue", command=lambda JFSphoto=Jfsphoto: self.jf.do_methods(self))
self.jfspdmeth.grid(row=save_row,column=4,sticky='e',padx=4)
self.jfsmethhp = tk.Button(self,text='?',command=lambda roots=self, helpfor=7: jfshelpme(roots,helpfor))
self.jfsmethhp.grid(row=save_row,column=5,sticky='e',padx=4)
### Reset changes from loading phometerfile
def reset_settings(self):
print('reset settings 1')
self.jf.reset_settings()
### Callbacks for traces, buttons, etc ###
def callback(self):
self.bopen.config(state=tk.DISABLED)
return()
def ICGSHcallback(self, name, index, mode, status, tint, colr, SH, ICG):
try:
config.SHperiod = np.uint32(int(SH.get()))
config.ICGperiod = np.uint32(int(ICG.get()))
except:
print("SH or ICG not an integer")
self.print_tint = tk.StringVar()
if (config.SHperiod < 1):
config.SHperiod = 1
if (config.ICGperiod < 1):
config.ICGperiod = 1
if ((config.ICGperiod % config.SHperiod) or (config.SHperiod < 20) or (config.ICGperiod < 14776)):
status.set("CCD pulse timing violation!")
colr.configure(fg="red")
self.print_tint.set("invalid")
else:
status.set("Correct CCD pulse timing.")
colr.configure(fg="green")
if (config.SHperiod < 20000000):
self.print_tint.set(str(config.SHperiod/2000) + " ms")
elif (config.SHperiod <= 1200000000):
self.print_tint.set(str(config.SHperiod/2000000) + " s")
elif (config.SHperiod > 1200000000):
self.print_tint.set(str(round(config.SHperiod/120000000,2)) + " min")
#tint.set("Integration time is " + + " ms")
tint.set("Integration time is " + self.print_tint.get())
def modeset(self, CONTvar):
config.AVGn[0]=CONTvar.get()
def AVGcallback(self,AVGscale):
config.AVGn[1] = np.uint8(self.AVGscale.get())
def RAWcallback(self, name, index, mode, invert, CCDplot):
config.datainvert = invert.get()
if (config.datainvert == 0):
self.cbalance.config(state=tk.DISABLED)
else:
self.cbalance.config(state=tk.NORMAL)
self.updateplot(CCDplot)
def BALcallback(self, name, index, mode, balanced, CCDplot):
config.balanced = balanced.get()
self.updateplot(CCDplot)
def DEVcallback(self, name, index, mode, Device, status, colr):
config.port = Device.get()
try:
ser = serial.Serial(config.port, config.baudrate, timeout=1)
status.set("Device exist")
ser.close()
colr.configure(fg="green")
except serial.SerialException:
status.set("Device doesn't exist")
colr.configure(fg="red")
def updateplot_B(self,CCDplot):
self.jf.reset_settings()
self.updateplot(CCDplot)
def updateplot(self, CCDplot):
CCDplot.a.clear()
# Photometer job: the baseline is the intensity of the lamp.
# baseline_start and baseline_end depend on the spectral range of the lamp.
# The actual input is lower due to the absorption of the liquid.
if (self.jf.get_baseline_checked()==1):
config.pltBaseData16 = self.jf.baseData16
config.pltData16 = self.jf.darkData16 - config.rxData16
for i in range(0,3694):
if (config.pltBaseData16[i] == 0):
config.pltData16[i] = 1
else:
if (self.jf.abs_trans.get()==1):
config.pltData16[i] = (config.pltData16[i] / config.pltBaseData16[i])
else:
config.pltData16[i] = np.log10(config.pltBaseData16[i] / config.pltData16[i])
#if (self.jf.abs_trans.get()==1):
# config.pltData16 = np.log10(config.pltData16)*-1
try:
up = np.max(config.pltData16)
if (self.jf.get_nm_checked()==1):
CCDplot.a.plot(self.jf.get_nm_scale(),config.pltData16,linewidth=0.6)
CCDplot.a.axis([self.jf.nm_left,self.jf.nm_right,0,up])
CCDplot.a.set_xlim([self.jf.nm_left+self.jf.baseline_start*self.jf.nm_step, self.jf.nm_left+self.jf.baseline_end*self.jf.nm_step])
CCDplot.a.set_xlabel(" [nm] ")
else:
CCDplot.a.plot(config.pltData16,linewidth=0.6)
CCDplot.a.axis([0,3694,-10,up])
CCDplot.a.set_xlim([self.jf.baseline_start, self.jf.baseline_end])
CCDplot.a.set_xlabel("Pixelnumber")
CCDplot.a.set_ylabel("Intensity")
except ValueError:
messagebox.showerror("By the great otter!","Is there's a problem with the light.\n or disable baseline checkbox")
else:
#This subtracts the ADC-pixel from ADC-dark
if (config.datainvert==1):
if (self.jf.get_darkline_checked()==1):
config.pltData16 = self.jf.darkData16 - config.rxData16
else:
config.pltData16 = (config.rxData16[10]+config.rxData16[11])/2 - config.rxData16
#This subtracts the average difference between even and odd pixels from the even pixels
if (config.balanced==1):
config.offset = (config.pltData16[18]+config.pltData16[20]+config.pltData16[22]+config.pltData16[24]-config.pltData16[19]-config.pltData16[21]-config.pltData16[23]-config.pltData16[25])/4 # average even-odd difference over four pixel pairs
#print(config.offset)
for i in range (1847):
config.pltData16[2*i] = config.pltData16[2*i] - config.offset
#CCDplot.a.clear()
#plot intensities
if (config.datainvert == 1):
## make max visible
up = np.max(config.pltData16)
if (self.jf.get_nm_checked()==1):
CCDplot.a.plot(self.jf.get_nm_scale(),config.pltData16,linewidth=0.6)
CCDplot.a.axis([self.jf.nm_left,self.jf.nm_right,0,up])
CCDplot.a.set_xlabel(" [nm] ")
else:
CCDplot.a.plot(config.pltData16,linewidth=0.6)
CCDplot.a.axis([0,3694,-10,4095])
CCDplot.a.set_xlabel("Pixelnumber")
CCDplot.a.set_ylabel("Intensity")
else:
if (self.jf.get_nm_checked()==1):
CCDplot.a.plot(self.jf.get_nm_scale(),config.rxData16,linewidth=0.6)
CCDplot.a.axis([self.jf.nm_left,self.jf.nm_right,-10,4095])
CCDplot.a.set_xlabel(" [nm] ")
else:
CCDplot.a.plot(config.rxData16,linewidth=0.6)
CCDplot.a.axis([0,3694,-10,4095])
CCDplot.a.set_xlabel("Pixelnumber")
CCDplot.a.set_ylabel("ADCcount")
#plot raw data
CCDplot.canvas.draw()
```
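The transmission/absorbance branch of `updateplot` boils down to dark-correcting the sample trace and comparing it against the stored baseline (lamp) intensity. A small numpy sketch of that arithmetic, with made-up ADC counts:
```python
# a minimal sketch of the photometer math in updateplot; all values are illustrative
import numpy as np

dark = np.full(3694, 4000.0)     # ADC counts with the light path blocked (darkline)
base = np.full(3694, 3000.0)     # dark-corrected lamp intensity (baseline)
raw  = np.full(3694, 2500.0)     # ADC counts with the sample in the beam

intensity = dark - raw                    # dark-corrected sample intensity
transmission = intensity / base           # T = I / I0
absorbance = np.log10(base / intensity)   # A = log10(I0 / I)

print(transmission[0], absorbance[0])     # 0.5 0.3010...
```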
|
{
"source": "jftaas/tegmail",
"score": 2
}
|
#### File: tegmail/tegmail/gmail.py
```python
from apiclient import errors
from apiclient.http import BatchHttpRequest
class Gmail(object):
def __init__(self, http, service):
self.connected = False
self.labels = {}
self.events = {
'on_message': []
}
self._http = http
self._service = service
self._users = service.users()
self._labels = self._get_labels()
self._start()
self.connected = True
def _start(self):
for label in self._labels:
self.labels[label['id']] = label['name']
def _get_labels(self):
try:
results = self._users.labels().list(userId='me').execute()
labels = results.get('labels', [])
except errors.HttpError:
labels = {}
return labels
def _get_message_ids(self, max_results, label_ids, page_token):
try:
if not page_token:
response = self._users.messages().list(userId='me',
labelIds=label_ids,
maxResults=max_results
).execute()
else:
response = self._users.messages().list(userId='me',
labelIds=label_ids,
maxResults=max_results,
pageToken=page_token
).execute()
return response
except (errors.HttpError, ConnectionResetError):
return None
def get_messages(self, max_results=10, request_format=None,
label_ids=[], page_token=None):
response = self._get_message_ids(max_results, label_ids, page_token)
if not response:
return []
if not request_format:
request_format = 'metadata'
messages = []
def on_get_message(request_id, response, exception):
if exception is not None:
return
messages.append(response)
batch = BatchHttpRequest(callback=on_get_message)
try:
for message in response['messages']:
# message_ids.append(message['id'])
batch.add(self._users.messages().get(id=message['id'],
userId='me',
format=request_format))
batch.execute(http=self._http)
except KeyError:
return messages
return messages
def get_message_raw(self, message_id):
response = self._users.messages().get(id=message_id,
userId='me',
format='raw').execute()
return response['raw']
def modify_message(self, message_id, removeLabelIds=[], addLabelIds=[]):
try:
body = {'addLabelIds': addLabelIds,
'removeLabelIds': removeLabelIds}
response = self._users.messages().modify(id=message_id,
userId='me',
body=body).execute()
return response
except errors.HttpError as error:
print(error)
def trash_message(self, message_id):
try:
self._users.messages().trash(id=message_id,
userId='me').execute()
except errors.HttpError as error:
print(error)
```
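For context, here is a minimal usage sketch of the `Gmail` wrapper above. It assumes OAuth credentials were previously authorized and stored with the legacy `oauth2client` flow; `credentials.json`, the import path and the label id are placeholders, and only methods defined in the class are called.
```python
import httplib2
from oauth2client.file import Storage
from apiclient.discovery import build

from tegmail.gmail import Gmail  # assumed import path for the class above

# Load previously authorized credentials (hypothetical file name).
credentials = Storage('credentials.json').get()
http = credentials.authorize(httplib2.Http())
service = build('gmail', 'v1', http=http)

gmail = Gmail(http, service)
print(gmail.labels)  # label id -> label name, filled in by _start()

# Fetch a few inbox messages in 'metadata' format and print their subjects.
for message in gmail.get_messages(max_results=5, label_ids=['INBOX']):
    headers = message.get('payload', {}).get('headers', [])
    subject = next((h['value'] for h in headers if h['name'] == 'Subject'), '(no subject)')
    print(subject)
```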
#### File: tegmail/tegmail/interface.py
```python
import curses
from tegmail import Event
class Interface(object):
def __init__(self):
self.on_key_event = Event()
self.menu_box = None
self.main_box = None
self.info_box = None
self._keys = {
13: 'KEY_ENTER',
22: 'KEY_BACKSPACE',
27: 'KEY_ESCAPE',
127: 'KEY_BACKSPACE',
258: 'KEY_DOWN',
259: 'KEY_UP',
260: 'KEY_LEFT',
261: 'KEY_RIGHT',
}
self._init_curses()
def _init_curses(self):
self._stdscr = curses.initscr()
curses.curs_set(0)
curses.noecho()
curses.cbreak()
curses.nonl()
self._stdscr.keypad(True)
self._stdscr.refresh()
# set custom color pairs
# TODO check COLORS for number of
# supported pairs
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, -1, -1)
curses.init_pair(2, -1, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_RED, -1)
curses.init_pair(4, curses.COLOR_GREEN, -1)
curses.init_pair(5, curses.COLOR_BLUE, -1)
curses.init_pair(6, curses.COLOR_CYAN, -1)
curses.init_pair(7, curses.COLOR_YELLOW, -1)
curses.init_pair(8, curses.COLOR_MAGENTA, -1)
self.menu_box = curses.newwin(1, curses.COLS, 0, 0)
self.main_box = curses.newwin(curses.LINES - 2, curses.COLS, 1, 0)
self.info_box = curses.newwin(1, curses.COLS, curses.LINES - 1, 0)
self.main_box.idlok(1)
self.main_box.scrollok(True)
def _exit_curses(self):
curses.curs_set(1)
curses.echo()
curses.nocbreak()
curses.nl()
self._stdscr.keypad(False)
curses.endwin()
def _format_key(self, i):
if i in self._keys:
key = self._keys[i]
else:
try:
key = chr(i)
except ValueError:
key = ''
return key
def _change_window_color(self, win, color_index):
win.bkgd(' ', curses.A_REVERSE)
win.refresh()
def update(self):
getch = self._stdscr.getch()
key = self._format_key(getch)
self.on_key_event(key)
def close(self):
self._exit_curses()
def clear(self, win=None):
if not win:
win = self.main_box
win.erase()
win.refresh()
def print_text(self, text, win=None):
if not win:
win = self.main_box
win.addstr(text)
win.refresh()
def get_cursor_pos(self, win=None):
if not win:
win = self.main_box
return win.getyx()
# move_cursor(y_direction)
# move_cursor(y, x)
def move_cursor(self, *args, **kwargs):
if len(args) == 1:
yx = self.main_box.getyx()
y = yx[0] + args[0]
x = yx[1]
elif len(args) == 2:
y = args[0]
x = args[1]
if (y < self.main_box.getbegyx()[0] - 1 or
x > self.main_box.getmaxyx()[0] - 1):
return
self.main_box.chgat(curses.color_pair(1))
self.main_box.move(y, x)
self.main_box.chgat(curses.A_REVERSE)
self.main_box.refresh()
def add_char(self, y, x, ch, win=None):
if not win:
win = self.main_box
win.addch(y, x, ch, curses.A_REVERSE)
win.move(y, 0)
win.refresh()
```
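A hedged sketch of driving the curses `Interface` above from a simple event loop. It assumes that `tegmail.Event` supports handler registration with `+=` and invokes its handlers when called (that class is not shown here); the key names come from the `_keys` table in `_format_key()`.
```python
from tegmail.interface import Interface  # assumed import path for the class above

ui = Interface()
running = True

def handle_key(key):
    # Called by Interface.update() for every key press (after _format_key()).
    global running
    if key in ('q', 'KEY_ESCAPE'):
        running = False
    else:
        ui.print_text('key: {0}\n'.format(key))

try:
    ui.print_text('tegmail demo - press q to quit', win=ui.menu_box)
    ui.on_key_event += handle_key  # assumes Event implements += registration
    while running:
        ui.update()  # blocks on getch(), then dispatches the formatted key
finally:
    ui.close()  # always restore the terminal state
```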
|
{
"source": "JFTavares/ePubCreator",
"score": 3
}
|
#### File: converters/docx/footnotes.py
```python
from lxml import etree
from epubcreator.converters.docx import utils
class Footnotes:
def __init__(self, file):
self._footnotesXml = etree.parse(file)
def getFootnote(self, footnoteId):
return utils.xpath(self._footnotesXml, 'w:footnote[@w:id = "{0}"]'.format(footnoteId))[0]
def getRawText(self):
return "".join(utils.xpath(self._footnotesXml, "//w:t/text()"))
```
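A small usage sketch for the `Footnotes` helper above. A `.docx` file is a zip archive whose footnotes live in `word/footnotes.xml`; the file name and the footnote id below are placeholders.
```python
import zipfile

from epubcreator.converters.docx.footnotes import Footnotes

with zipfile.ZipFile('book.docx') as docx:           # hypothetical input file
    with docx.open('word/footnotes.xml') as xml:
        footnotes = Footnotes(xml)                   # etree.parse() accepts file objects

print(footnotes.getRawText())      # plain text of every footnote in the document
note = footnotes.getFootnote('3')  # the <w:footnote> element with w:id="3" (IndexError if absent)
```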
#### File: epubcreator/epubbase/ebook_metadata.py
```python
import datetime
import math
class Metadata:
DEFAULT_TITLE = "Título"
DEFAULT_AUTHOR = "Autor"
DEFAULT_EDITOR = "Editor"
DEFAULT_LANGUAGE = "es"
DEFAULT_BOOK_ID = "0000"
DEFAULT_SYNOPSIS = ("Yo por bien tengo que cosas tan señaladas, y por ventura nunca oídas ni vistas, vengan a noticia de muchos y no se "
"entierren en la sepultura del olvido, pues podría ser que alguno que las lea halle algo que le agrade, y a los que "
"no ahondaren tanto los deleite.\n"
"Y a este propósito dice Plinio que no hay libro, por malo que sea, que no tenga alguna cosa buena; mayormente que "
"los gustos no son todos unos, mas lo que uno no come, otro se pierde por ello. "
'<span class="nosep">L<small>ÁZARO</small></span> <small>DE</small> '
'<span class="nosep">T<small>ORMES</small>.</span>')
# In the epubbase sample dedication, the second paragraph carries the "salto05" class, which is why the
# "p" tag is used directly here so the class can be added; there is no other way to do it.
DEFAULT_DEDICATION = ("Suspiró entonces mío Cid, de pesadumbre cargado, y comenzó a hablar así, justamente mesurado: «¡Loado "
"seas, Señor, Padre que estás en lo alto! Todo esto me han urdido mis enemigos malvados».\n"
'<p class="salto05">A<small>NÓNIMO</small></p>')
DEFAULT_AUTHOR_BIOGRAPHY = ("NO<NAME>UTOR (Reikiavik, Islandia, 2013 - Terra III, 3072). Lorem ipsum dolor sit amet, consectetur "
"adipiscing elit. Nunc vel libero sed est ultrices elementum at vel lacus. Sed laoreet, velit nec congue "
"pellentesque, quam urna pretium nunc, et ultrices nulla lacus non libero.\n"
"Integer eu leo justo, vel sodales arcu. Donec posuere nunc in lectus laoreet a rhoncus enim fermentum. "
"Nunc luctus accumsan ligula eu molestie.")
DEFAULT_COVER_MODIFICATION = "Diseño"
# The possible modifications that can be made to the cover.
COVER_MODIFICATION_OPTIONS = (("Diseño", "Elija esta opción si se conserva la cubierta original aun con algún ajuste menor de "
"contraste y color (de la que, eso sí, es obligatorio quitar el logo o cualquier referencia "
"a la editorial). Debe introducirse el nombre del diseñador de la cubierta. Si la cubierta es "
"una creación original, debe introducirse el alias (o, si él quiere, el nombre) del creador."),
("Retoque", "Elija esta opción si la cubierta original se ha modificado significativamente. Deben "
"introducirse los nombres del diseñador original y el alias de quien la haya retocado."))
# All possible genres.
# It is a dictionary with the following shape:
# Key -> the genre type
# Value -> a tuple of two elements:
# 1 -> the genres: a collection of two-element tuples:
# 1 -> the genre name.
# 2 -> the genre description.
# 2 -> the subgenres: a collection of two-element tuples:
# 1 -> the subgenre name.
# 2 -> the subgenre description.
GENRES = {
"Ficción": (
(
("Guion", "Obra compuesta o adaptada para medios masivos (cine, televisión, radio, Internet)."),
("Novela", "Obra narrativa en prosa, extensa y compleja, de sucesos imaginados y parecidos a la realidad."),
("Poesía", "Obra lírica, usualmente en verso, con fines estético-emocionales."),
("Relato", "Obra narrativa en prosa, de menor extensión que la novela. Puede ser corta (Cuento) o mediana "
"(Relato propiamente dicho)."),
("Teatro", "Obra compuesta para ser representada en un escenario, ante público.")),
(
("Aventuras", "Narra sucesos fuera de lo común, a menudo en escenarios exóticos. Incluye Acción, Exploradores, "
"Piratas, Viajeros, Western."),
("Bélico", "Trata de campañas, batallas o guerras. Suele presentar con detalle estrategias militares (reales o "
"verosímiles)."),
("Ciencia ficción", "Explora el impacto de posibles avances científicos, tecnológicos, sociales o culturales "
"(presentes o futuros), sobre la sociedad o los individuos."),
("Didáctico", "Con clara intención de dejar una enseñanza. Incluye Fábulas, Parábolas."),
("Drama", "Narra hechos que conmueven al lector, usualmente desembocando en un final trágico. Incluye, por "
"supuesto, Tragedia."),
("Erótico", "Se relaciona directamente con la sensualidad y el sexo, presentándolos de forma implícita "
"o explícita."),
("Fantástico", "Utiliza la magia y otras formas sobrenaturales como un elemento primario del argumento, "
"la temática o el ambiente. Incluye Mitología."),
("Filosófico", "Una parte significativa de la obra se dedica a la filosofía discursiva (en temas como la "
"función y el papel de la sociedad, el propósito de la vida, la ética o la moral, el papel "
"del arte en la vida humana y el rol de la experiencia o la razón en el desarrollo del "
"conocimiento). Incluye Novela de ideas."),
("Histórico", "Ofrece una visión verosímil de una época histórica (preferiblemente lejana). Suele utilizar "
"acontecimientos verídicos aunque los personajes principales sean inventados."),
("Humor", "Usa el absurdo, en personajes o situaciones, para provocar la hilaridad. Incluye Comedia."),
("Infantil", "Dirigido a los niños. Incluye Cuentos de hadas."),
("Interactivo", "Exige una participación más activa del lector, que puede escoger varios «caminos» argumentales."),
("Intriga", "Las acciones se ejecutan con inteligencia y astucia, y ocultan acontecimientos importantes "
"para suscitar interés y tensión en el lector. Incluye Misterio, Suspenso."),
("Juvenil", "Dirigido a adolescentes y jóvenes."),
("Policial", "Su móvil principal es la resolución de un enigma, generalmente criminal, mediante procesos "
"mentales (como la deducción). Incluye Novela negra, Espionaje."),
("Psicológico", "Enfatiza la caracterización interior de sus personajes, sus motivos, circunstancias y "
"acción interna. Usa técnicas como flujo de conciencia o monólogo interior."),
("Realista", "Apela a recursos pseudo documentales, principalmente para denunciar una situación injusta (social "
"o individual). Incluye Costumbrismo, Narrativa social."),
("Romántico", "Su tema primordial es el amor y las relaciones de pareja. (No se trata del movimiento "
"Romanticismo de los siglos XVIII y XIX)."),
("Sátira", "A diferencia del mero humor, apela a la ironía y el sarcasmo, con propósito moralizador, lúdico o "
"meramente burlesco. Incluye Parodia, Picaresca."),
("Terror", "Busca provocar el espanto en el lector, frecuentemente a través de elementos paranormales. "
"Incluye Gore, Gótico, Horror, Thriller."),
("Otros", "De no ubicarse en ninguno de los subgéneros anteriores.")
)
),
"No Ficción": (
(
("Crónica", "Texto con estructura esencialmente cronológica, que suele emplear recursos literarios o periodísticos."),
("Divulgación", "Texto informativo, sin excesivo rigor metodológico, que interpreta y hace accesible el conocimiento "
"científico al público general."),
("Ensayo", "Texto que presenta un punto de vista personal y subjetivo, sin aparato documental, de manera libre y asistemática "
"y con voluntad de estilo."),
("Referencia", "Texto con rigor académico/científico y estructura sistematizada, normalmente dividida en apartados "
"o lecciones.")),
(
("Arte", "(Arquitectura, Danza, Escultura, Música, Pintura...)."),
("Autoayuda", "(Superación)."),
("Ciencias exactas", "(Lógica, Matemática...)."),
("Ciencias naturales", "(Astronomía, Biología, Geología, Geografía, Física, Química...)."),
("Ciencias sociales", "(Administración, Antropología, Arqueología, Demografía, Derecho, Economía, Educación, Política, "
"Sociología...). Excepciones, por popularidad: Historia, Psicología."),
("Comunicación", "(Cine, Diseño gráfico, Espectáculo, Fotografía, Historieta, Lingüística, Periodismo, Publicidad, "
"Televisión...)."),
("Crítica y teoría literaria", ""),
("Deportes y juegos", ""),
("Diccionarios y enciclopedias", ""),
("Espiritualidad", "(Esoterismo, Religión)."),
("Filosofía", ""),
("Historia", ""),
("Hogar", "(Bricolaje, Cocina, Decoración, Jardinería, Mascotas...)."),
("Humor", ""),
("Idiomas", ""),
("Manuales y cursos", ""),
("Memorias", "(Autobiografía, Biografía, Cartas, Diarios...)."),
("Padres e hijos", ""),
("Psicología", "(Psiquiatría...). Excepciones: Autoayuda, Sexualidad, Padres e hijos."),
("Salud y bienestar", "(Medicina, Nutrición, Terapias alternativas...)."),
("Sexualidad", ""),
("Tecnología", "(Electrónica, Industria, Informática, Telecomunicaciones...)."),
("Viajes", ""),
("Otros", "De no ubicarse en ninguno de los subgéneros anteriores.")
)
)}
def __init__(self):
self._publicationDate = None
self.title = ""
self.synopsis = ""
self.bookId = ""
self.subtitle = ""
self.editor = ""
self.originalTitle = ""
# Saga.
self.collectionName = ""
# Series.
self.subCollectionName = ""
# Volume.
self.collectionVolume = ""
# A list of Person objects with the authors.
self.authors = []
# A list of Person objects with the translators.
self.translators = []
# A list of Person objects with the illustrators.
self.ilustrators = []
# A list of Genre objects with the genres.
self.genres = []
self.coverModification = ""
self.coverDesigner = ""
self.language = ""
self.dedication = ""
# A CoverImage object.
self.coverImage = None
# A date with the publication date in the original language.
@property
def publicationDate(self):
return self._publicationDate
@publicationDate.setter
def publicationDate(self, value):
if type(value) is datetime.date or value is None:
self._publicationDate = value
else:
raise ValueError("Date expected.")
@staticmethod
def convertNameToFileAsFormat(name):
"""
Converts a name to the file-as format.
Example: "<NAME>" is converted to: "<NAME>".
@param name: a string with the name to convert.
@return: a string with the converted name.
"""
words = name.split(" ")
if len(words) > 1:
pivot = math.ceil(len(words) / 2)
orderedName = []
words[-1] += ","
for i in range(pivot, len(words)):
orderedName.append(words[i])
for i in range(pivot):
orderedName.append(words[i])
return " ".join(orderedName)
else:
return name
class Person:
MALE_GENDER = 0
FEMALE_GENDER = 1
def __init__(self, name, fileAs, gender=MALE_GENDER, image=None, biography=None):
self.name = name
self.fileAs = fileAs
self.gender = gender
self.image = image
self.biography = biography
def clone(self):
clonedPerson = Person(self.name, self.fileAs, self.gender, None, self.biography)
if self.image:
clonedPerson.image = self.image.clone()
return clonedPerson
class Genre:
def __init__(self, genreType, genre, subGenre):
self.genreType = genreType
self.genre = genre
self.subGenre = subGenre
```
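A short sketch showing how the `Metadata`, `Person` and `Genre` classes above fit together; the title, author name and date are placeholders. `convertNameToFileAsFormat` splits the name roughly in half to build the file-as form.
```python
import datetime

from epubcreator.epubbase import ebook_metadata

metadata = ebook_metadata.Metadata()
metadata.title = "Mi libro"                                    # placeholder title
metadata.language = ebook_metadata.Metadata.DEFAULT_LANGUAGE   # "es"

name = "Juan Ramírez"                                          # placeholder author
author = ebook_metadata.Person(name, ebook_metadata.Metadata.convertNameToFileAsFormat(name))
metadata.authors.append(author)
print(author.fileAs)  # -> "Ramírez, Juan"

metadata.genres.append(ebook_metadata.Genre("Ficción", "Novela", "Sátira"))

# The property setter only accepts datetime.date (or None).
metadata.publicationDate = datetime.date(2014, 1, 1)
```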
#### File: epubcreator/gui/about.py
```python
from PyQt4 import QtGui, QtCore
from epubcreator.gui.forms import about_dialog_ui
from epubcreator import version
class About(QtGui.QDialog, about_dialog_ui.Ui_Dialog):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.versionLabel.setText(version.VERSION)
self.descriptionLabel.setText(version.DESCRIPTION)
self.qtVersionLabel.setText(QtCore.QT_VERSION_STR)
```
#### File: epubcreator/gui/custom_widgets.py
```python
from PyQt4 import QtCore, Qt, QtGui
class ExtendedQLabel(QtGui.QLabel):
"""
A QLabel with the following additions:
- Emits the "clicked" signal when it is clicked.
- Emits the "entered" signal when it gains mouse focus.
- Emits the "left" signal when it loses mouse focus.
"""
clicked = Qt.pyqtSignal()
entered = Qt.pyqtSignal()
left = Qt.pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
def mouseReleaseEvent(self, event):
self.clicked.emit()
def enterEvent(self, event):
self.entered.emit()
def leaveEvent(self, event):
self.left.emit()
class ExtendedQListWidget(QtGui.QListWidget):
"""
A QListWidget that emits the "deleteKeyPressed" signal when the "delete" key is pressed.
"""
deleteKeyPressed = Qt.pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
def keyReleaseEvent(self, event):
if event.key() == QtCore.Qt.Key_Delete:
self.deleteKeyPressed.emit()
```
#### File: epubcreator/gui/metadata_tabs.py
```python
import datetime
from PyQt4 import QtGui, QtCore
from epubcreator.misc import language, gui_utils, settings_store, utils
from epubcreator.gui.forms import basic_metadata_widget_ui, additional_metadata_widget_ui
from epubcreator.epubbase import ebook_metadata, images
from epubcreator.gui import image_edit, author_data_edit
class BasicMetadata(QtGui.QWidget, basic_metadata_widget_ui.Ui_BasicMetadata):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self._extendUi()
# A CoverImage object.
self._coverImage = None
# Once an image is loaded into the label, the informational text it held cannot be
# recovered, so it is stored here in case setCoverImage is later called with
# None as the parameter.
self._coverImageText = self.coverImage.text()
self._populateCoverModificationOptions()
self._populateLanguages()
# By default, the language combobox shows "español".
self.languageInput.setCurrentIndex(self.languageInput.findText(language.Language.getLanguageName("es")))
self._connectSignals()
def getTitle(self):
return self.titleInput.text().strip()
def getSubtitle(self):
return self.subtitleInput.text().strip()
def getAuthors(self):
"""
Returns the authors entered by the user.
@return: a list of Person.
"""
authors = []
for i in range(self.authorsList.count()):
person = self.authorsList.item(i).data(QtCore.Qt.UserRole)
authors.append(person)
return authors
def getCoverModification(self):
return self.coverModificationInput.currentText().strip()
def getCoverDesigner(self):
return self.coverDesignerInput.text().strip()
def getLanguageCode(self):
return language.Language.getLanguageCode(self.languageInput.currentText())
def getBookId(self):
return self.idInput.text().strip()
def getSynopsis(self):
return utils.removeControlCharacters(self.synopsisInput.toPlainText().strip())
def getCoverImage(self):
"""
Returns a CoverImage object.
"""
return self._coverImage
def setCoverImage(self, coverImage):
"""
Sets the cover image.
@param coverImage: a CoverImage object.
"""
self._coverImage = coverImage
if coverImage:
self._refreshCoverImage()
else:
self.coverImage.setText(self._coverImageText)
self.editCoverImageButton.setEnabled(False)
def _populateCoverModificationOptions(self):
for i, option in enumerate(ebook_metadata.Metadata.COVER_MODIFICATION_OPTIONS):
self.coverModificationInput.addItem(option[0])
self.coverModificationInput.setItemData(i, gui_utils.formatTextForTooltip(option[1]), QtCore.Qt.ToolTipRole)
def _populateLanguages(self):
for languageName in language.Language.getSortedLanguagesNames():
self.languageInput.addItem(languageName)
def _changeCoverImage(self):
settings = settings_store.SettingsStore()
allowedFormatsToOpen = ("*.{0}".format(f) for f in images.CoverImage.allowedFormatsToOpen(settings.allowImageProcessing))
imagesFilter = "Imágenes ({0})".format(" ".join(allowedFormatsToOpen))
imageName = QtGui.QFileDialog.getOpenFileName(self, "Seleccionar Imagen", filter=imagesFilter)
if imageName:
try:
image = images.CoverImage(imageName, allowProcessing=settings.allowImageProcessing)
except images.InvalidDimensionsError:
gui_utils.displayStdErrorDialog("La imagen de cubierta seleccionada no tiene las dimensiones requeridas, que deben ser "
"de {0}px de ancho y {1}px de alto. Si desea que la imagen se redimensione "
"automáticamente, habilite la opción para permitir el procesamiento de las imágenes desde el "
"menú Preferencias.".format(images.CoverImage.WIDTH, images.CoverImage.HEIGHT))
return
except images.MaxSizeExceededError:
gui_utils.displayStdErrorDialog("La imagen de cubierta excede el tamaño máximo permitido, que debe "
"ser menor o igual a {0} kB. Si desea que la calidad de la imagen se ajuste automáticamente "
"para reducir su tamaño, habilite la opción para permitir el procesamiento de las imágenes "
"desde el menú Preferencias.".format(images.CoverImage.MAX_SIZE_IN_BYTES // 1000))
return
except images.ProgressiveImageError:
gui_utils.displayStdErrorDialog("La imagen de cubierta no puede ser abierta porque fue guardada en modo progresivo. Guárdela de "
"manera normal y vuelva a abrirla, o habilite la opción para permitir el procesamiento de las "
"imágenes desde el menú Preferencias.")
return
self.setCoverImage(image)
if settings.allowImageProcessing:
self.editCoverImageButton.setEnabled(True)
def _refreshCoverImage(self):
pixmap = QtGui.QPixmap()
pixmap.loadFromData(self._coverImage.toBytes())
self.coverImage.setPixmap(pixmap)
def _editCoverImage(self):
clonedCoverImage = self._coverImage.clone()
if image_edit.ImageEdit(clonedCoverImage, parent=self.window()).exec() == QtGui.QDialog.Accepted:
self.setCoverImage(clonedCoverImage)
def _addAuthorToList(self):
name = self.authorInput.text().strip()
fileAs = self.authorFileAsInput.text().strip()
if name and fileAs:
# Check that the author's name has not already been added to the list.
for i in range(self.authorsList.count()):
if self.authorsList.item(i).data(QtCore.Qt.UserRole).name == name:
return
item = QtGui.QListWidgetItem("{0} --> {1}".format(name, fileAs))
item.setData(QtCore.Qt.UserRole, ebook_metadata.Person(name, fileAs))
self.authorsList.addItem(item)
self.authorInput.clear()
self.authorFileAsInput.clear()
def _removeSelectedAuthorFromList(self):
if self.authorsList.currentItem() is not None:
self.authorsList.takeItem(self.authorsList.row(self.authorsList.currentItem()))
def _editAuthorData(self, author):
clonedAuthor = author.clone()
canChooseGender = len(self.getAuthors()) == 1
if author_data_edit.AuthorDataEdit(clonedAuthor, canChooseGender=canChooseGender, parent=self).exec() == QtGui.QDialog.Accepted:
author.gender = clonedAuthor.gender
author.image = clonedAuthor.image
author.biography = clonedAuthor.biography
def _populateCurrentAuthorData(self, selectedAuthor):
if selectedAuthor is None:
return
person = selectedAuthor.data(QtCore.Qt.UserRole)
self.authorInput.setText(person.name)
self.authorFileAsInput.setText(person.fileAs)
def _updateAuthorFileAs(self, authorName):
self.authorFileAsInput.setText(ebook_metadata.Metadata.convertNameToFileAsFormat(authorName.strip()))
def _showContextMenuForAuthorsList(self, pos):
selectedItem = self.authorsList.itemAt(pos)
if selectedItem is not None:
selectedAuthor = selectedItem.data(QtCore.Qt.UserRole)
action = QtGui.QAction("Editar Autor", self.authorsList)
action.triggered.connect(lambda: self._editAuthorData(selectedAuthor))
menu = QtGui.QMenu()
menu.addAction(action)
menu.exec(self.authorsList.mapToGlobal(pos))
def _extendUi(self):
# Only numbers may be entered in the book id field.
self.idInput.setValidator(QtGui.QIntValidator(self))
self.authorsList.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
def _connectSignals(self):
self.coverImage.clicked.connect(self._changeCoverImage)
self.addAuthorButton.clicked.connect(self._addAuthorToList)
self.authorsList.deleteKeyPressed.connect(self._removeSelectedAuthorFromList)
self.authorsList.currentItemChanged.connect(self._populateCurrentAuthorData)
self.authorsList.itemDoubleClicked.connect(lambda item: self._editAuthorData(item.data(QtCore.Qt.UserRole)))
self.authorsList.customContextMenuRequested.connect(self._showContextMenuForAuthorsList)
self.authorInput.textChanged.connect(self._updateAuthorFileAs)
self.authorInput.returnPressed.connect(self._addAuthorToList)
self.authorFileAsInput.returnPressed.connect(self._addAuthorToList)
self.editCoverImageButton.clicked.connect(self._editCoverImage)
class AdditionalMetadata(QtGui.QWidget, additional_metadata_widget_ui.Ui_AdditionalMetadata):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self._populateGenreTypes()
self._populateGenresAndSubGenres()
self.collectionVolumeInput.setEnabled(False)
self._connectSignals()
def getOriginalTitle(self):
return self.originalTitleInput.text().strip()
def getPublicationDate(self):
"""
Returns the publication date.
@return: a date.
@raise: ValidationException, if the date does not have a valid format.
"""
if self.publicationDateInput.text() != "--":
day, month, year = self.publicationDateInput.text().split("-")
if not day:
day = "01"
if not month:
month = "01"
self.publicationDateInput.setText("{0}-{1}-{2}".format(day, month, year))
try:
return datetime.datetime.strptime(self.publicationDateInput.text().strip(), "%d-%m-%Y").date()
except ValueError:
raise ValidationException("Fecha de publicación no válida",
"El formato de la fecha de publicación debe ser: dd-mm-aaaa. Si no conoce el día o mes exacto, déjelo en "
"blanco, que automáticamente el campo se autocompleta a 01-01-aaaa al momento de generar el epub.",
self, self.publicationDateInput)
def getDedication(self):
return utils.removeControlCharacters(self.dedicationInput.toPlainText().strip())
def getTranslators(self):
"""
Returns the translators entered by the user.
@return: a list of Person.
"""
translators = []
for i in range(self.translatorsList.count()):
person = self.translatorsList.item(i).data(QtCore.Qt.UserRole)
translators.append(person)
return translators
def getIlustrators(self):
"""
Returns the illustrators entered by the user.
@return: a list of Person.
"""
ilustrators = []
for i in range(self.ilustratorsList.count()):
person = self.ilustratorsList.item(i).data(QtCore.Qt.UserRole)
ilustrators.append(person)
return ilustrators
def getGenres(self):
"""
Returns the genres entered by the user.
@return: a list of Genre.
"""
genres = []
for i in range(self.genresList.count()):
genre = self.genresList.item(i).data(QtCore.Qt.UserRole)
genres.append(genre)
return genres
def getCollection(self):
"""
Returns the saga, series and volume.
@return: a tuple of strings with: saga, series, volume.
@raise: ValidationException, if a saga was specified but not a series, or if a series was
specified but not the volume number.
"""
collectionName = self.collectionNameInput.text().strip()
subCollectionName = self.subCollectionNameInput.text().strip()
collectionVolume = self.collectionVolumeInput.text().strip()
if collectionName and not subCollectionName:
raise ValidationException("No se especificó serie",
"Se especificó el nombre de la saga, pero no la serie.",
self,
self.subCollectionNameInput)
elif subCollectionName and not collectionVolume:
raise ValidationException("No se especificó volumen de la serie",
"Se especificó un nombre para la serie, pero no el número de volumen.",
self,
self.collectionVolumeInput)
else:
return collectionName, subCollectionName, collectionVolume
def _populateGenreTypes(self):
for genreType in ebook_metadata.Metadata.GENRES:
self.genreTypeInput.addItem(genreType)
def _populateGenresAndSubGenres(self):
selectedGenreType = self.genreTypeInput.currentText()
genres = ebook_metadata.Metadata.GENRES[selectedGenreType][0]
subGenres = ebook_metadata.Metadata.GENRES[selectedGenreType][1]
self.genreGenreInput.clear()
self.genreSubGenreInput.clear()
for i, genre in enumerate(genres):
self.genreGenreInput.addItem(genre[0])
self.genreGenreInput.setItemData(i, gui_utils.formatTextForTooltip(genre[1]), QtCore.Qt.ToolTipRole)
for i, subGenre in enumerate(subGenres):
self.genreSubGenreInput.addItem(subGenre[0])
self.genreSubGenreInput.setItemData(i, gui_utils.formatTextForTooltip(subGenre[1]), QtCore.Qt.ToolTipRole)
def _addTranslatorToList(self):
name = self.translatorInput.text().strip()
fileAs = self.translatorFileAsInput.text().strip()
if name and fileAs:
self._addPersonToList(self.translatorsList, name, fileAs)
self.translatorInput.clear()
self.translatorFileAsInput.clear()
def _removeCurrentItemFromList(self):
listWidget = self.sender()
if listWidget.currentItem() is not None:
listWidget.takeItem(listWidget.row(listWidget.currentItem()))
def _populateCurrentTranslatorData(self, selectedTranslator):
if selectedTranslator is None:
return
person = selectedTranslator.data(QtCore.Qt.UserRole)
self.translatorInput.setText(person.name)
self.translatorFileAsInput.setText(person.fileAs)
def _updateTranslatorFileAs(self, translatorName):
self.translatorFileAsInput.setText(ebook_metadata.Metadata.convertNameToFileAsFormat(translatorName.strip()))
def _addIlustratorToList(self):
name = self.ilustratorInput.text().strip()
fileAs = self.ilustratorFileAsInput.text().strip()
if name and fileAs:
self._addPersonToList(self.ilustratorsList, name, fileAs)
self.ilustratorInput.clear()
self.ilustratorFileAsInput.clear()
def _populateCurrentIlustratorData(self, selectedIlustrator):
if selectedIlustrator is None:
return
person = selectedIlustrator.data(QtCore.Qt.UserRole)
self.ilustratorInput.setText(person.name)
self.ilustratorFileAsInput.setText(person.fileAs)
def _updateIlustratorFileAs(self, ilustratorName):
self.ilustratorFileAsInput.setText(ebook_metadata.Metadata.convertNameToFileAsFormat(ilustratorName.strip()))
def _addGenreToList(self):
genreType = self.genreTypeInput.currentText()
genreGenre = self.genreGenreInput.currentText()
genreSubGenre = self.genreSubGenreInput.currentText()
item = QtGui.QListWidgetItem("{0}, {1}, {2}".format(genreType, genreGenre, genreSubGenre))
# Check that the genre has not already been added to the list.
for i in range(self.genresList.count()):
if self.genresList.item(i).text() == item.text():
return
item.setData(QtCore.Qt.UserRole, ebook_metadata.Genre(genreType, genreGenre, genreSubGenre))
self.genresList.addItem(item)
self.genresList.sortItems()
def _addPersonToList(self, listWidget, name, fileAs):
item = QtGui.QListWidgetItem("{0} --> {1}".format(name, fileAs))
# Check that the person's name has not already been added to the list.
for i in range(listWidget.count()):
if listWidget.item(i).data(QtCore.Qt.UserRole).name == name:
return
item.setData(QtCore.Qt.UserRole, ebook_metadata.Person(name, fileAs))
listWidget.addItem(item)
def _connectSignals(self):
self.addGenreButton.clicked.connect(self._addGenreToList)
self.genreTypeInput.currentIndexChanged.connect(self._populateGenresAndSubGenres)
self.genresList.deleteKeyPressed.connect(self._removeCurrentItemFromList)
self.subCollectionNameInput.textChanged.connect(
lambda s: self.collectionVolumeInput.setEnabled(len(s.strip()) != 0))
self.addTranslatorButton.clicked.connect(self._addTranslatorToList)
self.translatorsList.deleteKeyPressed.connect(self._removeCurrentItemFromList)
self.translatorsList.currentItemChanged.connect(self._populateCurrentTranslatorData)
self.translatorInput.textChanged.connect(self._updateTranslatorFileAs)
self.translatorInput.returnPressed.connect(self._addTranslatorToList)
self.translatorFileAsInput.returnPressed.connect(self._addTranslatorToList)
self.addIlustratorButton.clicked.connect(self._addIlustratorToList)
self.ilustratorsList.deleteKeyPressed.connect(self._removeCurrentItemFromList)
self.ilustratorsList.currentItemChanged.connect(self._populateCurrentIlustratorData)
self.ilustratorInput.textChanged.connect(self._updateIlustratorFileAs)
self.ilustratorInput.returnPressed.connect(self._addIlustratorToList)
self.ilustratorFileAsInput.returnPressed.connect(self._addIlustratorToList)
class ValidationException(Exception):
def __init__(self, error, description, tab, widget):
self.error = error
self.description = description
self.tab = tab
self.widget = widget
```
#### File: epubcreator/gui/preferences.py
```python
from PyQt4 import QtGui
from epubcreator.gui.forms import preferences_dialog_ui
class Preferences(QtGui.QDialog, preferences_dialog_ui.Ui_Preferences):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.accepted.connect(self._savePreferencesAndClose)
def _savePreferencesAndClose(self):
for i in range(self.stackedPreferences.count()):
preferenceWidget = self.stackedPreferences.widget(i)
preferenceWidget.saveSettings()
```
#### File: epubcreator/misc/settings_store.py
```python
import os
from PyQt4 import QtCore, QtGui
from epubcreator.epubbase.ebook import Ebook
from epubcreator.converters.converter_factory import ConverterFactory
class SettingsStore(QtCore.QSettings):
"""
Allows the various configuration options to be saved and retrieved. It also exposes
all the attributes related to the user's general preferences, which can be read
from anywhere in the application. The class attributes are:
-- One attribute per option of each converter. The attribute name results
from concatenating the file type the converter operates on, plus the option
name with its first letter capitalized. Example: the "ignoreEmptyParagraphs" option
of the docx converter becomes: "docxIgnoreEmptyParagraphs". That is the name under which
the option is stored on disk, and the name the consumer must use to read the class attribute.
-- One attribute per option of the Ebook class. The naming differs from the
procedure described above only in that the prefix of each attribute
is: "epubOutput".
-- All the keys of the _SETTINGS dictionary.
"""
_SETTINGS_GROUP = "userPreferences"
# List of attributes that SettingsStore exposes.
# Key = attribute name.
# Value = default value.
_SETTINGS = dict(editor="",
sigilPath="",
allowImageProcessing=True)
# Add every possible option from every converter.
_SETTINGS.update({c.FILE_TYPE + o.name[0].upper() + o.name[1:]: o.value for c in ConverterFactory.getAllConverters() for o in c.OPTIONS})
# Add every possible option from the Ebook class.
_SETTINGS.update({"epubOutput" + o.name[0].upper() + o.name[1:]: o.value for o in Ebook.OPTIONS})
def getAllSettingsForConverter(self, fileType):
"""
Returns all the options for a given converter. It is mostly a
convenience method so that all stored options can be passed to a converter
without the consumer having to do this:
op1 = settings.op1
op2 = settings.op2
...
@param fileType: a string indicating which converter to return the options for.
Example: "docx", "fb2".
@return: a dictionary.
Key: the option name.
Value: the stored value of the option.
"""
return self._getAllSettingsByPrefix(fileType)
def getAllSettingsForEbook(self):
"""
Similar to getAllSettingsForConverter, but for the options of
the Ebook class.
"""
return self._getAllSettingsByPrefix("epubOutput")
def __getattr__(self, item):
if item not in SettingsStore._SETTINGS:
raise AttributeError("'{0}' object has no attribute '{1}'".format(self.__class__.__name__, item))
defaultValue = SettingsStore._SETTINGS[item]
return self.value("{0}/{1}".format(SettingsStore._SETTINGS_GROUP, item), defaultValue, type(defaultValue))
def __setattr__(self, key, value):
if key in SettingsStore._SETTINGS:
self.setValue("{0}/{1}".format(SettingsStore._SETTINGS_GROUP, key), value)
else:
object.__setattr__(self, key, value)
def _getAllSettingsByPrefix(self, prefix):
i = len(prefix)
return {s[i].lower() + s[i + 1:]: getattr(self, s) for s in SettingsStore._SETTINGS if s.startswith(prefix)}
def __init__(self):
iniPath = os.path.join(QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.DataLocation), "epubcreator.ini")
super().__init__(iniPath, QtCore.QSettings.IniFormat)
```
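A sketch of how the dynamic attribute access described in the docstring above is meant to be consumed. It assumes it runs inside the application (a Qt application context must exist for `QDesktopServices.storageLocation` to resolve the ini path), and the Sigil path is a placeholder.
```python
from epubcreator.misc.settings_store import SettingsStore

settings = SettingsStore()

# Attributes declared in _SETTINGS (plus the per-converter and Ebook options)
# go through __getattr__ / __setattr__ and are persisted to epubcreator.ini.
print(settings.allowImageProcessing)    # default: True
settings.sigilPath = "/usr/bin/sigil"   # hypothetical path, stored under "userPreferences"

# All stored docx converter options, keyed without the "docx" prefix,
# ready to be passed to the converter.
docx_options = settings.getAllSettingsForConverter("docx")
print(docx_options)
```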
#### File: epubcreator/misc/utils.py
```python
import unicodedata
import re
import difflib
import pprint
_controlChars = None
_invalidCharForFileName = None
def toFileName(s):
"""
Removes invalid characters from a string so that it can be used as a file
name on Windows and Linux.
"""
global _invalidCharForFileName
if _invalidCharForFileName is None:
_invalidCharForFileName = re.compile("|".join(("\\\\", "\\/", ":", "\*", "\?", '"', "<", ">", "\|")))
temp = unicodedata.normalize("NFKD", s).encode('ASCII', 'ignore').decode()
return _invalidCharForFileName.sub("", temp)
def removeTags(s):
"""
Removes the tags from a string.
@param s: the string from which to remove the tags.
@return: a string with the tags removed.
"""
return re.sub("<[^>]*>", "", s)
def assertXhtmlsAreEqual(xml1, xml2):
# Remove pretty-printing.
normal1 = re.sub(r"\r?\n\s*<", "<", xml1.decode()).strip()
normal2 = re.sub(r"\r?\n\s*<", "<", xml2.decode()).strip()
if normal1 != normal2:
lines1 = re.sub(r"(</\w+>)", r"\1\n", normal1).splitlines()
lines2 = re.sub(r"(</\w+>)", r"\1\n", normal2).splitlines()
dif = [d for d in list(difflib.Differ().compare(lines1, lines2)) if d.startswith("+") or d.startswith("-")]
raise AssertionError("The xhtmls are not equal:\n\n{0}".format(pprint.pformat(dif)))
def removeControlCharacters(s):
"""
Removes all control characters from a string, except \t, \r and \n.
"""
global _controlChars
if _controlChars is None:
chars = set(chr(c) for c in range(32))
for c in ((chr(c) for c in (9, 10, 13))):
chars.remove(c)
_controlChars = re.compile("|".join(chars))
return _controlChars.sub("", s)
```
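A few illustrative calls to the helpers above; the sample strings are arbitrary.
```python
from epubcreator.misc import utils

print(utils.toFileName('¿Quién es: el autor?'))      # drops accents and characters invalid in file names
print(utils.removeTags('<p>Hola <b>mundo</b></p>'))  # -> 'Hola mundo'
print(repr(utils.removeControlCharacters('abc\x00def\n')))  # keeps \t, \r and \n -> 'abcdef\n'
```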
#### File: pyepub/pyepubreader/toc.py
```python
import os
from lxml import etree
class Toc:
_TOC_NS = "http://www.daisy.org/z3986/2005/ncx/"
def __init__(self, toc):
self._toc = etree.parse(toc)
def getTitles(self):
def addTitles(parentNavPoint):
titles = []
childNavPoints = self._xpath(parentNavPoint, "toc:navPoint")
for navPoint in childNavPoints:
text = self._xpath(navPoint, "toc:navLabel/toc:text/text()")[0]
src = os.path.split(self._xpath(navPoint, "toc:content/@src")[0])[1]
childTitles = addTitles(navPoint)
titles.append((text, src, childTitles))
return titles
navMap = self._xpath(self._toc, "/toc:ncx/toc:navMap")[0]
return addTitles(navMap)
def _xpath(self, element, xpath):
return element.xpath(xpath, namespaces={"toc": Toc._TOC_NS})
```
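A usage sketch for the reader-side `Toc` above. An epub is a zip archive and `toc.ncx` commonly sits under `OEBPS/`, but the exact path varies per book, so both the file name and the internal path are placeholders.
```python
import zipfile

from pyepub.pyepubreader.toc import Toc

with zipfile.ZipFile('book.epub') as epub:       # hypothetical input file
    with epub.open('OEBPS/toc.ncx') as ncx:      # path inside the epub may differ
        toc = Toc(ncx)

def dump(titles, level=0):
    # getTitles() returns (text, src, childTitles) tuples, nested recursively.
    for text, src, children in titles:
        print('  ' * level + '{0} -> {1}'.format(text, src))
        dump(children, level + 1)

dump(toc.getTitles())
```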
#### File: pyepub/pyepubwriter/opf.py
```python
from lxml import etree
class Opf:
OPF_NS = "http://www.idpf.org/2007/opf"
DC_NS = "http://purl.org/dc/elements/1.1/"
def __init__(self):
self.metadata = Metadata()
self.manifest = Manifest()
self.spine = Spine()
self.guide = Guide()
def toXml(self):
return etree.tostring(self._generateOpf(), encoding="utf-8", xml_declaration=True, pretty_print=True)
def _generateOpf(self):
opf = etree.Element("{{{0}}}package".format(Opf.OPF_NS), {"unique-identifier": "BookId", "version": "2.0"}, nsmap={None: Opf.OPF_NS})
opf.append(self.metadata.toElement())
opf.append(self.manifest.toElement())
opf.append(self.spine.toElement())
opf.append(self.guide.toElement())
return opf
class Manifest:
def __init__(self):
self._items = []
self._items.append(_ManifestItem("toc.ncx", "ncx"))
def addItem(self, href, itemId):
self._items.append(_ManifestItem(href, itemId))
def toElement(self):
manifest = etree.Element("manifest")
for item in self._items:
manifest.append(item.toElement())
return manifest
class Spine:
def __init__(self):
self._idsRef = []
def addItemRef(self, idRef):
self._idsRef.append(idRef)
def toElement(self):
spine = etree.Element("spine", {"toc": "ncx"})
for idRef in self._idsRef:
etree.SubElement(spine, "itemref", {"idref": idRef})
return spine
class Metadata:
def __init__(self):
self._dcItems = []
self._items = []
def addTitle(self, title):
item = _MetadataDCItem("title", title)
self._dcItems.append(item)
def addLanguage(self, language):
item = _MetadataDCItem("language", language)
self._dcItems.append(item)
def addIdentifier(self, identifier):
item = _MetadataDCItem("identifier", identifier)
item.addOpfAttribute("scheme", "UUID")
item.addAttribute("id", "BookId")
self._dcItems.append(item)
def addModificationDate(self, date):
item = _MetadataDCItem("date", date)
item.addOpfAttribute("event", "modification")
self._dcItems.append(item)
def addPublisher(self, publisher):
item = _MetadataDCItem("publisher", publisher)
self._dcItems.append(item)
def addPublicationDate(self, publicationDate):
item = _MetadataDCItem("date", publicationDate.strftime("%Y-%m-%d"))
item.addOpfAttribute("event", "publication")
self._dcItems.append(item)
def addDescription(self, description):
item = _MetadataDCItem("description", description)
self._dcItems.append(item)
def addTranslator(self, translator, fileAs=""):
item = _MetadataDCItem("contributor", translator)
item.addOpfAttribute("role", "trl")
item.addOpfAttribute("file-as", fileAs if fileAs else translator)
self._dcItems.append(item)
def addAuthor(self, author, fileAs=""):
item = _MetadataDCItem("creator", author)
item.addOpfAttribute("role", "aut")
item.addOpfAttribute("file-as", fileAs if fileAs else author)
self._dcItems.append(item)
def addSubject(self, subject):
item = _MetadataDCItem("subject", subject)
self._dcItems.append(item)
def addIlustrator(self, ilustrator, fileAs=""):
item = _MetadataDCItem("contributor", ilustrator)
item.addOpfAttribute("role", "ill")
item.addOpfAttribute("file-as", fileAs if fileAs else ilustrator)
self._dcItems.append(item)
def addCustom(self, name, content):
item = _MetadataItem(name, content)
self._items.append(item)
def toElement(self):
metadata = etree.Element("metadata", nsmap={"opf": Opf.OPF_NS, "dc": Opf.DC_NS})
for dcItem in self._dcItems:
metadata.append(dcItem.toElement())
for item in self._items:
metadata.append(item.toElement())
return metadata
class Guide:
def __init__(self):
self._guide = etree.Element("guide")
def addReference(self, href, title, type):
etree.SubElement(self._guide, "reference", {"href": href, "title": title, "type": type})
def toElement(self):
return self._guide
class _ManifestItem:
_mediaTypes = {"ncx": "application/x-dtbncx+xml",
"xhtml": "application/xhtml+xml",
"css": "text/css",
"jpg": "image/jpeg",
"jpeg": "image/jpeg",
"png": "image/png",
"gif": "image/gif"}
def __init__(self, href, itemId):
self._href = href
self._itemId = itemId
self._mediaType = self._getMediaType(href)
def toElement(self):
return etree.Element("item", {"href": self._href, "id": self._itemId, "media-type": self._mediaType})
def _getMediaType(self, href):
ext = href[href.rfind(".") + 1:]
return _ManifestItem._mediaTypes[ext]
class _MetadataDCItem:
def __init__(self, name, content):
self._name = name
self._content = content
self._attributes = {}
self._opfAttributes = {}
def addAttribute(self, name, value):
self._attributes[name] = value
def addOpfAttribute(self, name, value):
self._opfAttributes[name] = value
def toElement(self):
dc_ns = "{{{0}}}".format(Opf.DC_NS)
opf_ns = "{{{0}}}".format(Opf.OPF_NS)
element = etree.Element(dc_ns + self._name)
for name, value in self._attributes.items():
element.set(name, value)
for name, value in self._opfAttributes.items():
element.set(opf_ns + name, value)
element.text = self._content
return element
class _MetadataItem:
def __init__(self, name, content):
self._name = name
self._content = content
def toElement(self):
return etree.Element("meta", {"name": self._name, "content": self._content})
```
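A minimal sketch that assembles an OPF with the classes above; the identifier, file names and person names are placeholders. Manifest ids double as spine idrefs here, matching how `Spine.addItemRef` is used.
```python
import datetime

from pyepub.pyepubwriter.opf import Opf

opf = Opf()
opf.metadata.addIdentifier('urn:uuid:00000000-0000-0000-0000-000000000000')  # placeholder uuid
opf.metadata.addTitle('Mi libro')
opf.metadata.addLanguage('es')
opf.metadata.addAuthor('Juan Ramírez', 'Ramírez, Juan')
opf.metadata.addPublicationDate(datetime.date(2014, 1, 1))

opf.manifest.addItem('Text/Section0001.xhtml', 'Section0001.xhtml')
opf.spine.addItemRef('Section0001.xhtml')
opf.guide.addReference('Text/cubierta.xhtml', 'Cubierta', 'cover')

print(opf.toXml().decode('utf-8'))   # serialized content.opf
```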
#### File: pyepub/pyepubwriter/toc.py
```python
from lxml import etree
class Toc:
_TOC_NS = "http://www.daisy.org/z3986/2005/ncx/"
def __init__(self):
self._headItems = []
self._metadataItems = []
self._navPoints = []
def addIdentifier(self, identifier):
self._headItems.append(_HeadItem("uid", identifier))
def addTitle(self, title):
self._metadataItems.append(_MetadataItem("docTitle", title))
def addNavPoint(self, ref, title):
navPoint = NavPoint(ref, title)
self._navPoints.append(navPoint)
return navPoint
def toXml(self):
toc = etree.Element("{{{0}}}ncx".format(Toc._TOC_NS), {"version": "2005-1"}, nsmap={None: Toc._TOC_NS})
# Assign all the playOrders and ids of the navPoints.
playOrder = 1
for navPoint in self._navPoints:
playOrder = self._appendNavPointsPlayOrderAndId(navPoint, playOrder)
tocHead = etree.SubElement(toc, "head")
self._addRequiredTocHeadItems()
for headItem in self._headItems:
tocHead.append(headItem.toElement())
for metadataItem in self._metadataItems:
toc.append(metadataItem.toElement())
navMap = etree.SubElement(toc, "navMap")
for navPoint in self._navPoints:
navMap.append(navPoint.toElement())
doctypeText = '<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN" "http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">'
return etree.tostring(toc, encoding="utf-8", xml_declaration=True, doctype=doctypeText, pretty_print=True)
def _appendNavPointsPlayOrderAndId(self, navPoint, startPlayOrder):
"""
Assigns the playOrders and ids to the navPoint passed as a parameter, and recursively
to all of its child navPoints as well.
@param navPoint: a NavPoint object.
@param startPlayOrder: the playOrder number to start counting from.
@return: the next valid playOrder number.
"""
navPoint.playOrder = startPlayOrder
navPoint.navId = "navPoint-{0}".format(startPlayOrder)
nextPlayOrder = startPlayOrder + 1
for childNavPoint in navPoint.navPoints:
nextPlayOrder = self._appendNavPointsPlayOrderAndId(childNavPoint, nextPlayOrder)
return nextPlayOrder
def _addRequiredTocHeadItems(self):
self._headItems.append(_HeadItem("depth", "1"))
self._headItems.append(_HeadItem("totalPageCount", "0"))
self._headItems.append(_HeadItem("maxPageNumber", "0"))
class NavPoint:
def __init__(self, ref, title):
# A list of NavPoint objects with the child navPoints.
self.navPoints = []
self.ref = "Text/{0}".format(ref)
self.title = title
# The playOrders and ids are all fixed up at the moment the epub is generated. These
# values cannot be generated as navPoints are added, because there are many ways for
# them to end up in an inconsistent state if the navPoints are not added in order.
self.playOrder = 0
self.navId = ""
def addNavPoint(self, ref, title):
navPoint = NavPoint(ref, title)
self.navPoints.append(navPoint)
return navPoint
def toElement(self):
navPoint = etree.Element("navPoint", {"id": str(self.navId), "playOrder": str(self.playOrder)})
navLabel = etree.SubElement(navPoint, "navLabel")
textElement = etree.SubElement(navLabel, "text")
textElement.text = self.title
etree.SubElement(navPoint, "content", {"src": self.ref})
for nvPoint in self.navPoints:
navPoint.append(nvPoint.toElement())
return navPoint
class _HeadItem:
def __init__(self, name, content):
self._name = name
self._ref = content
def toElement(self):
return etree.Element("meta", {"name": "dtb:{0}".format(self._name), "content": self._ref})
class _MetadataItem:
def __init__(self, tag, content):
self._tag = tag
self._ref = content
def toElement(self):
root = etree.Element(self._tag)
textElement = etree.SubElement(root, "text")
textElement.text = self._ref
return root
```
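A companion sketch for the writer-side `Toc`: nav points can be nested through the returned `NavPoint`, and playOrder/ids are only assigned when `toXml()` runs, as the comment in `NavPoint.__init__` explains. The identifier, file names and titles are placeholders.
```python
from pyepub.pyepubwriter.toc import Toc

toc = Toc()
toc.addIdentifier('urn:uuid:00000000-0000-0000-0000-000000000000')  # placeholder uuid
toc.addTitle('Mi libro')

chapter1 = toc.addNavPoint('Section0001.xhtml', 'Capítulo 1')
chapter1.addNavPoint('Section0002.xhtml', 'Capítulo 1.1')   # nested entry
toc.addNavPoint('Section0003.xhtml', 'Capítulo 2')

# playOrder and navPoint ids are filled in here, during serialization.
print(toc.toXml().decode('utf-8'))
```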
|
{
"source": "JFtechOfficial/JFtech4Kodi",
"score": 2
}
|
#### File: JFtech4Kodi/plugin.program.hyperion-controller/default.py
```python
import xbmcgui
import xbmcplugin
import xbmcaddon
import simplejson as json
import hyperion_client
import pyxbmct
import socket
import routing
ADDON = xbmcaddon.Addon()
#ADDONNAME = ADDON.getAddonInfo('id')
plugin = routing.Plugin()
xbmcplugin.setContent(plugin.handle, 'files')
#path = os.path.dirname(os.path.realpath(__file__))
#color_path = os.path.join(xbmc.translatePath(path), "colors.json")
color_path = xbmc.translatePath(
'special://home/addons/plugin.program.hyperion-controller/colors.json')
ip = str(ADDON.getSetting('ip'))
port = ADDON.getSetting('port')
apriority = ADDON.getSetting('priority')
h = hyperion_client.hyperion_client(ip, port)
debug = ADDON.getSetting('debug')
if debug:
pass
def translate(text):
return ADDON.getLocalizedString(text).encode("utf-8")
##################################
class MyAddon(pyxbmct.AddonDialogWindow):
def __init__(self, title=''):
super(MyAddon, self).__init__(title)
self.setGeometry(400, 240, 4, 4)
self.active_RGB = [0, 0, 0]
try:
active = h.active_color("RGB")
if len(active) == 3:
self.active_RGB = active
except socket.error:
pass
self.set_sliderR()
self.set_sliderG()
self.set_sliderB()
# Connect key and mouse events for slider update feedback.
self.connectEventList([pyxbmct.ACTION_MOVE_LEFT,
pyxbmct.ACTION_MOVE_RIGHT,
pyxbmct.ACTION_MOUSE_DRAG,
pyxbmct.ACTION_MOUSE_LEFT_CLICK],
self.slider_update)
self.okButton = pyxbmct.Button('Ok')
self.placeControl(self.okButton, 3, 2)
self.connect(self.okButton, self.launch)
self.closeButton = pyxbmct.Button(translate(30014))
self.placeControl(self.closeButton, 3, 1)
self.connect(self.closeButton, self.close)
self.set_navigation()
# Connect a key action (Backspace) to close the window.
self.connect(pyxbmct.ACTION_NAV_BACK, self.close)
def set_sliderR(self):
# Slider value label
SLIDER_INIT_VALUE = self.active_RGB[0]
self.slider_valueR = pyxbmct.Label(
'[COLOR FFFF0000]' + str(SLIDER_INIT_VALUE) + '[/COLOR]', alignment=pyxbmct.ALIGN_CENTER)
self.placeControl(self.slider_valueR, 0, 1)
#
slider_caption = pyxbmct.Label(translate(30007), alignment=pyxbmct.ALIGN_CENTER)
self.placeControl(slider_caption, 0, 0)
# Slider
self.sliderR = pyxbmct.Slider()
self.placeControl(self.sliderR, 0, 2, pad_y=10, columnspan=2)
# self.slider.setPercent(SLIDER_INIT_VALUE)
self.sliderR.setInt(SLIDER_INIT_VALUE, -10, 5, 255)
def set_sliderG(self):
# Slider value label
SLIDER_INIT_VALUE = self.active_RGB[1]
self.slider_valueG = pyxbmct.Label(
'[COLOR FF00FF00]' + str(SLIDER_INIT_VALUE) + '[/COLOR]', alignment=pyxbmct.ALIGN_CENTER)
self.placeControl(self.slider_valueG, 1, 1)
#
slider_caption = pyxbmct.Label(translate(30008), alignment=pyxbmct.ALIGN_CENTER)
self.placeControl(slider_caption, 1, 0)
# Slider
self.sliderG = pyxbmct.Slider()
self.placeControl(self.sliderG, 1, 2, pad_y=10, columnspan=2)
# self.slider.setPercent(SLIDER_INIT_VALUE)
self.sliderG.setInt(SLIDER_INIT_VALUE, -10, 5, 255)
def set_sliderB(self):
# Slider value label
SLIDER_INIT_VALUE = self.active_RGB[2]
self.slider_valueB = pyxbmct.Label(
'[COLOR FF0000FF]' + str(SLIDER_INIT_VALUE) + '[/COLOR]', alignment=pyxbmct.ALIGN_CENTER)
self.placeControl(self.slider_valueB, 2, 1)
#
slider_caption = pyxbmct.Label(translate(30009), alignment=pyxbmct.ALIGN_CENTER)
self.placeControl(slider_caption, 2, 0)
# Slider
self.sliderB = pyxbmct.Slider()
self.placeControl(self.sliderB, 2, 2, pad_y=10, columnspan=2)
# self.slider.setPercent(SLIDER_INIT_VALUE)
self.sliderB.setInt(SLIDER_INIT_VALUE, -10, 5, 255)
def launch(self):
red = self.sliderR.getInt()
green = self.sliderG.getInt()
blue = self.sliderB.getInt()
try:
global h
h.set_RGBcolor(red=red, green=green, blue=blue, priority=apriority)
line = translate(30010) + str(red) + ', ' + str(green) + \
', ' + str(blue) + ' on Hyperion ' + ip
xbmcgui.Dialog().notification(translate(30003), line)
h.close_connection()
except socket.error:
xbmcgui.Dialog().notification(translate(30012), translate(30013), xbmcgui.NOTIFICATION_ERROR)
self.close()
def set_navigation(self):
# Set navigation between controls
self.sliderG.controlUp(self.sliderR)
self.sliderR.controlDown(self.sliderG)
self.sliderB.controlUp(self.sliderG)
self.sliderG.controlDown(self.sliderB)
self.okButton.controlDown(self.sliderB)
self.sliderB.controlDown(self.okButton)
self.okButton.controlUp(self.sliderB)
self.okButton.controlLeft(self.closeButton)
self.closeButton.controlRight(self.okButton)
# Set initial focus
self.setFocus(self.sliderR)
def slider_update(self):
# Update slider value label when the slider nib moves
try:
if self.getFocus() == self.sliderR:
n = self.sliderR.getInt()
if n < 0:
n = 0
self.slider_valueR.setLabel('[COLOR FFFF0000]' + str(n) + '[/COLOR]')
elif self.getFocus() == self.sliderG:
n = self.sliderG.getInt()
if n < 0:
n = 0
self.slider_valueG.setLabel('[COLOR FF00FF00]' + str(n) + '[/COLOR]')
elif self.getFocus() == self.sliderB:
n = self.sliderB.getInt()
if n < 0:
n = 0
self.slider_valueB.setLabel('[COLOR FF0000FF]' + str(n) + '[/COLOR]')
except (RuntimeError, SystemError):
pass
def setAnimation(self, control):
# Set fade animation for all add-on window controls
control.setAnimations([('WindowOpen', 'effect=fade start=0 end=100 time=200',),
('WindowClose', 'effect=fade start=100 end=0 time=200',)])
##################################
@plugin.route('/')
def index():
li = xbmcgui.ListItem(translate(30000))
# 'http://www.weareclear.co.uk/wp-content/uploads/2017/12/logo.png'
image = 'https://i.imgur.com/WowKBZT.png'
li.setArt({'thumb': image,
'icon': image})
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(
clear), listitem=li, isFolder=False)
li = xbmcgui.ListItem(translate(30001))
# 'https://teetribe.eu/wp-content/uploads/2018/05/RGB-Red-Green-Blue.png'
image = 'https://imgur.com/2k0E5R1.png'
li.setArt({'thumb': image,
'icon': image,
'fanart': image})
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(colors),
listitem=li, isFolder=True)
li = xbmcgui.ListItem(translate(30002))
image = 'https://png.icons8.com/color/1600/color-wheel-2.png'
li.setArt({'thumb': image,
'icon': image,
'fanart': image})
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(effects),
listitem=li, isFolder=True)
li = xbmcgui.ListItem(translate(30003))
image = 'https://imgur.com/DdRsSe9.png'
# 'https://sites.google.com/site/makecolorsimages/sliders-RGB_512x512.png'
li.setArt({'thumb': image,
'icon': image})
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(RGB_sliders), listitem=li)
li = xbmcgui.ListItem(translate(30004))
image = 'https://cdn4.iconfinder.com/data/icons/meBaze-Freebies/512/setting.png'
li.setArt({'thumb': image,
'icon': image})
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(
settings), listitem=li, isFolder=False)
li = xbmcgui.ListItem(translate(30005))
image = 'https://chart.googleapis.com/chart?cht=qr&chl=https%3A%2F%2Fko-fi.com%2FY8Y0FW3V&chs=180x180&choe=UTF-8&chld=L|2'
li.setArt({'thumb': image,
'icon': image})
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(
donate), listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route('/clear')
def clear():
try:
h.clear_all()
xbmcgui.Dialog().notification(translate(30000), translate(30006))
h.close_connection()
except socket.error:
xbmcgui.Dialog().notification(translate(30012), translate(30013), xbmcgui.NOTIFICATION_ERROR)
@plugin.route('/colors')
def colors():
with open(color_path) as f:
colornames = json.load(f)
for color in colornames:
hexColor = '%02x%02x%02x' % tuple(colornames[color])
img = 'https://dummyimage.com/100x100/' + hexColor + '/' + hexColor + '.jpg'
li = xbmcgui.ListItem(color)
li.setArt({'thumb': img,
'icon': img,
'fanart': img})
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(
color_launcher, color), listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route('/colors/<color>')
def color_launcher(color):
with open(color_path) as f:
colornames = json.load(f)
try:
h.set_RGBcolor(red=colornames[color][0], green=colornames[color]
[1], blue=colornames[color][2], priority=apriority)
line = translate(30010) + color + translate(30011) + ip
xbmcgui.Dialog().notification(translate(30001), line)
h.close_connection()
except socket.error:
xbmcgui.Dialog().notification(translate(30012), translate(30013), xbmcgui.NOTIFICATION_ERROR)
@plugin.route('/effects')
def effects():
try:
effectnames = h.effects_names()
for effect in effectnames:
li = xbmcgui.ListItem(effect)
xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(effect_launcher, effect), listitem=li)
except socket.error:
xbmcgui.Dialog().notification(translate(30012), translate(30013), xbmcgui.NOTIFICATION_ERROR)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route('/effects/<effect>')
def effect_launcher(effect):
try:
h.set_effect(effectName=effect, priority=apriority)
line = translate(30010) + effect + translate(30011) + ip
xbmcgui.Dialog().notification(translate(30002), line)
h.close_connection()
except socket.error:
xbmcgui.Dialog().notification(translate(30012), translate(30013), xbmcgui.NOTIFICATION_ERROR)
@plugin.route('/RGB_sliders')
def RGB_sliders():
window = MyAddon(translate(30003))
window.doModal()
# Destroy the instance explicitly because
# underlying xbmcgui classes are not garbage-collected on exit.
del window
@plugin.route('/settings')
def settings():
ADDON.openSettings()
@plugin.route('/donate')
def donate():
pass
if __name__ == '__main__':
plugin.run()
```
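For reference, a hedged sketch of the `hyperion_client` calls the add-on relies on, usable outside Kodi. The client module ships with the add-on and its API is inferred only from the calls above; the host address is a placeholder and 19444 is assumed to be the usual Hyperion JSON port.
```python
import socket

import hyperion_client  # bundled with the add-on; API inferred from the plugin code above

h = hyperion_client.hyperion_client('192.168.1.50', 19444)  # placeholder host, assumed JSON port
try:
    print(h.effects_names())                        # effects configured on the Hyperion server
    h.set_RGBcolor(red=255, green=0, blue=0, priority=100)
    h.clear_all()                                   # clear the colour set above
    h.close_connection()
except socket.error as error:
    print('Hyperion not reachable: {0}'.format(error))
```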
|
{
"source": "jfterpstra/onepercentclub-site",
"score": 2
}
|
#### File: core/tests/test_api.py
```python
from decimal import Decimal
from mock import patch
from bunch import bunchify
from django.core.urlresolvers import reverse
from rest_framework.authtoken.models import Token
from rest_framework import status
from bluebottle.bb_donations.tests.test_api import DonationApiTestCase
from bluebottle.bb_orders.views import ManageOrderDetail
from bluebottle.test.factory_models.projects import ProjectFactory
from bluebottle.test.factory_models.orders import OrderFactory
from bluebottle.test.factory_models.donations import DonationFactory
from bluebottle.test.factory_models.fundraisers import FundRaiserFactory
@patch.object(ManageOrderDetail, 'check_status_psp')
class TestDonationList(DonationApiTestCase):
"""
Test that the fundraiser donations list only works for the fundraiser owner
"""
def setUp(self):
super(TestDonationList, self).setUp()
# Make user 1 a staff user
self.user1.is_staff = True
self.user1.save()
# Create a target project/fundraiser
self.project = ProjectFactory.create(amount_asked=5000, owner=self.user1)
self.project.set_status('campaign')
self.fundraiser = FundRaiserFactory.create(amount=4000, owner=self.user1,
project=self.project)
# Two users make donations
order1 = OrderFactory.create(user=self.user1)
self.donation1 = DonationFactory.create(amount=15, project=self.project,
fundraiser=self.fundraiser, order=order1)
order1.locked()
order1.succeeded()
# Create the second without fundraiser
order2 = OrderFactory.create(user=self.user2)
self.donation2 = DonationFactory.create(amount=10,
project=self.project, fundraiser=None, order=order2)
order2.locked()
order2.succeeded()
self.fundraiser_donation_list_url = reverse('fund-ticker-list')
def test_latest_donation_list(self, check_status_psp):
user_token = Token.objects.create(user=self.user1)
response = self.client.get(self.fundraiser_donation_list_url,
HTTP_AUTHORIZATION="Token {0}".format(user_token))
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(len(response.data['results']), 2)
# Second donation (first in list) without fundraiser
data1 = bunchify(response.data['results'][0])
self.assertEqual(data1.id, self.donation2.id)
self.assertEqual(data1.amount, Decimal('10'))
self.assertEqual(data1.project.title, self.project.title)
self.assertTrue(data1.project.country.name)
self.assertEqual(data1.user.full_name, self.user2.full_name)
self.assertEqual(data1.project.image, '')
self.assertEqual(data1.project.owner.avatar, '')
# First donation (second in list) with fundraiser
data2 = bunchify(response.data['results'][1])
self.assertEqual(data2['amount'], Decimal('15'))
self.assertEqual(data2['fundraiser'], self.fundraiser.id)
```
#### File: apps/cowry/views.py
```python
from rest_framework import generics
from rest_framework import response
from rest_framework import status
from . import payments
from .exceptions import PaymentException
from .models import Payment
from .permissions import IsOrderCreator
from rest_framework.permissions import AllowAny
from .serializers import PaymentSerializer
from rest_framework import exceptions
class PaymentDetail(generics.RetrieveUpdateDestroyAPIView):
"""
View for working with Payments. Payments can be retrieved (GET), the payment method and submethod can be updated (PUT)
and a payment can be cancelled (DELETE).
"""
model = Payment
serializer_class = PaymentSerializer
permission_classes = (IsOrderCreator, )
def get(self, request, *args, **kwargs):
# Catch NotAuthenticated exceptions so anonymous users get the proper response.
try:
return super(PaymentDetail, self).get(request, *args, **kwargs)
except exceptions.NotAuthenticated:
raise exceptions.PermissionDenied()
def destroy(self, request, *args, **kwargs):
payment = self.get_object()
try:
payments.cancel_payment(payment)
except (NotImplementedError, PaymentException) as e:
return response.Response(data=e, status=status.HTTP_400_BAD_REQUEST)
else:
return response.Response(status=status.HTTP_202_ACCEPTED)
```
#### File: apps/crawlable/middleware.py
```python
import logging
from bluebottle.fundraisers.models import FundRaiser
from apps.projects.models import Project
from apps.tasks.models import Task
from django.http.response import HttpResponsePermanentRedirect
from django.template.response import SimpleTemplateResponse
import re
import time
import os
import urllib
import urlparse
import tempfile
from django.http import HttpResponse, HttpResponseServerError
from django.conf import settings
from django.utils import html as html_utils
from django.core import cache
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.common.utils import is_connectable
from selenium.webdriver.phantomjs.webdriver import WebDriver
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
logger = logging.getLogger(__name__)
HASHBANG = '#!'
ESCAPED_FRAGMENT = '_escaped_fragment_'
CACHE_PREFIX = '_share_'
class DedicatedWebDriver(RemoteWebDriver):
"""
Wrapper to communicate with a dedicated PhantomJS through Ghostdriver.
If you have a phantomjs instance running at all times, you can use this dedicated webdriver to communicate with it.
"""
def __init__(self, port=None, desired_capabilities=DesiredCapabilities.PHANTOMJS):
if port is None:
port = 8910
class DummyService():
"""Dummy service to accept the same calls as the PhantomJS webdriver."""
def __init__(self, port):
self.port = port
@property
def service_url(self):
return 'http://localhost:%d/wd/hub' % port
def stop(self, *args, **kwargs):
pass
self.service = DummyService(port)
# Start the remote web driver.
try:
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities)
except:
self.quit()
raise
self._is_remote = False
class WebCache(object):
"""
Class to make sure the web driver is lazily loaded. For regular requests, the driver should not be instantiated
because it significantly slows down the request/response cycle (it can easily take 10 seconds to start).
"""
_web_driver = None
def __init__(self):
if hasattr(settings, 'CRAWLABLE_PHANTOMJS_ARGS') and settings.CRAWLABLE_PHANTOMJS_ARGS:
service_args = settings.CRAWLABLE_PHANTOMJS_ARGS[:]
else:
service_args = [
'--load-images=false',
'--disk-cache=true',
'--local-storage-path=%s' % os.path.join(tempfile.gettempdir(), 'phantomjs')
]
self.service_args = service_args
def get_driver(self):
"""
Only creates the driver if not present and returns it.
:return: ``WebDriver`` instance.
"""
# Dedicated mode
if hasattr(settings, 'CRAWLABLE_PHANTOMJS_DEDICATED_MODE') and settings.CRAWLABLE_PHANTOMJS_DEDICATED_MODE:
if not self._web_driver:
self._web_driver = DedicatedWebDriver(
port=getattr(settings, 'CRAWLABLE_PHANTOMJS_DEDICATED_PORT', 8910)
)
elif not is_connectable(self._web_driver.service.port):
raise RuntimeError('Cannot connect to dedicated PhantomJS instance on: %s' %
self._web_driver.service.service_url)
# Instance based mode (more for testing purposes). When debugging, you can even replace the PhantomJS webdriver
# with firefox and remove the arguments to the web driver constructor below.
else:
if not self._web_driver:
self._web_driver = WebDriver(service_args=self.service_args)
elif not is_connectable(self._web_driver.service.port):
self._web_driver.service.stop()
self._web_driver = WebDriver(service_args=self.service_args)
# Make sure it doesn't time out.
self._web_driver.set_script_timeout(30)
return self._web_driver
# Create a single instance per process.
web_cache = WebCache()
class HashbangMiddleware(object):
"""
Middleware that catches requests with escaped fragments, like: http://example.com/?_escaped_fragment_=/projects
These special cases are most likely requested by search engines that detected hashbangs (#!) in the URL. If such a
request is made, the dynamic content is generated in the background, and the generated page source is served to the
search engine.
"""
def process_request(self, request):
if request.method == 'GET' and ESCAPED_FRAGMENT in request.GET:
original_url = request.build_absolute_uri()
parsed_url = urlparse.urlparse(original_url)
# Update URL with hashbang.
query = dict(urlparse.parse_qsl(parsed_url.query))
path = ''.join([parsed_url.path, HASHBANG, query.get(ESCAPED_FRAGMENT, '')])
# See if it's a page we know so that we can send it back quickly.
route = parsed_url.query.replace('%2F', '/').split('/')
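# Worked example (illustrative URL, not from the original file): for a crawler
# request to http://example.com/?_escaped_fragment_=%2Fprojects%2Fsome-slug,
# path becomes '/#!/projects/some-slug' and route becomes
# ['_escaped_fragment_=', 'projects', 'some-slug'], so route[1] selects the
# section and route[2] the slug or id handled below.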
# Project page
if route[1] == 'projects' and len(route) > 2:
slug = route[2]
# strip query string
slug = slug.split('?')[0]
if slug != slug.lower():
return HttpResponsePermanentRedirect(original_url.lower())
try:
project = Project.objects.get(slug=slug)
return SimpleTemplateResponse(template='crawlable/project.html', context={'project': project})
except Project.DoesNotExist:
url = ''.join([parsed_url.path, '?', ESCAPED_FRAGMENT, '=', '/projects'])
return HttpResponsePermanentRedirect(url)
if route[1] == 'projects' and len(route) == 2:
projects = Project.objects.order_by('popularity').all()[:10]
url = ''.join([parsed_url.path, HASHBANG, '/projects'])
return SimpleTemplateResponse(template='crawlable/project_list.html',
context={'projects': projects, 'url': url})
# Task page
if route[1] == 'tasks' and len(route) > 2:
task_id = route[2].split('?')[0]
task = Task.objects.get(id=task_id)
return SimpleTemplateResponse(template='crawlable/task.html', context={'task': task})
# FundRaiser page
if route[1] == 'fundraisers' and len(route) > 2:
fundraiser_id = route[2].split('?')[0]
fundraiser = FundRaiser.objects.get(id=fundraiser_id)
return SimpleTemplateResponse(template='crawlable/fundraiser.html', context={'fundraiser': fundraiser})
# Update query string by removing the escaped fragment.
if ESCAPED_FRAGMENT in query:
del query[ESCAPED_FRAGMENT]
query = urllib.urlencode(query)
# Build new absolute URL.
# NOTE: Django behind a certain web/WSGI-server configuration cannot determine if a request was made using
# HTTPS or HTTP. We consult a special setting for that.
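# (Illustrative: the corresponding Django setting would simply be
#  CRAWLABLE_FORCE_HTTPS = True when the site is served over HTTPS.)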
absolute_url = urlparse.urlunparse([
'https' if settings.CRAWLABLE_FORCE_HTTPS else parsed_url.scheme,
parsed_url.netloc,
path,
parsed_url.params,
query,
parsed_url.fragment
])
try:
driver = web_cache.get_driver()
logger.debug('Generating flat content from "%s" for "%s"%s.', absolute_url, original_url,
' (forced HTTPS)' if settings.CRAWLABLE_FORCE_HTTPS else '')
driver.get(absolute_url)
# TODO: This should be replaced with something smarter that waits for a signal that all JS
# has finished executing.
time.sleep(3)
content = driver.page_source
# Remove all javascript, since it's mostly useless now.
script_tags_template = re.compile(r'<script([^/]*/>|(\s+[^>]*><\/script>))', re.U)
content = script_tags_template.sub('', content)
cache.cache.set(CACHE_PREFIX + query, content)
except Exception, e:
if cache.cache.has_key(CACHE_PREFIX + query):
content = cache.cache.get(CACHE_PREFIX + query)
else:
logger.error('There was an error rendering "%s" for "%s" with the web driver: %s', absolute_url, original_url, e)
return HttpResponseServerError()
return HttpResponse(content=content)
return None
```
#### File: apps/csvimport/forms.py
```python
import unicodecsv as csv
import cStringIO as StringIO
import codecs
import itertools
from django import forms
from django.utils.translation import ugettext_lazy as _
from .utils.common import has_duplicate_items
class CSVImportForm(forms.Form):
"""
Form used for uploading and importing a CSV file.
This form uses a dictionary `import_field_mapping` to be defined in
the admin class, specifying how CSV fields should be mapped to model
fields. CSV fields in `import_field_mapping` can be identified either by
column number or by the column header (if present).
Example::
class MyImportingAdmin(IncrementalCSVImportMixin, admin.ModelAdmin):
import_field_mapping = {
0: 'sender_account',
1: 'currency',
2: 'interest_date',
3: 'credit_debet',
4: 'amount',
5: 'counter_account',
6: 'counter_name',
7: 'book_date',
8: 'book_code',
9: 'filler',
10: 'description1',
11: 'description2',
12: 'description3',
13: 'description4',
14: 'description5',
15: 'description6',
16: 'end_to_end_id',
17: 'id_recipient',
18: 'mandate_id'
}
"""
csv_file = forms.FileField(label=_('CSV file'))
charset = 'utf-8'
delimiters = ';\t,'
# Import dialect; overrides dialect detection when specified
dialect = None
def __init__(self, *args, **kwargs):
""" Get initialization properties from kwargs. """
assert hasattr(self, 'field_mapping'), \
'No field mapping defined.'
assert isinstance(self.field_mapping, dict), \
'Field mapping is not a dictionary'
self.model = kwargs.pop('model', None)
super(CSVImportForm, self).__init__(*args, **kwargs)
def pre_save(self, instance):
"""
This method is called with the model instance before saving.
By default, it does nothing but can be subclassed.
"""
pass
def skip_instance(self, instance):
"""
Whether or not to skip an instance on import.
Called after pre_save().
Returns True or False.
By default, it does not skip any entries.
"""
return False
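# Illustrative override (hypothetical, not in the original file): skip rows that
# were already imported, assuming the model has an `end_to_end_id` field as in
# the mapping example above:
#
#   def skip_instance(self, instance):
#       return self.model.objects.filter(end_to_end_id=instance.end_to_end_id).exists()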
def validate_csv(self, reader):
"""
Generic validator for CSV contents, run during the clean stage.
Takes the reader as a parameter and raises ValidationError when
the CSV cannot be validated.
By default, it does nothing but can be subclassed.
"""
pass
def clean_csv_file(self):
csv_file = self.cleaned_data['csv_file']
# Universal newlines
# Ugly hack - but works for now
csv_string = '\n'.join(csv_file.read().splitlines())
csv_file = StringIO.StringIO(csv_string)
# TODO: Use chardet
# Ref: https://github.com/dokterbob/django-newsletter/blob/master/newsletter/admin_forms.py#L86
sniffer = csv.Sniffer()
# Python's CSV code eats only UTF-8
csv_file = codecs.EncodedFile(csv_file, self.charset)
try:
if self.dialect:
# Override dialect, don't autodetect
dialect = self.dialect
else:
# Sniff dialect
dialect = sniffer.sniff(
csv_string,
delimiters=self.delimiters
)
# Sniff for a header
has_header = sniffer.has_header(
csv_string
)
except csv.Error, e:
raise forms.ValidationError(
_('Could not read CSV file: %s' % e.message)
)
# Read CSV file
reader = csv.reader(csv_file,
dialect=dialect, encoding=self.charset
)
if has_header:
# Update mapping using header
header = reader.next()
for (key, value) in self.field_mapping.items():
if isinstance(key, basestring):
# Key is string, derive number using header
try:
header_index = header.index(key)
except ValueError:
error_message = 'Field %s not found in CSV header.'
# Try again with outer spaces removed, and everything
# lowercased - but only when no duplicates result
header = [f.strip().lower() for f in header]
new_key = key.lower()
if not has_duplicate_items(header):
try:
header_index = header.index(new_key)
except ValueError:
raise Exception(error_message % new_key)
else:
raise Exception(error_message % key)
self.field_mapping[header_index] = value
# Field found, remove from field mapping
del self.field_mapping[key]
# Split the iterator such that we can validate
(reader, validate_fieldcount, validate_csv) = itertools.tee(reader, 3)
# Validate field count
validation_row = validate_fieldcount.next()
if len(self.field_mapping) > len(validation_row):
raise forms.ValidationError(
'Fewer fields in CSV (%d) than specified in field mapping (%d).' % (
len(validation_row), len(self.field_mapping)
)
)
# Validate CSV
if self.validate_csv:
self.validate_csv(validate_csv)
self.cleaned_data['csv_reader'] = reader
return csv_file
def save(self):
""" Write results of CSV reader to database according to mapping. """
new_records = 0
ignored_records = 0
for row in self.cleaned_data['csv_reader']:
init_args = {}
for (index, field_name) in self.field_mapping.items():
init_args[field_name] = row[index]
instance = self.model(**init_args)
# Further processing before saving
self.pre_save(instance)
if self.skip_instance(instance):
# Ignore
ignored_records += 1
else:
# Save
instance.save()
new_records += 1
return (new_records, ignored_records)
```
#### File: csvimport/utils/admin.py
```python
from django.http import Http404
from functools import update_wrapper
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin.util import unquote
from django.utils.encoding import force_unicode
class ExtendibleModelAdminMixin(object):
def _getobj(self, request, object_id):
opts = self.model._meta
try:
obj = self.queryset(request).get(pk=unquote(object_id))
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to
# be able to determine whether a given object exists.
obj = None
if obj is None:
raise Http404(
_(
'%(name)s object with primary key '
'%(key)r does not exist.'
) % {
'name': force_unicode(opts.verbose_name),
'key': unicode(object_id)
}
)
return obj
def _wrap(self, view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
def _view_name(self, name):
info = self.model._meta.app_label, self.model._meta.module_name, name
return '%s_%s_%s' % info
```
#### File: donations/tests/disabled_unit.py
```python
from onepercentclub.tests.factory_models.donation_factories import DonationFactory
from onepercentclub.tests.factory_models.fundraiser_factories import FundRaiserFactory
import re
from django.test import TestCase
from django.core import mail
from apps.fund.models import Donation, DonationStatuses
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from onepercentclub.tests.factory_models.project_factories import OnePercentProjectFactory
class DonationMailTests(TestCase):
def setUp(self):
self.project_owner = BlueBottleUserFactory.create(email='<EMAIL>', primary_language='en')
self.project = OnePercentProjectFactory.create(amount_asked=50000, owner=self.project_owner)
self.user = BlueBottleUserFactory.create(first_name='Jane')
def test_mail_owner_on_new_donation(self):
donation = DonationFactory.create(user=self.user, project=self.project, donation_type=Donation.DonationTypes.one_off)
# Mailbox should not contain anything.
self.assertEqual(len(mail.outbox), 0)
donation.status = DonationStatuses.pending
donation.save()
# Owner should have a new email
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertListEqual(message.to, [self.project_owner.email])
self.assertEqual(message.subject, 'You received a new donation')
amount_string = 'EUR {0:.2f}'.format(donation.amount / 100.0)
self.assertTrue(amount_string in message.body)
for content, content_type in message.alternatives:
self.assertTrue(amount_string in content)
self.assertTrue(self.user.first_name in message.body)
def test_single_mail_on_new_donation(self):
donation = DonationFactory.create(user=self.user, project=self.project, donation_type=Donation.DonationTypes.one_off)
# Mailbox should not contain anything.
self.assertEqual(len(mail.outbox), 0)
donation.status = DonationStatuses.pending
donation.save()
# Save twice!
donation.save()
# Save with different status.
donation.status = DonationStatuses.paid
donation.save()
# Owner should have just one email
self.assertEqual(len(mail.outbox), 1)
def test_single_mail_on_new_fundraiser_donation(self):
self.fundraiser_owner = BlueBottleUserFactory.create(email='<EMAIL>', primary_language='en')
fundraiser = FundRaiserFactory.create(owner=self.fundraiser_owner, project=self.project)
donation = DonationFactory.create(user=self.user, project=self.project, donation_type=Donation.DonationTypes.one_off, fundraiser=fundraiser)
# Mailbox should not contain anything.
self.assertEqual(len(mail.outbox), 0)
donation.status = DonationStatuses.pending
donation.save()
# Save twice!
donation.save()
# Save with different status.
donation.status = DonationStatuses.paid
donation.save()
# Fundraiser owner and project owner should have just one email each
# careful, fundraiser mail is sent first
self.assertEqual(len(mail.outbox), 2)
# Verify that the link points to the fundraiser page
m = mail.outbox.pop(0)
match = re.search(r'https?://.*/go/fundraisers/(\d+)', m.body)
self.assertEqual(int(match.group(1)), fundraiser.id)
# verify that the mail is indeed directed to the fundraiser owner
self.assertIn(self.fundraiser_owner.email, m.recipients())
```
#### File: apps/fund/admin.py
```python
from apps.cowry_docdata.models import DocDataPaymentOrder, payment_method_mapping
from babel.numbers import format_currency
from bluebottle.utils.admin import export_as_csv_action, TotalAmountAdminChangeList
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.contrib.admin import SimpleListFilter
from django.contrib.admin.templatetags.admin_static import static
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from .models import Donation, Order, OrderStatuses, DonationStatuses
# http://stackoverflow.com/a/16556771
class DonationStatusFilter(SimpleListFilter):
title = _('Status')
parameter_name = 'status__exact'
default_status = DonationStatuses.paid
def lookups(self, request, model_admin):
return (('all', _('All')),) + DonationStatuses.choices
def choices(self, cl):
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == lookup if self.value() else lookup == self.default_status,
'query_string': cl.get_query_string({self.parameter_name: lookup}, []),
'display': title,
}
def queryset(self, request, queryset):
if self.value() in DonationStatuses.values:
return queryset.filter(status=self.value())
elif self.value() is None:
return queryset.filter(status=self.default_status)
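# Net effect of the filter above (explanatory note, not in the original file):
# the changelist defaults to status 'paid' when no ?status__exact=... parameter
# is given; choosing 'All' passes the value 'all', which matches neither branch
# in queryset() and therefore leaves the queryset unfiltered.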
payment_method_icon_mapping = {
'iDeal': 'fund/icon-ideal.svg',
'Direct debit': 'fund/icon-direct-debit.png',
'Mastercard': 'fund/icon-mastercard.svg',
'Visa': 'fund/icon-visa.svg',
'Gift Card': 'fund/icon-gift-card.svg',
}
class DonationAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
list_display = ('created', 'ready', 'project', 'user', 'amount_override', 'status', 'type', 'payment_method_override')
list_filter = (DonationStatusFilter, 'donation_type')
ordering = ('-ready', '-updated')
raw_id_fields = ('user', 'project', 'fundraiser', 'voucher')
readonly_fields = ('view_order', 'created', 'updated', 'ready')
fields = readonly_fields + ('status', 'donation_type', 'amount', 'currency', 'user', 'project', 'fundraiser', 'voucher')
search_fields = ('user__first_name', 'user__last_name', 'user__email', 'project__title')
export_fields = ['project', 'user', 'amount', 'created', 'updated', 'ready', 'status', 'type']
actions = (export_as_csv_action(fields=export_fields), )
def get_changelist(self, request):
self.total_column = 'amount'
return TotalAmountAdminChangeList
def view_order(self, obj):
url = reverse('admin:%s_%s_change' % (obj.order._meta.app_label, obj.order._meta.module_name), args=[obj.order.id])
return "<a href='%s'>View Order</a>" % (str(url))
view_order.allow_tags = True
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100.0, obj.currency, locale=language)
amount_override.short_description = 'amount'
def type(self, obj):
recurring = obj.donation_type == Donation.DonationTypes.recurring
icon_url = static(
'fund/icon-{0}.svg'.format({True: 'recurring-donation', False: 'one-time-donation'}[recurring]))
alt_text = {True: 'Recurring', False: 'One-time'}[recurring]
return '<img alt="{0}" src="{1}" height="16px" />'.format(alt_text, icon_url)
type.allow_tags = True
type.short_description = 'type'
def payment_method_override(self, obj):
payment_method = payment_method_mapping[obj.payment_method]
if payment_method in payment_method_icon_mapping:
icon_url = static(payment_method_icon_mapping[payment_method])
return '<img src="{0}" height="16px" /> {1}'.format(icon_url, payment_method)
return payment_method
payment_method_override.allow_tags = True
payment_method_override.short_description = 'payment method'
admin.site.register(Donation, DonationAdmin)
class DocDataPaymentOrderInline(admin.TabularInline):
model = DocDataPaymentOrder
extra = 0
max_num = 0
fields = ('payment', 'amount_override', 'status',)
readonly_fields = fields
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100.0, obj.currency, locale=language)
amount_override.short_description = 'amount'
def payment(self, obj):
url = reverse('admin:%s_%s_change' % (obj._meta.app_label, obj._meta.module_name), args=[obj.id])
return "<a href='%s'>%s</a>" % (str(url), obj)
payment.allow_tags = True
# http://stackoverflow.com/a/16556771
class OrderStatusFilter(SimpleListFilter):
title = _('Status')
parameter_name = 'status__exact'
default_status = OrderStatuses.closed
def lookups(self, request, model_admin):
return (('all', _('All')),) + OrderStatuses.choices
def choices(self, cl):
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == lookup if self.value() else lookup == self.default_status,
'query_string': cl.get_query_string({self.parameter_name: lookup}, []),
'display': title,
}
def queryset(self, request, queryset):
if self.value() in OrderStatuses.values:
return queryset.filter(status=self.value())
elif self.value() is None:
return queryset.filter(status=self.default_status)
class DonationAdminInline(admin.TabularInline):
model = Donation
extra = 0
raw_id_fields = ('project',)
fields = ('project', 'status', 'amount_override', 'amount', 'currency')
readonly_fields = ('amount_override',)
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100.0, obj.currency, locale=language)
amount_override.short_description = 'amount'
class Media:
css = {"all": ("css/admin/hide_admin_original.css",)}
# TODO Implement this when vouchers are added to the site.
#class VoucherAdminInline(admin.TabularInline):
# model = Voucher
# extra = 0
# raw_id_fields = ('sender',)
class OrderAdmin(admin.ModelAdmin):
date_hierarchy = 'updated'
list_filter = (OrderStatusFilter, 'recurring')
list_display = ('order_number', 'updated', 'closed', 'user', 'total', 'status', 'type', 'payment_status')
ordering = ('-closed', '-updated')
raw_id_fields = ('user',)
readonly_fields = ('total', 'order_number', 'created', 'updated')
fields = ('recurring',) + readonly_fields + ('user', 'status')
search_fields = ('user__first_name', 'user__last_name', 'user__email', 'order_number')
inlines = (DonationAdminInline, DocDataPaymentOrderInline,)
def total(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.total / 100.0, 'EUR', locale=language)
def type(self, obj):
icon_url = static(
'fund/icon-{0}.svg'.format({True: 'recurring-donation', False: 'one-time-donation'}[obj.recurring]))
alt_text = {True: 'Recurring', False: 'One-time'}[obj.recurring]
return '<img alt="{0}" src="{1}" height="16px" />'.format(alt_text, icon_url)
def payment_status(self, obj):
if obj.latest_payment:
return obj.latest_payment.status
return '-'
type.allow_tags = True
type.short_description = 'type'
admin.site.register(Order, OrderAdmin)
# http://stackoverflow.com/a/16556771
class ActiveFilter(SimpleListFilter):
title = _('Active')
parameter_name = 'active__exact'
active_choices = (('1', _('Yes')),
('0', _('No')),)
default = '1'
def lookups(self, request, model_admin):
return (('all', _('All')),) + self.active_choices
def choices(self, cl):
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == lookup if self.value() else lookup == self.default,
'query_string': cl.get_query_string({self.parameter_name: lookup}, []),
'display': title,
}
def queryset(self, request, queryset):
if self.value() in ('0', '1'):
return queryset.filter(active=self.value())
elif self.value() is None:
return queryset.filter(active=self.default)
```
#### File: fund/migrations/0005_migrate_paymentlogs.py
```python
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
depends_on = (
('bluebottle.payments_logger', '0001_initial'),
('bluebottle.payments_docdata', '0002_auto__add_field_docdatapayment_customer_id__add_field_docdatapayment_e'),
)
def forwards(self, orm):
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
# Create a lookup for new docdata payments
ddps = orm['payments_docdata.DocdataPayment'].objects.all().values_list('payment_cluster_id', 'id')
dd_payments = {}
for ddp in ddps:
dd_payments[ddp[0]] = ddp[1]
count = 0
total = orm['cowry_docdata.DocDataPaymentLogEntry'].objects.count()
for i, log_entry_model in enumerate(orm['cowry_docdata.DocDataPaymentLogEntry'].objects.iterator()):
if not i % 50:
print "Processing DocdataPaymentLogEntry {0} of {1}".format(i, total)
# Fetch DocDataPaymentOrder
old_docdata_payment_order = log_entry_model.docdata_payment_order
# Fetch corresponding DocdataPayment
if old_docdata_payment_order.merchant_order_reference in dd_payments:
new_docdata_payment_id = dd_payments[old_docdata_payment_order.merchant_order_reference]
else:
count += 1
msg = "No new DocdataPayment object found for the old DocdataPaymentOrder object. DocdataPaymentOrder ID: {0} DocDataPaymentLogEntry ID: {1}".format(old_docdata_payment_order.id, log_entry_model.id)
print msg
continue
# Create new PaymentLogEntry using the old DocDataPaymentLogEntry data
payment_log_entry = orm['payments_logger.PaymentLogEntry'].objects.create(
message=log_entry_model.message,
level=log_entry_model.level,
timestamp=log_entry_model.timestamp,
payment_id=new_docdata_payment_id
)
payment_log_entry.save()
if not i % 50:
print "PaymentLogEntry {0} created".format(i)
print "PaymentLogEntries without DocdataPayment: {0}".format(count)
def backwards(self, orm):
orm['payments_logger.PaymentLogEntry'].objects.all().delete()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'cowry.payment': {
'Meta': {'object_name': 'Payment'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3'}),
'fee': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['fund.Order']"}),
'payment_method_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'payment_submethod_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '15', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapayment': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataPayment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'docdata_payment_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'docdata_payments'", 'to': u"orm['cowry_docdata.DocDataPaymentOrder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry_docdata.docdatapayment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapaymentlogentry': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'DocDataPaymentLogEntry'},
'docdata_payment_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log_entries'", 'to': u"orm['cowry_docdata.DocDataPaymentOrder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapaymentorder': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataPaymentOrder', '_ormbases': [u'cowry.Payment']},
'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'customer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'merchant_order_reference': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'payment_order_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
u'payment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cowry.Payment']", 'unique': 'True', 'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'})
},
u'cowry_docdata.docdatawebdirectdirectdebit': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataWebDirectDirectDebit', '_ormbases': [u'cowry_docdata.DocDataPayment']},
'account_city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11'}),
u'docdatapayment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cowry_docdata.DocDataPayment']", 'unique': 'True', 'primary_key': 'True'}),
'iban': ('django_iban.fields.IBANField', [], {'max_length': '34'})
},
u'fund.donation': {
'Meta': {'object_name': MODEL_MAP['donation']['class']},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'one_off'", 'max_length': '20', 'db_index': 'True'}),
'fundraiser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_donations'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['fundraiser']['model'])}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donations'", 'null': 'True', 'to': u"orm['fund.Order']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'old_donations'", 'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}),
'ready': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'}),
'voucher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vouchers.Voucher']", 'null': 'True', 'blank': 'True'})
},
u'fund.order': {
'Meta': {'ordering': "('-updated',)", 'object_name': MODEL_MAP['order']['class']},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'}),
'recurring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'current'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_orders'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])})
},
u'fund.recurringdirectdebitpayment': {
'Meta': {'object_name': 'RecurringDirectDebitPayment'},
'account': ('apps.fund.fields.DutchBankAccountField', [], {'max_length': '10'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bic': ('django_iban.fields.SWIFTBICField', [], {'default': "''", 'max_length': '11', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'iban': ('django_iban.fields.IBANField', [], {'default': "''", 'max_length': '34', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manually_process': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'unique': 'True'})
},
MODEL_MAP['fundraiser']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['fundraiser']['class']},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': "'10'"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'geo.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"})
},
u'geo.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'geo.subregion': {
'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"})
},
MODEL_MAP['user']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['user']['class']},
'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
'available_time': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'disable_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skypename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}),
'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'})
},
MODEL_MAP['order']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['order']['class']},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': "'100'", 'null': 'True', 'blank': 'True'}),
'status': ('django_fsm.db.fields.fsmfield.FSMField', [], {'default': "'created'", 'max_length': '50'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '16', 'decimal_places': '2'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'})
},
MODEL_MAP['organization']['model_lower']: {
'Meta': {'ordering': "['name']", 'object_name': MODEL_MAP['organization']['class']},
'account_bank_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_bank_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_bank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11', 'blank': 'True'}),
'account_holder_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_holder_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_holder_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}),
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_line1': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'address_line2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'partner_organizations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'registration': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'payments.orderpayment': {
'Meta': {'object_name': 'OrderPayment'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '2'}),
'authorization_action': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.OrderPaymentAction']", 'unique': 'True', 'null': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'integration_data': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'max_length': '5000', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'order_payments'", 'to': "orm['{0}']".format(MODEL_MAP['order']['model'])}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'status': ('django_fsm.db.fields.fsmfield.FSMField', [], {'default': "'created'", 'max_length': '50'}),
'transaction_fee': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'})
},
u'payments.orderpaymentaction': {
'Meta': {'object_name': 'OrderPaymentAction'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'payload': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'})
},
u'payments.payment': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'Payment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_payment': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.OrderPayment']", 'unique': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_payments.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django_fsm.db.fields.fsmfield.FSMField', [], {'default': "'started'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'payments.transaction': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'Transaction'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Payment']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_payments.transaction_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'payments_docdata.docdatadirectdebittransaction': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataDirectDebitTransaction', '_ormbases': [u'payments.Transaction']},
'account_city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'bic': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'iban': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
u'transaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.Transaction']", 'unique': 'True', 'primary_key': 'True'})
},
u'payments_docdata.docdatapayment': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocdataPayment', '_ormbases': [u'payments.Payment']},
'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'customer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'default_pm': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'ideal_issuer_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'ip_address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '5', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'merchant_order_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'payment_cluster_id': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '200'}),
'payment_cluster_key': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '200'}),
u'payment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.Payment']", 'unique': 'True', 'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'}),
'total_acquirer_approved': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_acquirer_pending': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_captured': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_charged_back': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_gross_amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '15', 'decimal_places': '2'}),
'total_refunded': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_registered': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'}),
'total_shopper_pending': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '15', 'decimal_places': '2'})
},
u'payments_docdata.docdatatransaction': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocdataTransaction', '_ormbases': [u'payments.Transaction']},
'authorization_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'authorization_currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'authorization_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'capture_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'capture_currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'capture_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'docdata_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '30'}),
u'transaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payments.Transaction']", 'unique': 'True', 'primary_key': 'True'})
},
u'payments_logger.paymentlogentry': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'PaymentLogEntry'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['payments.Payment']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'projects.partnerorganization': {
'Meta': {'object_name': 'PartnerOrganization'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
MODEL_MAP['project']['model_lower']: {
'Meta': {'ordering': "['title']", 'object_name': MODEL_MAP['project']['class']},
'allow_overfunding': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'amount_asked': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'amount_donated': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'amount_needed': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'campaign_ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'campaign_funded': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'campaign_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'effects': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'for_who': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'future': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'is_campaign': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Language']", 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'mchanga_account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organization'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['organization']['model'])}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'partner_organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.PartnerOrganization']", 'null': 'True', 'blank': 'True'}),
'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'popularity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reach': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'skip_monthly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectPhase']"}),
'story': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
},
u'utils.language': {
'Meta': {'ordering': "['language_name']", 'object_name': 'Language'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'native_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'vouchers.voucher': {
'Meta': {'object_name': 'Voucher'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchers'", 'null': 'True', 'to': u"orm['fund.Order']"}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'receiver'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'receiver_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sender'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'sender_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'sender_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['payments', 'payments_docdata', 'payments_logger', 'cowry_docdata', 'fund']
symmetrical = True
```
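The frozen model definitions above are parametrized through a `MODEL_MAP` lookup so the migration keeps working when the project, organization, or user models are swapped out. A minimal sketch of the shape such a map would need for the keys referenced above (`model`, `model_lower`, `class`); the concrete values here are hypothetical:
```python
# Hypothetical MODEL_MAP shape; only the keys consumed by the frozen models
# dict above are shown ('model', 'model_lower', 'class'). Actual values
# depend on the swappable model settings of the installed project.
MODEL_MAP = {
    'project': {
        'model': 'projects.Project',        # used in "orm['<app>.<Model>']" references
        'model_lower': 'projects.project',  # dict key of the frozen model entry
        'class': 'Project',                 # value for Meta 'object_name'
    },
    'organization': {'model': 'organizations.Organization'},
    'user': {'model': 'members.Member'},
}

# Example of how the frozen dict consumes it:
# MODEL_MAP['project']['model_lower']: {'Meta': {'object_name': MODEL_MAP['project']['class']}, ...}
```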
#### File: apps/fund/models.py
```python
import logging
import random
from django.utils.translation import ugettext as _
from django.conf import settings
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils import translation
from django.utils import timezone
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from django_iban.fields import IBANField, SWIFTBICField
from djchoices import DjangoChoices, ChoiceItem
from babel.numbers import format_currency
from registration.signals import user_activated
from apps.cowry_docdata.models import DocDataPaymentOrder
from apps.vouchers.models import VoucherStatuses
from .fields import DutchBankAccountField
logger = logging.getLogger(__name__)
random.seed()
class DonationStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
in_progress = ChoiceItem('in_progress', label=_("In progress"))
pending = ChoiceItem('pending', label=_("Pending"))
paid = ChoiceItem('paid', label=_("Paid"))
failed = ChoiceItem('failed', label=_("Failed"))
class ValidDonationsManager(models.Manager):
def get_queryset(self):
queryset = super(ValidDonationsManager, self).get_queryset()
return queryset.filter(status__in=(DonationStatuses.pending, DonationStatuses.paid))
class Donation(models.Model):
"""
Donation of an amount from a user to a project.
"""
class DonationTypes(DjangoChoices):
one_off = ChoiceItem('one_off', label=_("One-off"))
recurring = ChoiceItem('recurring', label=_("Recurring"))
voucher = ChoiceItem('voucher', label=_("Voucher"))
amount = models.PositiveIntegerField(_("amount (in cents)"))
currency = models.CharField(_("currency"), max_length=3, default='EUR')
# User is just a cache of the order user.
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("User"), null=True, blank=True)
project = models.ForeignKey(settings.PROJECTS_PROJECT_MODEL, verbose_name=_("Project"), related_name='old_donations')
fundraiser = models.ForeignKey('fundraisers.FundRaiser', verbose_name=_("fund raiser"), related_name='old_donations', null=True, blank=True)
status = models.CharField(_("Status"), max_length=20, choices=DonationStatuses.choices, default=DonationStatuses.new, db_index=True)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
# The timestamp the donation changed to pending or paid. This is auto-set in the save() method.
ready = models.DateTimeField(_("Ready"), blank=True, editable=False, null=True)
donation_type = models.CharField(_("Type"), max_length=20, choices=DonationTypes.choices, default=DonationTypes.one_off, db_index=True)
order = models.ForeignKey('Order', verbose_name=_("Order"), related_name='donations', null=True, blank=True)
voucher = models.ForeignKey('vouchers.Voucher', verbose_name=_("Gift card"), null=True, blank=True)
objects = models.Manager()
valid_donations = ValidDonationsManager()
@property
def payment_method(self):
""" The DocData payment method. """
if self.donation_type == self.DonationTypes.voucher:
return "Gift Card"
latest_payment = self.order.latest_payment
if latest_payment:
if getattr(latest_payment, 'docdata_payments', False):
latest_docdata_payment = latest_payment.latest_docdata_payment
if latest_docdata_payment:
return latest_docdata_payment.payment_method
return ''
class Meta:
verbose_name = _("donation")
verbose_name_plural = _("donations")
def __unicode__(self):
language = translation.get_language().split('-')[0]
if not language:
language = 'en'
return u'{0} - {1} - {2}'.format(str(self.id), self.project.title,
format_currency(self.amount / 100.0, self.currency, locale=language))
def save(self, *args, **kwargs):
# Automatically set the user and donation_type based on the order. This is required so that donations always
# have the correct user and donation_type regardless of how they are created. User is just a cache of the order
# user.
if not self.order and not self.voucher:
raise Exception("Either Order or Voucher should be set.")
if self.order:
if self.order.user != self.user:
self.user = self.order.user
if self.order.recurring and self.donation_type != self.DonationTypes.recurring:
self.donation_type = self.DonationTypes.recurring
elif not self.order.recurring and self.donation_type != self.DonationTypes.one_off:
self.donation_type = self.DonationTypes.one_off
if self.voucher:
self.donation_type = self.DonationTypes.voucher
if self.voucher.receiver:
self.user = self.voucher.receiver
if self.amount != self.voucher.amount:
self.amount = self.voucher.amount
if len(self.voucher.donation_set.all()) > 1:
raise Exception("Can't have more then one donation connected to a Voucher.")
if self.voucher.status not in [VoucherStatuses.paid, VoucherStatuses.cashed_by_proxy, VoucherStatuses.cashed]:
raise Exception("Voucher has the wrong status.")
# TODO: Move logic of changing voucher status to Voucher.
self.voucher.status = VoucherStatuses.cashed
self.voucher.save()
self.status = DonationStatuses.paid
# Set the datetime when the Donation became 'ready'. This is used for the donation time on the frontend.
if not self.ready and self.status in (DonationStatuses.pending, DonationStatuses.paid):
self.ready = timezone.now()
elif self.ready and self.status not in (DonationStatuses.pending, DonationStatuses.paid):
self.ready = None
super(Donation, self).save(*args, **kwargs)
class OrderStatuses(DjangoChoices):
current = ChoiceItem('current', label=_("Current")) # The single donation 'shopping cart' (editable).
recurring = ChoiceItem('recurring', label=_("Recurring")) # The recurring donation 'shopping cart' (editable).
closed = ChoiceItem('closed', label=_("Closed")) # Order with a paid, cancelled or failed payment (not editable).
class Order(models.Model):
"""
An order is a collection of Donations and vouchers with a connected payment.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("user"), related_name='old_orders', blank=True, null=True)
status = models.CharField(_("Status"), max_length=20, choices=OrderStatuses.choices, default=OrderStatuses.current, db_index=True)
recurring = models.BooleanField(default=False)
order_number = models.CharField(_("Order Number"), max_length=30, db_index=True, unique=True, help_text="Used to reference the Order from external systems.")
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
# The timestamp the order changed to closed. This is auto-set in the save() method.
closed = models.DateTimeField(_("Closed"), blank=True, editable=False, null=True)
@property
def latest_payment(self):
"""
Note: this might not always be the related successful payment.
Potential fail case: user starts payment with one method, then creates
another before finishing. Does not finish second one but payment for
first one succeeds. Now this method still returns the value of the latest
initiated method.
"""
if self.payments.count() > 0:
return self.payments.order_by('-created').all()[0]
return None
@property
def first_donation(self):
if self.donations.count() > 0:
return self.donations.all()[0]
return None
@property
def total(self):
""" Calculated total for this Order. """
total = 0
for voucher in self.vouchers.all():
total += voucher.amount
for donation in self.donations.all():
total += donation.amount
return total
def __unicode__(self):
description = ''
if self.order_number:
description += self.order_number + " - "
description += "1%Club "
donations = self.donations.count()
vouchers = self.vouchers.count()
if self.recurring:
# TODO Use English / Dutch based on user primary_language.
description += "MAANDELIJKSE DONATIE"
elif donations == 0 and vouchers > 0:
if vouchers > 1:
description += _("GIFTCARDS")
else:
description += _("GIFTCARD")
description += str(self.id)
elif donations > 0 and vouchers == 0:
if donations > 1:
description += _("DONATIONS")
else:
description += _("DONATION")
else:
description += _("DONATIONS & GIFTCARDS")
description += " - " + _("THANK YOU!")
return description
class Meta:
ordering = ('-updated',)
def save(self, *args, **kwargs):
# http://stackoverflow.com/questions/2076838
if not self.order_number:
loop_num = 0
max_number = 1000000000 # 1 billion
order_number = str(random.randint(0, max_number))
while Order.objects.filter(order_number=order_number).exists():
if loop_num > 1000:
raise ValueError(_("Couldn't generate a unique order number."))
else:
order_number = str(random.randint(0, max_number))
loop_num += 1
self.order_number = order_number
# Set the datetime when the Order became 'closed'. This is used for sorting the Order in the admin.
if not self.closed and self.status == OrderStatuses.closed:
self.closed = timezone.now()
elif self.closed and self.status != OrderStatuses.closed:
self.closed = None
super(Order, self).save(*args, **kwargs)
### METADATA is rather specific here, fetching the metadata of either the fundraiser or the project itself
def get_tweet(self, **kwargs):
request = kwargs.get('request', None)
lang_code = request.LANGUAGE_CODE if request else 'en'
twitter_handle = settings.TWITTER_HANDLES.get(lang_code, settings.DEFAULT_TWITTER_HANDLE)
if self.first_donation:
if self.first_donation.fundraiser:
title = self.first_donation.fundraiser.owner.get_full_name()
else:
title = self.first_donation.project.get_fb_title()
tweet = _(u"I've just supported {title} {{URL}} via @{twitter_handle}")
return tweet.format(title=title, twitter_handle=twitter_handle)
return _(u"{{URL}} via @{twitter_handle}").format(twitter_handle=twitter_handle)
def get_share_url(self, **kwargs):
if self.first_donation:
request = kwargs.get('request')
# FIXME: Make these urls smarter. At least take language code from current user.
if self.first_donation.fundraiser:
fundraiser = self.first_donation.fundraiser
location = '/en/#!/fundraisers/{0}'.format(fundraiser.id)
else:
project = self.first_donation.project
location = '/en/#!/projects/{0}'.format(project.slug)
return request.build_absolute_uri(location)
return None
def link_anonymous_donations(sender, user, request, **kwargs):
"""
Search for anonymous donations with the same email address as this user and connect them.
"""
dd_orders = DocDataPaymentOrder.objects.filter(email=user.email).all()
from bluebottle.wallposts.models import SystemWallPost
wallposts = None
for dd_order in dd_orders:
dd_order.customer_id = user.id
dd_order.save()
dd_order.order.user = user
dd_order.order.save()
dd_order.order.donations.update(user=user)
ctype = ContentType.objects.get_for_model(Donation)
for donation_id in dd_order.order.donations.values_list('id', flat=True):
qs = SystemWallPost.objects.filter(related_type=ctype, related_id=donation_id)
if not wallposts:
wallposts = qs
else:
pass
# This causes errors...
# wallposts += qs
if wallposts:
wallposts.update(author=user)
# On account activation, try to connect anonymous donations to this user.
user_activated.connect(link_anonymous_donations)
from signals import *
from fundmail import *
```
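A short usage sketch of the models above: creating an `Order` triggers the automatic `order_number` generation in `save()`, and a `Donation` attached to it inherits the order's user and `donation_type`. This assumes at least one user and one campaign-phase project already exist in the database.
```python
# Minimal usage sketch of Order / Donation; assumes an existing user and a
# project in the 'campaign' phase.
from django.contrib.auth import get_user_model
from apps.fund.models import Donation, DonationStatuses, Order
from apps.projects.models import Project

user = get_user_model().objects.first()          # assumed to exist
project = Project.objects.first()                # assumed to be in the campaign phase

order = Order.objects.create(user=user)          # order_number is auto-generated in save()
donation = Donation.objects.create(amount=1000,  # amounts are stored in cents (EUR 10.00)
                                   project=project, order=order)

assert donation.user == order.user               # user is cached from the order
assert donation.donation_type == Donation.DonationTypes.one_off

donation.status = DonationStatuses.paid
donation.save()                                  # the 'ready' timestamp is set here
```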
#### File: apps/fund/serializers.py
```python
from apps.projects.serializers import ProjectPreviewSerializer
from apps.vouchers.serializers import VoucherSerializer, OrderCurrentVoucherSerializer
from bluebottle.bb_accounts.serializers import UserPreviewSerializer
from bluebottle.bluebottle_drf2.serializers import EuroField
from bluebottle.bb_projects.models import ProjectPhase
from bluebottle.utils.serializers import MetaField
from django.utils.translation import ugettext as _
from rest_framework import serializers
from .models import Donation, DonationStatuses, Order, OrderStatuses
class ProjectSupporterSerializer(serializers.ModelSerializer):
"""
For displaying donations on project and member pages.
"""
member = UserPreviewSerializer(source='user')
project = ProjectPreviewSerializer(source='project') # NOTE: is this really necessary?
date_donated = serializers.DateTimeField(source='ready')
class Meta:
model = Donation
fields = ('date_donated', 'project', 'member',)
class ProjectDonationSerializer(serializers.ModelSerializer):
member = UserPreviewSerializer(source='user')
date_donated = serializers.DateTimeField(source='ready')
amount = EuroField(source='amount')
class Meta:
model = Donation
fields = ('member', 'date_donated', 'amount',)
class DonationSerializer(serializers.ModelSerializer):
project = serializers.SlugRelatedField(source='project', slug_field='slug')
status = serializers.ChoiceField(read_only=True)
order = serializers.PrimaryKeyRelatedField()
amount = EuroField()
# TODO: Enable url field.
# This error is presented when the url field is enabled:
# Could not resolve URL for hyperlinked relationship using view name "fund-order-donation-detail". You may have
# failed to include the related model in your API, or incorrectly configured the `lookup_field` attribute on
# this field.
# url = serializers.HyperlinkedIdentityField(view_name='fund-order-donation-detail')
class Meta:
model = Donation
fields = ('id', 'project', 'amount', 'status', 'order', 'fundraiser')
def validate(self, attrs):
if self.object and self.object.status != DonationStatuses.new and attrs is not None:
raise serializers.ValidationError(_("You cannot modify a Donation that does not have status new."))
return attrs
def validate_amount(self, attrs, source):
# TODO: check requirements for fundraisers
value = attrs[source]
if self.object:
if self.object.order and self.object.order.recurring:
if value < 200:
raise serializers.ValidationError(_(u"Donations must be at least €2."))
else:
if value < 500:
raise serializers.ValidationError(_(u"Donations must be at least €5."))
else:
if value < 500:
raise serializers.ValidationError(_(u"Donations must be at least €5."))
return attrs
def validate_project(self, attrs, source):
value = attrs[source]
if value.status != ProjectPhase.objects.get(slug="campaign"):
raise serializers.ValidationError(_("You can only donate a project in the campaign phase."))
return attrs
class RecurringDonationSerializer(serializers.ModelSerializer):
project = serializers.SlugRelatedField(source='project', slug_field='slug')
status = serializers.ChoiceField(read_only=True)
order = serializers.PrimaryKeyRelatedField()
amount = EuroField()
class Meta:
model = Donation
fields = ('id', 'project', 'amount', 'status', 'order')
def validate(self, attrs):
if self.object and self.object.status != DonationStatuses.new and attrs is not None:
raise serializers.ValidationError(_("You cannot modify a Donation that does not have status new."))
return attrs
def validate_amount(self, attrs, source):
value = attrs[source]
if self.object:
if self.object.order and self.object.order.recurring:
if value < 200:
raise serializers.ValidationError(_(u"Donations must be at least €2."))
else:
if value < 500:
raise serializers.ValidationError(_(u"Donations must be at least €5."))
else:
if value < 500:
raise serializers.ValidationError(_(u"Donations must be at least €5."))
return attrs
def validate_project(self, attrs, source):
value = attrs[source]
if value.status != ProjectPhase.objects.get(slug="campaign"):
raise serializers.ValidationError(_("You can only donate a project in the campaign phase."))
return attrs
def validate_order(self, attrs, source):
order = attrs[source]
if not order.recurring:
raise serializers.ValidationError(_("Can only Recurring Donations to a Recurring Order."))
if not order.status == OrderStatuses.recurring:
raise serializers.ValidationError(_("Can only Recurring Donations to an active Recurring Order (status recurring)."))
return attrs
class NestedDonationSerializer(DonationSerializer):
order = serializers.PrimaryKeyRelatedField(read_only=True)
class OrderSerializer(serializers.ModelSerializer):
total = EuroField(read_only=True)
status = serializers.ChoiceField(read_only=True)
# If we had FKs from the donations / vouchers to the Order this could be writable.
donations = DonationSerializer(source='donations', many=True, read_only=True)
vouchers = VoucherSerializer(source='vouchers', many=True, read_only=True)
payments = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
url = serializers.HyperlinkedIdentityField(view_name='fund-order-detail')
# Most are not required because the link is pointing to a different page and facebook will look up the info on that page
meta_data = MetaField(
title = None,
fb_title = None,
description = None, # these are all not required because the link is pointing to a different page
keywords = None,
image_source = None,
tweet = 'get_tweet',
url = 'get_share_url',
)
def validate(self, attrs):
if self.object.status == OrderStatuses.closed and attrs is not None:
raise serializers.ValidationError(_("You cannot modify a closed Order."))
return attrs
class Meta:
model = Order
fields = ('id', 'user', 'url', 'total', 'status', 'recurring', 'donations', 'vouchers', 'payments', 'created', 'meta_data')
class RecurringOrderSerializer(serializers.ModelSerializer):
total = EuroField(read_only=True)
status = serializers.ChoiceField(read_only=True, default=OrderStatuses.recurring)
donations = DonationSerializer(source='donations', many=True, required=False)
recurring = serializers.BooleanField(read_only=True, default=True)
class Meta:
model = Order
fields = ('id', 'total', 'status', 'donations', 'created')
#
# Order 'current' overrides.
#
class OrderCurrentDonationSerializer(DonationSerializer):
url = serializers.HyperlinkedIdentityField(view_name='fund-order-current-donation-detail')
order = serializers.SerializerMethodField('get_current_order_ember_id')
def get_current_order_ember_id(self, donation):
return 'current'
class Meta:
model = Donation
fields = DonationSerializer.Meta.fields + ('url',)
class OrderCurrentSerializer(OrderSerializer):
# This is a hack to work around an issue with Ember-Data keeping the id as 'current'.
id_for_ember = serializers.IntegerField(source='id', read_only=True)
donations = OrderCurrentDonationSerializer(source='donations', many=True, read_only=True)
vouchers = OrderCurrentVoucherSerializer(source='vouchers', many=True, read_only=True)
class Meta:
model = Order
fields = OrderSerializer.Meta.fields + ('id_for_ember',)
# For showing the latest donations
class DonationInfoSerializer(serializers.ModelSerializer):
project = ProjectPreviewSerializer()
user = UserPreviewSerializer()
amount = EuroField()
class Meta:
model = Donation
fields = ('id', 'project', 'amount', 'user', 'created')
```
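The `EuroField` above (de)serializes amounts stored in cents, and `validate_amount` enforces a minimum of 500 cents (EUR 5) for one-off donations and 200 cents (EUR 2) on a recurring order. A small sketch of exercising that validation directly, assuming a project with the given slug and an order with pk 1 exist:
```python
# Sketch of DonationSerializer validation (DRF 2.x style); assumes a project
# with slug 'schools-for-children-2' and an Order with pk=1 exist.
from apps.fund.serializers import DonationSerializer

serializer = DonationSerializer(data={'project': 'schools-for-children-2',
                                      'amount': 100,   # EUR 1.00 -> below the minimum
                                      'order': 1})
if not serializer.is_valid():
    print(serializer.errors)  # expected to contain the "at least EUR 5" message
```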
#### File: homepage/tests/test_api.py
```python
from datetime import timedelta
from bluebottle.bb_projects.models import ProjectPhase
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.utils import LanguageFactory
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from django.utils.text import slugify
from onepercentclub.tests.factory_models.fundraiser_factories import FundRaiserFactory
from onepercentclub.tests.factory_models.project_factories import OnePercentProjectFactory
from onepercentclub.tests.utils import OnePercentTestCase
from rest_framework import status
from apps.campaigns.models import Campaign
from apps.fund.models import Donation, DonationStatuses, Order
class HomepageTestCase(OnePercentTestCase):
""" Test that the homepage doesn't error out if no/a campaign is available """
def setUp(self):
self.init_projects()
# Create and activate user.
self.user = BlueBottleUserFactory.create(email='<EMAIL>', primary_language='en')
title = u'Mobile payments for everyone 2!'
language = LanguageFactory.create(code='en')
self.project = OnePercentProjectFactory.create(title=title, slug=slugify(title), amount_asked=100000, owner=self.user)
self.project.status = ProjectPhase.objects.get(slug='campaign')
self.project.is_campaign = True
self.project.money_donated = 0
self.project.language = language
self.project.save()
self.homepage_url = '/api/homepage/en'
# def test_homepage_without_campaign(self):
# response = self.client.get(self.homepage_url)
# self.assertEquals(response.status_code, status.HTTP_200_OK)
#
# self.assertEqual(None, response.data['campaign'])
#
# project = response.data['projects'][0]
# self.assertTrue(project['is_campaign'])
def test_homepage_with_campaign(self):
now = timezone.now()
start, end = now - timedelta(hours=8), now + timedelta(weeks=1)
Campaign.objects.create(start=start, end=end, title='FooBarCaMpAIgN', target=100000)
# make a donation before the campaign starts
order = Order.objects.create(user=self.user, order_number=1)
Donation.objects.create(amount=1000, user=self.user, project=self.project,
status=DonationStatuses.paid, order=order, ready=now-timedelta(days=1))
# and a couple of donations in campaign, for a total amount of 2000+3000+4000 cents = 90 euros
for i in range(1,4):
amount = (i+1)*1000
Donation.objects.create(amount=amount, user=self.user, project=self.project,
status=DonationStatuses.paid, order=order, ready=now+timedelta(days=i))
# and one after the campaign
Donation.objects.create(amount=5000, user=self.user, project=self.project,
status=DonationStatuses.paid, order=order, ready=now+timedelta(weeks=2))
self.project_with_fundraiser = OnePercentProjectFactory.create(amount_asked=50000)
self.project_with_fundraiser.is_campaign = True
self.project_with_fundraiser.save()
self.fundraiser = FundRaiserFactory.create(owner=self.user, project=self.project_with_fundraiser)
response = self.client.get(self.homepage_url)
self.assertNotEqual(None, response.data['campaign'])
self.assertEqual(response.data['campaign']['amount_donated'], '90.00')
```
#### File: management/commands/sync_mchanga.py
```python
from django.core.management.base import BaseCommand
from apps.mchanga.adapters import MchangaService
class Command(BaseCommand):
help = 'Synchronize M-Changa payments.'
def handle(self, *args, **options):
service = MchangaService()
service.sync_payments()
service.sync_fundraisers()
```
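The command can also be invoked programmatically, for example from a periodic task or a test, which is equivalent to running `manage.py sync_mchanga`:
```python
# Programmatic invocation of the management command above.
from django.core.management import call_command

call_command('sync_mchanga')
```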
#### File: apps/mchanga/models.py
```python
from django.db import models
from django.utils.translation import ugettext as _
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
class MpesaPayment(models.Model):
@classmethod
def create_from_json(cls, pm):
from apps.projects.models import Project
payment, created = cls.objects.get_or_create(mpesa_id=pm['mmp_trid'])
if created:
try:
project = Project.objects.get(mchanga_account=pm['m-changa_acno'])
except Project.DoesNotExist:
project = None
payment.mpesa_id = pm['mmp_trid']
payment.project = project
payment.amount = pm['amount']
payment.mchanga_account = pm['m-changa_acno']
payment.mpesa_phone = pm['contributor_mobno']
payment.mpesa_name = pm['contributor_name']
payment.date = pm['payment_date']
payment.fundraiser_name = pm['fundraiser_name']
payment.save()
project = models.ForeignKey('projects.Project', null=True)
amount = models.IntegerField(null=True)
currency = models.CharField(max_length=10, blank=True)
fundraiser_name = models.CharField(max_length=100, blank=True)
mchanga_account = models.CharField(max_length=100, blank=True)
mpesa_id = models.CharField(max_length=100, blank=True)
mpesa_name = models.CharField(max_length=100, blank=True)
mpesa_phone = models.CharField(max_length=100, blank=True)
status = models.CharField(max_length=100, blank=True)
date = models.DateTimeField(null=True)
created = CreationDateTimeField(_("created"))
updated = ModificationDateTimeField(_("updated"))
class MpesaFundRaiser(models.Model):
@classmethod
def create_from_json(cls, fr):
account = fr['m-changa_acno']
fundraiser, created = cls.objects.get_or_create(account=account)
from apps.projects.models import Project
try:
project = Project.objects.get(mchanga_account=account)
except Project.DoesNotExist:
project = None
if created:
fundraiser.account = account
fundraiser.name = fr['fundraiser_name']
fundraiser.status = fr['status']
fundraiser.owner = fr['fundraiser_originator']
fundraiser.link = fr['fundraiser_statement_link']
fundraiser.project = project
fundraiser.total_amount = fr['historical_amt']
fundraiser.current_amount = fr['current_balance']
fundraiser.payments_count = fr['payments_count']
fundraiser.save()
if project:
project.update_amounts()
project = models.ForeignKey('projects.Project', null=True)
name = models.CharField(max_length=100, blank=True)
owner = models.CharField(max_length=100, blank=True)
link = models.CharField(max_length=100, blank=True)
account = models.CharField(max_length=100, blank=True)
total_amount = models.IntegerField(null=True)
current_amount = models.IntegerField(null=True)
payment_count = models.IntegerField(null=True)
status = models.CharField(max_length=10, blank=True)
created = CreationDateTimeField(_("created"))
updated = ModificationDateTimeField(_("updated"))
```
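A sketch of feeding `MpesaPayment.create_from_json()` a single payment record: the keys mirror the ones read in the method above, while the values are invented for illustration.
```python
# Hypothetical payment record; keys are the ones read by
# MpesaPayment.create_from_json() above, values are made up.
from apps.mchanga.models import MpesaPayment

payment_record = {
    'mmp_trid': 'ABC123XYZ',
    'm-changa_acno': '12345',           # matched against Project.mchanga_account
    'amount': 2500,
    'contributor_mobno': '+254700000000',
    'contributor_name': 'Jane Doe',
    'payment_date': '2014-05-01 12:00:00',
    'fundraiser_name': 'Water for Kibera',
}
MpesaPayment.create_from_json(payment_record)
```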
#### File: apps/payouts/admin.py
```python
from bluebottle.utils.utils import StatusDefinition
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.utils import timezone
from bluebottle.bb_payouts.admin import ProjectPayoutAdmin, OrganizationPayoutAdmin
from bluebottle.utils.model_dispatcher import get_project_payout_model, get_organization_payout_model
from django.contrib.admin.sites import NotRegistered
import logging
from bluebottle.utils.admin import export_as_csv_action
logger = logging.getLogger(__name__)
PROJECT_PAYOUT_MODEL = get_project_payout_model()
ORGANIZATION_PAYOUT_MODEL = get_organization_payout_model()
class OnePercentOrganizationPayoutAdmin(OrganizationPayoutAdmin):
actions = ('export_sepa', )
def export_sepa(self, request, queryset):
"""
Download a SEPA file with the selected organization payouts.
"""
objs = queryset.all()
if not request.user.is_staff:
raise PermissionDenied
response = HttpResponse(mimetype='text/xml')
date = timezone.datetime.strftime(timezone.now(), '%Y%m%d%H%M%S')
response['Content-Disposition'] = 'attachment; filename=payments_sepa%s.xml' % date
response.write(ORGANIZATION_PAYOUT_MODEL.create_sepa_xml(objs))
return response
export_sepa.short_description = "Export SEPA file."
try:
admin.site.unregister(ORGANIZATION_PAYOUT_MODEL)
except NotRegistered:
pass
admin.site.register(ORGANIZATION_PAYOUT_MODEL, OnePercentOrganizationPayoutAdmin)
class OnePercentProjectPayoutAdmin(ProjectPayoutAdmin):
list_filter = ['status', 'payout_rule', 'project__partner_organization']
export_fields = ['project', 'status', 'payout_rule', 'amount_raised', 'organization_fee', 'amount_payable',
'created', 'submitted']
actions = ('change_status_to_new', 'change_status_to_progress', 'change_status_to_settled',
'export_sepa', 'recalculate_amounts', export_as_csv_action(fields=export_fields))
def export_sepa(self, request, queryset):
"""
Download a SEPA file with the selected project payouts.
"""
objs = queryset.all()
if not request.user.is_staff:
raise PermissionDenied
response = HttpResponse(mimetype='text/xml')
date = timezone.datetime.strftime(timezone.now(), '%Y%m%d%H%M%S')
response['Content-Disposition'] = 'attachment; filename=payments_sepa%s.xml' % date
response.write(PROJECT_PAYOUT_MODEL.create_sepa_xml(objs))
return response
export_sepa.short_description = "Export SEPA file."
try:
admin.site.unregister(PROJECT_PAYOUT_MODEL)
except NotRegistered:
pass
admin.site.register(PROJECT_PAYOUT_MODEL, OnePercentProjectPayoutAdmin)
```
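The SEPA export action can also be exercised outside the admin change list, which is handy in tests. A rough sketch under the assumptions that at least one project payout exists and a staff user is available (the request here is faked with `RequestFactory`):
```python
# Rough sketch of calling the SEPA export action outside the admin UI.
# Assumes at least one project payout and one staff user exist.
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import RequestFactory
from apps.payouts.admin import OnePercentProjectPayoutAdmin, PROJECT_PAYOUT_MODEL

admin_instance = OnePercentProjectPayoutAdmin(PROJECT_PAYOUT_MODEL, admin.site)
request = RequestFactory().get('/')
request.user = get_user_model().objects.filter(is_staff=True).first()  # assumed staff user

response = admin_instance.export_sepa(request, PROJECT_PAYOUT_MODEL.objects.all())
print(response['Content-Disposition'])  # attachment; filename=payments_sepa<timestamp>.xml
```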
#### File: projects/templatetags/project_tags.py
```python
from django import template
register = template.Library()
@register.assignment_tag
def get_project(project_id):
from apps.projects.models import Project
return Project.objects.get(pk=int(project_id))
```
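In a template the assignment tag above would typically be used as `{% get_project 1 as project %}`; the same thing can be shown from Python by rendering a template string, assuming a `Project` with pk 1 exists:
```python
# Renders the assignment tag from Python; assumes a Project with pk=1 exists.
from django.template import Context, Template

template = Template(
    "{% load project_tags %}"
    "{% get_project 1 as project %}"
    "{{ project.title }}"
)
print(template.render(Context({})))
```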
#### File: projects/tests/test_functional.py
```python
import os
import time
from decimal import Decimal
from django.conf import settings
from django.utils.text import slugify
from django.utils.unittest.case import skipUnless
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from bluebottle.bb_projects.models import ProjectPhase, ProjectTheme
from bluebottle.utils.models import Language
from onepercentclub.tests.utils import OnePercentSeleniumTestCase
from onepercentclub.tests.factory_models.project_factories import OnePercentProjectFactory, PartnerFactory
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.geo import CountryFactory
from ..models import Project
@skipUnless(getattr(settings, 'SELENIUM_TESTS', False),
'Selenium tests disabled. Set SELENIUM_TESTS = True in your settings.py to enable.')
class ProjectSeleniumTests(OnePercentSeleniumTestCase):
"""
Selenium tests for Projects.
"""
def setUp(self):
self.init_projects()
self.projects = dict([(slugify(title), title) for title in [
u'Mobile payments for everyone 2!', u'Schools for children 2', u'Women first 2'
]])
self.user = BlueBottleUserFactory.create(email='<EMAIL>', primary_language='en')
campaign_phase = ProjectPhase.objects.get(slug='campaign')
for slug, title in self.projects.items():
project = OnePercentProjectFactory.create(title=title, slug=slug, owner=self.user,
amount_asked=1000, status=campaign_phase)
def visit_project_list_page(self, lang_code=None):
self.visit_path('/projects', lang_code)
self.assertTrue(self.browser.is_element_present_by_css('.project-item'),
'Cannot load the project list page.')
def test_navigate_to_project_list_page(self):
"""
Test navigate to the project list page.
"""
self.visit_project_list_page()
time.sleep(10)
# Validate that we are on the intended page.
self.assertTrue(self.browser.is_element_present_by_css('.project-item'), 'Cannot load the project list page.')
self.assertEqual(self.browser.url, '%s/en/#!/projects' % self.live_server_url)
def test_view_project_list_page(self):
"""
Test view the project list page correctly.
"""
self.visit_project_list_page()
# Besides waiting for JS to kick in, we also need to wait for the funds raised animation to finish.
time.sleep(2)
def convert_money_to_int(money_text):
amount = money_text.strip(u'€').strip(u'\u20ac').replace('.', '').replace(',', '')
if not amount:
amount = 0
return amount
#return int(amount)
# NOTE: Due to a recent change, it's harder to calculate/get the financial data from the front end.
# Hence, these calculations are commented out. Perhaps enable them in the future if this data becomes available again.
# Create a dict of all projects on the web page.
web_projects = []
for p in self.browser.find_by_css('#search-results .project-item'):
title = p.find_by_css('h3').first.text
needed = convert_money_to_int(p.find_by_css('.project-fund-amount strong').first.text)
web_projects.append({
'title': title,
# 'amount_donated': needed,
})
# Make sure there are some projects to compare.
self.assertTrue(len(web_projects) > 0)
# Create dict of projects in the database.
expected_projects = []
for p in Project.objects.order_by('popularity')[:len(web_projects)]:
expected_projects.append({
'title': p.title.upper(), # Uppercase the title for comparison.
# 'amount_donated': int(round(p.amount_donated / Decimal(100.0))),
})
# Compare all projects found on the web page with those in the database, in the same order.
# FIXME: Fix me! Please fix me!
# This isn't working because popularity & donations aren't.
# self.assertListEqual(web_projects, expected_projects)
def test_upload_multiple_wallpost_images(self):
""" Test uploading multiple images in a media wallpost """
self.assertTrue(self.login(self.user.email, 'testing'))
self.visit_project_list_page()
self.close_modal()
# pick a project
self.wait_for_element_css('.project-item')
self.browser.find_by_css('.project-item').first.find_by_tag('a').first.click()
# Wait for form to animate down
form = self.wait_for_element_css('#wallpost-form')
form.find_element_by_css_selector('textarea').send_keys('These are some sample pictures from this non-existent project!')
#
# TODO: re-enable this when we finish
#
# verify that no previews are there yet
ul = form.find_element_by_css_selector('ul.upload-photos')
previews = ul.find_elements_by_tag_name('li')
# Number of li elements should be 1 - the add image button is in the first li
self.assertEqual(1, len(previews))
# attach file
self.browser.driver.find_element_by_css_selector('a[data-action-type="show-photo-upload"]').click()
file_path = os.path.join(settings.PROJECT_ROOT, 'static', 'tests', 'kitten_snow.jpg')
file_field = self.wait_for_element_css('.wallpost-photos .action-upload')
file_field.find_element_by_css_selector('input').send_keys(file_path)
# verify that one picture was added, after waiting for the preview to load
self.wait_for_element_css('ul.form-wallpost-photos li:nth-of-type(2)')
ul = form.find_element_by_css_selector('ul.upload-photos')
previews = ul.find_elements_by_tag_name('li')
self.assertEqual(2, len(previews))
# verify that a second picture was added
file_path = os.path.join(settings.PROJECT_ROOT, 'static', 'tests', 'chameleon.jpg')
file_field = self.wait_for_element_css('.wallpost-photos .action-upload')
file_field.find_element_by_css_selector('input').send_keys(file_path)
# Wait for the second item to be added
self.wait_for_element_css('ul.form-wallpost-photos li:nth-of-type(3)')
ul = form.find_element_by_css_selector('ul.upload-photos')
previews = ul.find_elements_by_tag_name('li')
self.assertEqual(3, len(previews))
# submit the form
form.find_element_by_css_selector('button.action-submit').click()
# check if the wallpost is there
wallpost = self.browser.driver.find_element_by_css_selector('#wallposts article')
# Check for cover photo
cover_photos = wallpost.find_elements_by_css_selector('ul.photo-viewer li.cover')
self.assertEqual(len(cover_photos), 1)
# Check for other photo
other_photos = wallpost.find_elements_by_css_selector('ul.photo-viewer li.photo')
self.assertEqual(len(other_photos), 1)
def test_meta_tag(self, lang_code=None):
self.visit_path('/projects/schools-for-children-2', lang_code)
time.sleep(4)
# check meta url
meta_url = self.browser.find_by_xpath("//html/head/meta[@property='og:url']").first
self.assertEqual(self.browser.url, meta_url['content'])
# TODO: check that the default description is overwritten, add description to plan
# def test_project_plan(self):
# self.visit_path('/projects/schools-for-children-2')
#
# element = self.wait_for_element_css('.project-more')
# element.click()
#
# self.wait_for_element_css('.project-plan-navigation-links')
# self.assertTrue(self.browser.is_element_not_present_by_css('.project-plan-download-pdf'), 'PDF download should not be available')
@skipUnless(getattr(settings, 'SELENIUM_TESTS', False),
'Selenium tests disabled. Set SELENIUM_TESTS = True in your settings.py to enable.')
class ProjectCreateSeleniumTests(OnePercentSeleniumTestCase):
"""
Selenium tests for Projects.
"""
def setUp(self):
self.init_projects()
self.user = BlueBottleUserFactory.create()
self.country_1 = CountryFactory.create(name="Afghanistan")
self.country_2 = CountryFactory.create(name="Albania")
self.theme = ProjectTheme.objects.all()[0]
self.language = Language.objects.all()[0]
self.login(self.user.email, 'testing')
self.project_data = {
'title': 'Velit esse cillum dolore',
'slug': 'velit-esse-cillum-dolore',
'pitch': 'Quis aute iure reprehenderit in voluptate eu fugiat nulla pariatur.',
'tags': ['okoali', 'kertan', 'lorem'],
'description': 'Stet clita kasd gubergren.\nNo sea takimata sanctus est Lorem ipsum dolor sit amet. Sanctus sea sed takimata ut vero voluptua.\n\nStet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Sanctus sea sed takimata ut vero voluptua. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'goal': 'Lorem ipsum dolor sit amet. Sanctus sea sed takimata ut vero voluptua. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'destination_impact': 'Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.',
'amount_asked': 5000,
'budget': [
{'description': 'Ghaks', 'amount': 4000},
{'description': 'Rausno', 'amount': 500},
{'description': '<NAME>', 'amount': 500}
]
}
def test_create_project(self):
"""
Creating a project. The positive flow.
"""
self.visit_path('/my/projects')
# Click "Pitch Smart Idea" btn
self.assertTrue(self.browser.is_element_present_by_css('#create_project', wait_time=5))
self.assertTrue(self.is_visible('#create_project'))
# This click can occasionally cause a problem. The exact reason seems to vary per person. See this thread: https://code.google.com/p/selenium/issues/detail?id=2766
# There are various fixes, but they too are not reliable. The only consistent solution that seems to work is a time.sleep(1).
time.sleep(1)
self.browser.find_by_id("create_project").first.click()
###
# Intro Section
###
self.assertTrue(self.is_visible('section h1.page-title'))
self.visit_path('/my/projects/new/pitch')
###
# Project Section
###
self.assertTrue(self.browser.is_element_present_by_css('.language'))
self.assertTrue(self.is_visible('.language'))
self.browser.select('language', self.language.id)
self.assertTrue(self.is_visible('input[name="title"]'))
self.browser.fill('title', self.project_data['title'])
self.browser.fill('pitch', self.project_data['pitch'])
btn = self.browser.attach_file('img_upload', '{0}/apps/projects/test_images/upload.png'.format(settings.PROJECT_ROOT))
self.browser.find_by_css('.map-look-up input').type('Lyutidol')
self.browser.find_by_css('.map-look-up button').click()
# Splinter takes the value of the select option
self.browser.select('theme', self.theme.id)
for tag in self.project_data['tags']:
self.browser.fill('tag', tag)
self.browser.find_by_css("button.add-tag").first.click()
self.browser.select('country', self.country_1.id)
self.scroll_to_and_click_by_css("button.next")
###
# Goal Section
###
self.assertTrue(self.browser.is_text_present('Budget', wait_time=5))
self.assertTrue(self.is_visible('input[name="amount_asked"]'))
self.browser.fill('amount_asked', self.project_data['amount_asked'])
# Pick a deadline next month
self.assertTrue(self.scroll_to_and_click_by_css(".btn-date-picker"))
# Wait for date picker popup
self.assertTrue(self.browser.is_element_present_by_css("#ui-datepicker-div"))
# Click Next to get a date in the future
self.browser.find_by_css("[title=Next]").first.click()
self.assertTrue(self.browser.is_text_present("10"))
self.browser.find_link_by_text("10").first.click()
for line in self.project_data['budget']:
self.browser.fill('budget_line_amount', line['amount'])
self.browser.fill('budget_line_description', line['description'])
time.sleep(2)
self.browser.find_by_css("a.add-budget").first.click()
self.scroll_to_and_click_by_css("button.next")
###
# Description Section
###
self.assertTrue(self.is_visible('.redactor_editor'))
self.assertEqual(self.browser.url,
'{0}/en/#!/my/projects/{1}/story'.format(self.live_server_url,
self.project_data['slug']))
story = self.browser.find_by_css('.redactor_redactor').first
story.type(self.project_data['description'])
self.scroll_to_and_click_by_css("button.next")
###
# Organisation Section
###
self.wait_for_element_css('input[name="name"]')
organisation = {
"name": "<NAME>",
"email": "<EMAIL>",
"phone": "123456789",
"website": "http://www.testorg.com",
"twitter": "@testorg",
"facebook": "testorg",
"skype": "testorg"
}
self.browser.fill('name', organisation['name'])
self.browser.fill('email', organisation['email'])
self.browser.fill('phone', organisation['phone'])
self.browser.fill('website', organisation['website'])
self.browser.fill('twitter', organisation['twitter'])
self.browser.fill('facebook', organisation['facebook'])
self.browser.fill('skype', organisation['skype'])
btn = self.browser.attach_file('documents', '{0}/apps/projects/test_images/upload.png'.format(settings.PROJECT_ROOT))
self.scroll_to_and_click_by_css("button.next")
###
# Bank Section
###
bank_details = {
"name": "<NAME>",
"address": "144 Tolstraat",
"postcode": "1074 VM",
"city": "Amsterdam",
"iban": "NL91ABNA0417164300",
"bic": "ABNANL2AXXX"
}
self.assertTrue(self.is_visible('input[name="account-holder-name"]'))
self.browser.fill('account-holder-name', bank_details['name'])
self.browser.fill('account-holder-address', bank_details['address'])
self.browser.fill('account-holder-postcode', bank_details['postcode'])
self.browser.fill('account-holder-city', bank_details['city'])
select = Select(self.browser.driver.find_element_by_name("account-holder-country"))
select.select_by_visible_text("Afghanistan")
self.scroll_to_and_click_by_css('ul.fieldset-tabs .tab-first a')
self.browser.fill('account-iban', bank_details['iban'])
self.browser.fill('account-bic', bank_details['bic'])
self.scroll_to_and_click_by_css("button.next")
###
# Submit Section
###
# TODO: Add a test here to confirm that a valid project was completed by the user
# .... then create a new test for an invalid one.
# confirm the project record was created
# TODO: Also check it has the expected fields.
self.assertTrue(Project.objects.filter(slug=self.project_data['slug']).exists())
def test_change_project_goal(self):
plan_phase = ProjectPhase.objects.get(slug='plan-new')
project = OnePercentProjectFactory.create(title='Project Goal Changes', owner=self.user, status=plan_phase)
self.visit_path('/my/projects/{0}/goal'.format(project.slug))
# Check that deadline is set to 100 days now
days_left = self.browser.find_by_css('.project-days-left strong').first
self.assertEqual(days_left.text, '100')
# Let's pick a date
# Click Next to get a date in the future
self.assertTrue(self.scroll_to_and_click_by_css(".btn-date-picker"))
self.browser.find_by_css("[title=Prev]").first.click()
self.browser.find_by_css("[title=Prev]").first.click()
self.browser.find_by_css("[title=Prev]").first.click()
self.browser.find_by_css("[title=Next]").first.click()
self.assertTrue(self.browser.is_text_present("4"))
self.browser.find_link_by_text("4").first.click()
# remember the days left now
days_left1 = self.browser.find_by_css('.project-days-left strong').first.text
time.sleep(2)
self.assertTrue(self.scroll_to_and_click_by_css(".btn-date-picker"))
self.assertTrue(self.browser.is_text_present("14"))
self.browser.find_link_by_text("14").first.click()
days_left2 = self.browser.find_by_css('.project-days-left strong').first.text
days_diff = int(days_left2) - int(days_left1)
self.assertEqual(days_diff, 10)
def test_create_partner_project(self):
"""
Creating a partner project should set the partner on the new project
"""
self.partner = PartnerFactory.create()
self.visit_path('/my/projects/pp:{0}'.format(self.partner.slug))
# Wait for title to show
self.wait_for_element_css("h3")
self.assertEqual(self.browser.find_by_css("h3").text, self.partner.name.upper())
@skipUnless(getattr(settings, 'SELENIUM_TESTS', False),
'Selenium tests disabled. Set SELENIUM_TESTS = True in your settings.py to enable.')
class ProjectWallPostSeleniumTests(OnePercentSeleniumTestCase):
"""
Selenium tests for Projects.
"""
def setUp(self):
self.init_projects()
super(ProjectWallPostSeleniumTests, self).setUp()
self.user = BlueBottleUserFactory.create()
owner = BlueBottleUserFactory.create()
self.project = OnePercentProjectFactory.create(owner=owner)
self.project.save()
self.post1 = {
'text': 'Ziltch emit doler omit et dametis!'
}
self.post2 = {
'title': 'Hora est',
'text': 'Rolum dohar in amuet redicer...'
}
def test_write_wall_post(self):
"""
Test to write wall-posts on project page
"""
self.login(self.user.email, 'testing')
self.visit_path('/projects/{0}'.format(self.project.slug))
wallpost_form = self.wait_for_element_css('#wallposts form')
# Write wallpost as normal user
wallpost_form.find_element_by_css_selector('textarea').send_keys(self.post1['text'])
wallpost_form.find_element_by_css_selector('button.action-submit').click()
wallpost = self.wait_for_element_css('#wallposts article:first-of-type')
self.assertEqual(wallpost.find_element_by_css_selector('.user-name').text.upper(), self.user.full_name.upper())
self.assertEqual(wallpost.find_element_by_css_selector('.wallpost-body').text, self.post1['text'])
# Login as the project owner
self.login(username=self.project.owner.email, password='<PASSWORD>')
# Should see the post by the first user.
self.visit_path('/projects/{0}'.format(self.project.slug))
wallpost = self.wait_for_element_css('#wallposts article:first-of-type')
self.assertEqual(wallpost.find_element_by_css_selector('.wallpost-body').text, self.post1['text'])
# Post as project owner
wallpost_form = self.wait_for_element_css('#wallposts form')
wallpost_form.find_element_by_css_selector('textarea').send_keys(self.post2['text'])
wallpost_form.find_element_by_css_selector('button.action-submit').click()
# Wait for the two posts to load. Wait for the second first to ensure both have loaded.
original_wallpost = self.wait_for_element_css_index('article.m-wallpost', 1)
owner_wallpost = self.wait_for_element_css_index('article.m-wallpost', 0)
self.assertEqual(owner_wallpost.find_element_by_css_selector('.user-name').text.upper(), self.project.owner.full_name.upper())
self.assertEqual(owner_wallpost.find_element_by_css_selector('.wallpost-body').text, self.post2['text'])
# And the first post should still be shown as second
self.assertEqual(original_wallpost.find_element_by_css_selector('.user-name').text.upper(), self.user.full_name.upper())
self.assertEqual(original_wallpost.find_element_by_css_selector('.wallpost-body').text, self.post1['text'])
```
#### File: apps/projects/views.py
```python
from apps.projects.models import ProjectBudgetLine, PartnerOrganization
from bluebottle.bb_projects.views import ProjectPreviewList
from bluebottle.geo.models import Country
from bluebottle.geo.serializers import CountrySerializer
import django_filters
from django.db.models.query_utils import Q
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.http import Http404
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.generic.detail import DetailView
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from apps.fund.models import Donation, DonationStatuses
from apps.projects.serializers import (
ProjectSupporterSerializer, ProjectPreviewSerializer, ProjectThemeSerializer, ProjectBudgetLineSerializer)
from apps.projects.permissions import IsProjectOwner
from bluebottle.fundraisers.models import FundRaiser
from bluebottle.utils.model_dispatcher import get_project_model
from .models import Project
from .serializers import ProjectSerializer, ProjectDonationSerializer
class ManageProjectBudgetLineList(generics.ListCreateAPIView):
model = ProjectBudgetLine
serializer_class = ProjectBudgetLineSerializer
paginate_by = 50
permission_classes = (IsProjectOwner, )
class ManageProjectBudgetLineDetail(generics.RetrieveUpdateDestroyAPIView):
model = ProjectBudgetLine
serializer_class = ProjectBudgetLineSerializer
permission_classes = (IsProjectOwner, )
# Django template Views
class ProjectDetailView(DetailView):
""" This is the project view that search engines will use. """
model = Project
template_name = 'project_detail.html'
class ProjectIframeView(DetailView):
model = Project
template_name = 'project_iframe.html'
@method_decorator(xframe_options_exempt)
def dispatch(self, *args, **kwargs):
return super(ProjectIframeView, self).dispatch(*args, **kwargs)
class MacroMicroListView(generics.ListAPIView):
model = Project
queryset = Project.objects.filter(partner_organization__slug='macro_micro')
def render_to_response(self, context, **response_kwargs):
return super(MacroMicroListView, self).render_to_response(
context,
mimetype='application/xml',
**response_kwargs)
```
#### File: apps/recurring_donations/models.py
```python
from django.conf import settings
from django.db import models
from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField
from django_iban.fields import IBANField, SWIFTBICField
from django.utils.translation import ugettext as _
class MonthlyDonor(models.Model):
"""
Information about a user that wants to donate monthly.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
active = models.BooleanField(default=True)
amount = models.DecimalField(_("amount"), max_digits=6, decimal_places=2)
iban = IBANField(blank=True, default='')
bic = SWIFTBICField(blank=True, default='')
name = models.CharField(max_length=35)
city = models.CharField(max_length=35)
country = models.ForeignKey('geo.Country', blank=True, null=True)
@property
def is_valid(self):
# Check if we're above the DocData minimum for direct debit.
if self.amount < 1.13:
return False
        # Check that the IBAN and BIC are filled in.
        if self.iban == '' or self.bic == '':
            return False
        # Check that the bank code in the BIC matches the one embedded in the IBAN.
        # FIXME: Check whether this holds for all IBAN/BIC pairs or just for The Netherlands.
        if self.bic[:4] != self.iban[4:8]:
            return False
return True
class MonthlyDonorProject(models.Model):
"""
Preferred projects by a monthly donor.
"""
donor = models.ForeignKey(MonthlyDonor, related_name='projects')
project = models.ForeignKey(settings.PROJECTS_PROJECT_MODEL)
class MonthlyBatch(models.Model):
date = models.DateField()
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
def __unicode__(self):
return self.date.strftime('%B %Y')
class Meta:
verbose_name = _('Monthly batch')
verbose_name_plural = _('Monthly batches')
class MonthlyProject(models.Model):
"""
Aggregated amount for projects.
"""
batch = models.ForeignKey(MonthlyBatch)
project = models.ForeignKey(settings.PROJECTS_PROJECT_MODEL)
amount = models.DecimalField(_("amount"), default=0, max_digits=6, decimal_places=2)
class MonthlyOrder(models.Model):
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
batch = models.ForeignKey(MonthlyBatch, related_name='orders')
user = models.ForeignKey(settings.AUTH_USER_MODEL)
amount = models.DecimalField(_("Amount"), max_digits=16, decimal_places=2, default=0)
currency = models.CharField(max_length=3, default='EUR')
name = models.CharField(max_length=35)
city = models.CharField(max_length=35)
iban = IBANField(blank=True, default='')
bic = SWIFTBICField(blank=True, default='')
country = models.CharField(max_length=2, default='')
processed = models.BooleanField(help_text=_("Whether a payment has been created for this order."), default=False)
error = models.CharField(max_length=1000, blank=True, null=True, default='')
def __unicode__(self):
return "{0}: {1}".format(self.user, self.amount)
class MonthlyDonation(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
order = models.ForeignKey(MonthlyOrder, related_name='donations')
project = models.ForeignKey(settings.PROJECTS_PROJECT_MODEL)
amount = models.DecimalField(_("Amount"), max_digits=16, decimal_places=2, default=0)
```
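The `is_valid` check above relies on the convention that, for Dutch accounts, the four-letter bank code appears both at the start of the BIC and in characters 5-8 of the IBAN. A minimal standalone sketch of that rule, using the account numbers from the SEPA tests further down (plain Python, no Django models; the helper name is illustrative):
```python
def iban_bic_bank_codes_match(iban: str, bic: str) -> bool:
    """Illustrative helper: compare the bank code embedded in a Dutch IBAN
    (characters 5-8, e.g. 'RABO' in 'NL45RABO0132207044') with the first
    four letters of the BIC (e.g. 'RABO' in 'RABONL2U')."""
    if not iban or not bic:
        return False
    return bic[:4].upper() == iban[4:8].upper()


# A matching pair and a mismatching pair
assert iban_bic_bank_codes_match("NL45RABO0132207044", "RABONL2U")
assert not iban_bic_bank_codes_match("NL45RABO0132207044", "INGBNL2A")
```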
#### File: apps/recurring_donations/service.py
```python
from apps.donations.models import MonthlyBatch
from django.utils.timezone import now, timedelta
class MonthlyBatchService(object):
def __init__(self, date=None):
batches = MonthlyBatch.objects.order_by('-date')
if batches.count():
last_batch = batches.all()[0]
else:
last_batch = None
if date:
self.batch, created = MonthlyBatch.objects.get_or_create(date=date)
if created:
self.generate_donations()
        else:
            if last_batch and last_batch.date > (now() - timedelta(days=10)).date():
                self.batch = last_batch
            else:
                # No recent batch (or none at all): start a new one dated today.
                self.batch = MonthlyBatch.objects.create(date=now().date())
def generate_donations(self):
self.batch
```
#### File: sepa/tests/integration.py
```python
import os
import unittest
import decimal
from lxml import etree
from apps.sepa.sepa import SepaAccount, SepaDocument
from .base import SepaXMLTestMixin
class ExampleXMLTest(SepaXMLTestMixin, unittest.TestCase):
""" Attempt to test recreating an example XML file """
def setUp(self):
super(ExampleXMLTest, self).setUp()
# Read and validate example XML file
example_file = os.path.join(
self.directory, 'BvN-pain.001.001.03-example-message.xml'
)
self.example = etree.parse(example_file)
self.xmlschema.assertValid(self.example)
def test_generate_example(self):
""" Attempt to recreate example XML file. """
pass
class CalculateMoneyDonatedTests(SepaXMLTestMixin, unittest.TestCase):
"""
Generate and attempt to validate an XML file modelled after actual
transactions
"""
def setUp(self):
super(CalculateMoneyDonatedTests, self).setUp()
self.some_account = {
'name': '1%CLUB',
'iban': 'NL45RABO0132207044',
'bic': 'RABONL2U',
'id': 'A01'
}
self.another_account = {
'name': '<NAME>',
'iban': 'NL13TEST0123456789',
'bic': 'TESTNL2A',
'id': 'P551'
}
self.third_account = {
'name': 'SHO',
'iban': 'NL28INGB0000000777',
'bic': 'INGBNL2A',
'id': 'P345'
}
self.payment1 = {
'amount': decimal.Decimal('50.00'),
'id': 'PAYMENT 1253675',
'remittance_info': 'some info'
}
self.payment2 = {
'amount': decimal.Decimal('25.00'),
'id': 'PAYMENT 234532',
'remittance_info': 'my info'
}
self.message_id = 'BATCH-1234'
payment_id = 'PAYMENTS TODAY'
# Create base for SEPA
sepa = SepaDocument(type='CT')
sepa.set_info(message_identification=self.message_id, payment_info_id=payment_id)
sepa.set_initiating_party(name=self.some_account['name'], id=self.some_account['id'])
some_account = SepaAccount(name=self.some_account['name'], iban=self.some_account['iban'],
bic=self.some_account['bic'])
sepa.set_debtor(some_account)
# Add a payment
another_account = SepaAccount(name=self.another_account['name'], iban=self.another_account['iban'],
bic=self.another_account['bic'])
sepa.add_credit_transfer(creditor=another_account, amount=self.payment1['amount'],
creditor_payment_id=self.payment1['id'],
remittance_information=self.payment1['remittance_info'])
# Add another payment
third_account = SepaAccount(name=self.third_account['name'], iban=self.third_account['iban'],
bic=self.third_account['bic'])
sepa.add_credit_transfer(creditor=third_account, creditor_payment_id=self.payment2['id'],
amount=self.payment2['amount'],
remittance_information=self.payment2['remittance_info'])
# Now lets get the xml for these payments
self.xml = sepa.as_xml()
def test_parse_xml(self):
""" Test parsing the generated XML """
        # Still no errors? Let's check the XML.
tree = etree.XML(self.xml)
main = tree[0]
self.assertEqual(main.tag,
'{urn:iso:std:iso:20022:tech:xsd:pain.001.001.03}CstmrCdtTrfInitn'
)
header = main[0]
self.assertEqual(header.tag,
'{urn:iso:std:iso:20022:tech:xsd:pain.001.001.03}GrpHdr')
self.assertEqual(header[0].text, self.message_id)
# We should have two payments
self.assertEqual(header[2].text, "2")
        # Total amount should be the sum of the two payments, expressed in euros
self.assertEqual(header[3].text, '75.00')
        # Now let's check the second payment's IBANs
second_payment = main[2]
namespaces = {
# Default
'pain': 'urn:iso:std:iso:20022:tech:xsd:pain.001.001.03',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
self.assertEqual(
second_payment.find(
'pain:DbtrAcct/pain:Id/pain:IBAN', namespaces=namespaces
).text,
self.some_account['iban']
)
self.assertEqual(
second_payment.find(
'pain:CdtTrfTxInf/pain:CdtrAcct/pain:Id/pain:IBAN', namespaces=namespaces
).text,
self.third_account['iban']
)
def test_validate_xml(self):
""" Assert the XML is valid according to schema """
tree = etree.XML(self.xml)
self.xmlschema.assertValid(tree)
```
#### File: apps/vouchers/mails.py
```python
from babel.dates import format_date
from babel.numbers import format_currency
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
from django.utils.translation import ugettext as _
from celery import task
@task
def mail_new_voucher(voucher, *args, **kwargs):
# TODO: Put this in config
system_email = '<EMAIL>'
server = Site.objects.get_current().domain
if server == 'localhost:8000':
server = 'http://' + server
else:
server = 'https://' + server
subject = _(u'You received a 1%GIFTCARD!')
text_content = _(u'You received a 1%GIFTCARD with this code:') + ' ' + voucher.code.upper()
context = Context({'voucher': voucher, 'server': server})
html_content = get_template('voucher_new.mail.html').render(context)
msg = EmailMultiAlternatives(subject=subject, body=text_content, from_email=system_email,
to=[voucher.receiver_email], cc=[voucher.sender_email])
msg.attach_alternative(html_content, "text/html")
msg.send()
@task
def mail_voucher_redeemed(voucher, *args, **kwargs):
# TODO: Put this in config
system_email = '<EMAIL>'
server = 'https://' + Site.objects.get_current().domain
subject = voucher.receiver_name + ' ' + _(u'has supported a 1%PROJECT using your 1%GIFTCARD')
text_content = voucher.receiver_name + ' ' + _(u'has supported a 1%PROJECT using your 1%GIFTCARD')
context = Context({'voucher': voucher, 'server': server})
html_content = get_template('voucher_redeemed.mail.html').render(context)
msg = EmailMultiAlternatives(subject=subject, body=text_content, from_email=system_email,
to=[voucher.receiver_email], cc=[voucher.sender_email])
msg.attach_alternative(html_content, "text/html")
msg.send()
@task
def mail_custom_voucher_request(voucher_request, *args, **kwargs):
# TODO: Put this in config
#system_email = '<EMAIL>'
system_email = '<EMAIL>'
server = 'https://' + Site.objects.get_current().domain
subject = voucher_request.contact_name + ' ' + _(u'has a custom 1%GIFTCARD request')
text_content = voucher_request.contact_name + ' ' + _(u'has a custom 1%GIFTCARD request')
context = Context({'voucher_request': voucher_request, 'server': server})
html_content = get_template('custom_voucher_request.mail.html').render(context)
msg = EmailMultiAlternatives(subject=subject, body=text_content, from_email=voucher_request.contact_email,
to=[system_email])
msg.attach_alternative(html_content, "text/html")
msg.send()
```
#### File: jfterpstra/onepercentclub-site/dashboard.py
```python
from fluent_dashboard.dashboard import FluentIndexDashboard
from apps.projects.dashboard import SubmittedPlans, EndedProjects, StartedCampaigns
from apps.tasks.dashboard import RecentTasks
class CustomIndexDashboard(FluentIndexDashboard):
"""
Custom Dashboard for onepercentclub-site.
"""
columns = 3
def init_with_context(self, context):
self.children.append(SubmittedPlans())
self.children.append(StartedCampaigns())
self.children.append(EndedProjects())
self.children.append(RecentTasks())
```
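For this dashboard class to actually be used, django-admin-tools / fluent-dashboard is normally pointed at it from the project settings; a sketch of that wiring (the dotted path is an assumption based on the file location shown above):
```python
# settings.py (sketch): make admin-tools load the custom dashboard defined above.
ADMIN_TOOLS_INDEX_DASHBOARD = 'dashboard.CustomIndexDashboard'
```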
|
{
"source": "jfthuong/jfthuong.github.io",
"score": 4
}
|
#### File: Solution/level_FINAL/processRecords.py
```python
import airlines
import re, argparse, datetime
# tag::header[]
def read_flight_records(file_path: str):
"""\
Read flight records from a given file and returns the list of take-off records.
Each element is a dict with keys: date, time, code, airline, destination, take-off
Args:
file_path: Path of file to read
Returns:
list of take off records
"""
# end::header[]
list_records = list()
record_keys = ["date", "time", "code", "airline", "destination", "take-off"]
    # We will extract each piece of info and match it to "record_keys" by using zip(l1, l2)
    # NOTE: zip transforms 2 lists into a list of tuples; for example:
    # zip([1, 2, 3], ["a", "b", "c"]) => [(1, "a"), (2, "b"), (3, "c")]
    # Using "dict(...)" on this zipped list will create a dictionary of keys and values
    # 2 solutions to split the line into elements
# Solution #1: Using regular expression to ensure strict pattern matching
pattern = (
r"^([\d\-]+),\s*([\d:]+),\s*([A-Z0-9]+),\s*([\w ]+),\s*(\w.+?),\s*([\d:]+)"
)
# Solution #2: using <line>.split(",") and stripping all spaces around each element
# We then need to check the number of elements in split result
try:
with open(file_path) as f:
for record in f:
# == Solution #1 (regex) ==
# if record.strip() == "":
# continue # Skip empty lines
# match_record = re.match(pattern, record)
# if match_record:
# record_dict = dict(zip(record_keys, match_record.groups()))
# list_records.append(record_dict)
# else:
# print(f"Following line of {file_path!r} does not match pattern: {record!r}")
# == Solution #2 (split) ==
if record.strip() == "":
continue # Skip empty lines
data = [d.strip() for d in record.split(",")]
# NOTE: we can also use regular expression
# data = re.split(r"\s*,\s*", record.rstrip())
if len(data) != len(record_keys):
print(
f"Following line of {file_path!r} has incorrect number of data: {record!r}"
)
else:
record_dict = dict(zip(record_keys, data))
list_records.append(record_dict)
except Exception as e:
raise IOError(f"Error while trying to read input file {file_path!r}: {e}")
return list_records
# tag::header[]
def get_ratings_airlines(list_records):
"""From the list of records, create a dictionary with all Airlines
Args:
list_records(list): list of dict returned by read_flight_records
Returns:
dictionary "airlines_dic" with:
* key = airline name
* value = "Airline" object with all the records of that airline
"""
# end::header[]
    # We will loop over each element of the input list and:
    # * Create a new Airline if it does not exist in the dictionary
    # * Add the record to the Airline object
airlines_dic = dict()
for record in list_records:
name = record["airline"]
# Create airline if required
if name not in airlines_dic:
code = record["code"][:2] # First 2 characters of flight
airlines_dic[name] = airlines.Airline(name, code)
# Add the record information
airlines_dic[name].add_info(record)
return airlines_dic
# tag::header[]
def list_sorted_ratings(airlines_dic):
"""Sort the airlines and flights based on the probability to be late
Args:
airlines_dic(dict): Dictionary with Airline objects
Returns:
rating_airlines(list): list of Airlines sorted by late probability (less late first)
rating_flights(list): list of flights sorted by late probability (less late first)
NOTE: Each element of the returned lists contains the following tuple:
(<airline|flight code>, <% late>, <average delay>)
"""
# end::header[]
rating_airlines = list()
rating_flights = list()
# Generate the lists
for name, airline in airlines_dic.items():
# Airline
rating_airline = airline.get_rating_airline()
# TODO: add code of Airline in the ranking
# name_code = f"{name} [{airline.code}]"
# rating_airlines.append((name_code, *rating_airline))
# NOTE: We add the name before the tuple by using "*args" to get elements of tuple
rating_airlines.append((name, *rating_airline))
# Flights
for flight in airline.flights.keys():
rating_flight = airline.get_rating_flight(flight)
# TODO: add destination of flight in the ranking
# code_dest = f"{flight} ({airline.destination[flight]})"
# rating_flights.append((code_dest, *rating_flight))
rating_flights.append((flight, *rating_flight))
# Sort the lists based on the probability to be late
def score_airline(t):
"""From tuple (% late, average delay) calculates a score
"% late" * 1000 + "average delay" to sort the airlines
"""
return t[1] * 1000 + t[2]
rating_airlines = sorted(rating_airlines, key=score_airline)
rating_flights = sorted(rating_flights, key=score_airline)
return rating_airlines, rating_flights
# tag::header[]
def get_first_last_elem(sorted_list, nb_elem: int):
"""Return lists with first/last <nb_elem> elements
Args:
sorted_list(list): list of elements
nb_elem(int): number of first/last elements
Return:
first(list): first nb_elem elements of sorted_list
last(list): last nb_elem elements of sorted_list in reverse order
"""
# end::header[]
if nb_elem < 1:
return [], []
else:
first_elements = sorted_list[:nb_elem]
last_elements = sorted_list[-nb_elem:]
last_elements.reverse()
return first_elements, last_elements
# tag::header[]
##=== MAIN PROGRAM ===##
# tag::main_function[]
if __name__ == "__main__":
# tag::argparse[]
# Main Program
description = """processRecords.py - Generating a report of airines and flights based on their delay"""
# end::header[]
# Parsing options and arguments
parser = argparse.ArgumentParser(description=description)
parser.add_argument("input_path")
parser.add_argument(
"-o",
"--output",
dest="report_path",
default="report_airlines.html",
help="Path of HTML report to generate",
)
parser.add_argument(
"-n",
dest="nb_ranking",
default=10,
type=int,
help="Number of best/worse airlines and flights",
)
cmd = parser.parse_args()
# end::argparse[]
# =DEBUG=#
# cmd = parser.parse_args(["list_records.txt"])
# Getting list of sorted elements
records = read_flight_records(cmd.input_path)
airlines_dic = get_ratings_airlines(records)
rating_airlines, rating_flights = list_sorted_ratings(airlines_dic)
# Generate list of best and worse <nb_ranking> airlines/flights
best_airlines, worse_airlines = get_first_last_elem(rating_airlines, cmd.nb_ranking)
best_flights, worse_flights = get_first_last_elem(rating_flights, cmd.nb_ranking)
# Retrieve template of report
try:
with open("report_Template.html") as f:
template = f.read()
# Replace single curly brackets by double ones in <STYLE> block
for block in re.findall(r"<style>.*?</style>", template, re.I | re.S):
updated_block = re.sub(r"([{}])\1*", r"\1" * 2, block)
template = template.replace(block, updated_block)
except Exception as e:
msg = f"Error while reading template 'report_Template.html' ({e})"
print(msg)
template = f"<html><body>{msg}</body></html>"
# tag::report_content[]
# Prepare the report
report = dict()
# time
report["date"] = str(datetime.date.today())
report["time"] = str(datetime.datetime.now().time())[:8]
# rankings
def store_ranking(key, ranking):
str_info = "<b>{0}</b> ({1}%, avg={2} min)"
report[key] = "</li>\n<li>".join([str_info.format(*t) for t in ranking])
store_ranking("best_airlines", best_airlines)
store_ranking("worse_airlines", worse_airlines)
store_ranking("best_flights", best_flights)
store_ranking("worse_flights", worse_flights)
# Generate the report
report_content = template.format(**report)
# end::report_content[]
try:
with open(cmd.report_path, "w") as f:
f.write(report_content)
except Exception as e:
print(f"Error while trying to write in '{cmd.report_path}' ({e})")
else:
print(f"Successfully wrote report {cmd.report_path!r}")
# end::main_function[]
```
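The functions above form a small pipeline: parse the records file, aggregate per airline, rank by lateness, then slice off the best and worst entries. A condensed sketch of that flow, assuming a records file such as the `mini_record.txt` used by the tests below exists next to the script:
```python
import processRecords as rec

records = rec.read_flight_records("mini_record.txt")         # parse lines into dicts
airlines_dic = rec.get_ratings_airlines(records)              # one Airline object per carrier
rating_airlines, rating_flights = rec.list_sorted_ratings(airlines_dic)
best_flights, worst_flights = rec.get_first_last_elem(rating_flights, 3)
print(best_flights)   # 3 most punctual flights: (code, % late, average delay)
print(worst_flights)  # 3 least punctual flights, worst first
```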
#### File: Solution/level_FINAL/test_airlines.py
```python
"pip install pytest"
# 2. Run Tests
"pytest -v test_airlines.py"
# NOTE: if "pytest" is not found, run "python -m pytest ..."
##############
import pytest
from typing import Dict, List, Any
from airlines import Airline, get_delay
try:
from airlines import get_delay_2
except ImportError:
get_delay_2 = get_delay
import processRecords as rec
def test_delay():
assert get_delay("12:10", "12:45") == 35
assert get_delay("12:10", "11:45") == -25
assert get_delay("23:45", "00:45") == 60
assert get_delay("00:45", "23:45") == -60
# For alternate solution
assert get_delay_2("12:10", "12:45") == 35
assert get_delay_2("12:10", "11:45") == -25
assert get_delay_2("23:45", "00:45") == 60
assert get_delay_2("00:45", "23:45") == -60
# Test class "Airline"
class Test_Airline:
"""Test class 'Airline'"""
def test_01_init(self):
""" Test initialization of class Airline"""
sq = Airline("Singapore Airlines", "SQ")
assert sq.flights == {}
assert sq.destination == {}
assert sq.name == "Singapore Airlines"
assert sq.code == "SQ"
def add_info_SQ_MU(self):
"""Function to add records - to be used for other tests"""
sq = Airline("Singapore Airlines", "SQ")
mu = Airline("China Eastern", "MU")
sq.add_info(
{
"date": "2015-08-20",
"time": "08:05",
"code": "SQ827",
"destination": "Singapore",
"take-off": "09:08",
}
)
sq.add_info(
{
"date": "2015-08-21",
"time": "08:05",
"code": "SQ827",
"destination": "Singapore",
"take-off": "08:35",
}
)
mu.add_info(
{
"date": "2015-08-20",
"time": "08:20",
"code": "MU511",
"destination": "Osaka Kansai",
"take-off": "8:25",
}
)
mu.add_info(
{
"date": "2015-08-20",
"time": "08:30",
"code": "MU721",
"destination": "Seoul",
"take-off": "9:30",
}
)
return sq, mu
def test_02_add_info(self):
"""Add record to list of flights"""
sq, mu = self.add_info_SQ_MU()
assert sq.flights == {
"SQ827": [
{"date": "2015-08-20", "delay": 63, "time": "08:05"},
{"date": "2015-08-21", "delay": 30, "time": "08:05"},
]
}
assert sq.destination == {"SQ827": "Singapore"}
assert mu.flights == {
"MU511": [{"date": "2015-08-20", "delay": 5, "time": "08:20"}],
"MU721": [{"date": "2015-08-20", "delay": 60, "time": "08:30"}],
}
assert mu.destination == {"MU511": "Osaka Kansai", "MU721": "Seoul"}
def test_03_get_rating_flight(self):
"""Get the rating of a given flight"""
sq, mu = self.add_info_SQ_MU()
assert sq.get_rating_flight("SQ827") == (50, 46)
assert mu.get_rating_flight("MU511") == (0, 5)
def test_04_get_rating_airline(self):
"""Get the rating of the airline"""
sq, mu = self.add_info_SQ_MU()
assert sq.get_rating_airline() == (50, 46)
assert mu.get_rating_airline() == (50, 32)
# Test main program
class TestMain(object):
"""Test main program (processRecords.py)"""
def test_01_read_flight_records_robustness(self):
"""Read flight records from an unknown file"""
with pytest.raises(IOError):
rec.read_flight_records("unknown.txt")
def test_01_read_flight_records(self):
"""Read flight records from an existing file"""
records = rec.read_flight_records("mini_record.txt")
assert records == [
{
"code": "EK303",
"take-off": "0:41",
"destination": "Dubai",
"airline": "Emirates Airlines",
"time": "0:05",
"date": "2015-08-20",
},
{
"code": "MU553",
"take-off": "0:15",
"destination": "Paris Ch. de Gaulle",
"airline": "China Eastern Airlines",
"time": "0:05",
"date": "2015-08-20",
},
{
"code": "MU219",
"take-off": "0:45",
"destination": "Frankfurt",
"airline": "China Eastern Airlines",
"time": "0:05",
"date": "2015-08-20",
},
]
def test_02_get_ratings_flight(self):
"""Get rating of flight"""
af = Airline("Air France", "Air France")
af.add_info(
{
"date": "2015-08-24",
"time": "12:00",
"code": "AF117",
"destination": "CDG",
"take-off": "12:46",
}
)
assert af.get_rating_flight("Unknown") == (None, None)
assert af.get_rating_flight("AF117") == (100, 46)
af.add_info(
{
"date": "2015-08-24",
"time": "12:00",
"code": "AF117",
"destination": "CDG",
"take-off": "11:46",
}
)
assert af.get_rating_flight("AF117") == (50, 16)
def test_03_get_ratings_airline(self):
"""Get ratings of airlines based on the records"""
records = rec.read_flight_records("mini_record.txt")
airlines = rec.get_ratings_airlines(records)
assert airlines["Emirates Airlines"].__dict__ == {
"destination": {"EK303": "Dubai"},
"flights": {"EK303": [{"date": "2015-08-20", "delay": 36, "time": "0:05"}]},
"code": "EK",
"name": "Emirates Airlines",
}
assert airlines["China Eastern Airlines"].__dict__ == {
"destination": {"MU553": "Paris Ch. de Gaulle", "MU219": "Frankfurt"},
"flights": {
"MU553": [{"date": "2015-08-20", "delay": 10, "time": "0:05"}],
"MU219": [{"date": "2015-08-20", "delay": 40, "time": "0:05"}],
},
"code": "MU",
"name": "China Eastern Airlines",
}
def test_04_list_sorted_ratings(self):
"""Sort the airlines and flights based on the chances to be late"""
records = rec.read_flight_records("mini_record.txt")
airlines = rec.get_ratings_airlines(records)
rating_airlines, rating_flights = rec.list_sorted_ratings(airlines)
assert rating_airlines == [
("China Eastern Airlines", 50, 25),
("Emirates Airlines", 100, 36),
]
assert rating_flights == [
("MU553", 0, 10),
("EK303", 100, 36),
("MU219", 100, 40),
]
def test_05_get_first_last_elem(self):
"""Return lists with first/last <nb_elem> elements"""
first, last = rec.get_first_last_elem(["f", "e", "d", "c", "b", "a"], 0)
assert first == []
assert last == []
first, last = rec.get_first_last_elem(["f", "e", "d", "c", "b", "a"], 4)
assert first == ["f", "e", "d", "c"]
assert last == ["a", "b", "c", "d"]
rating_flights = [("AA", 100), ("ZZ", 2), ("CC", 3), ("dd", 400), ("EE", 3)]
first, last = rec.get_first_last_elem(rating_flights, 2)
assert first == [("AA", 100), ("ZZ", 2)]
assert last == [("EE", 3), ("dd", 400)]
if __name__ == "__main__":
pytest.main(args=["-v"])
```
|
{
"source": "jfthuong/photo-organizer",
"score": 2
}
|
#### File: photo-organizer/recognition/streamlit_app.py
```python
import requests # type:ignore
import sys
import tempfile
from operator import itemgetter
from pathlib import Path
from typing import Callable, List, Tuple, Union
import streamlit as st
from streamlit.uploaded_file_manager import UploadedFile
import torch
import torchvision.transforms as transforms
from facenet_pytorch import MTCNN, InceptionResnetV1, extract_face
from fastai.vision.core import PILImage
from PIL import Image, ImageDraw, ImageFont
from torch import Tensor
PathLike = Union[Path, str]
pil2t = transforms.ToTensor()
t2pil = transforms.ToPILImage()
st.set_page_config(page_title="ML deployment, by unpackAI", page_icon="🖼️")
st.image("https://unpackai.github.io/unpackai_logo.svg")
st.title("Facial Identification")
st.write("### one-shot inference")
st.write("💉 *by Jeff* 🔫")
st.write("---")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Create an inception resnet (in eval mode for inference)
@st.cache(allow_output_mutation=True)
def get_resnet():
return InceptionResnetV1(pretrained="vggface2").eval().to(device)
resnet = get_resnet()
@st.cache
def get_image(img: PathLike) -> PILImage:
"""Get picture from either a path or URL"""
if str(img).startswith("http"):
with tempfile.TemporaryDirectory() as tmpdirname:
dest = Path(tmpdirname) / str(img).split("?")[0].rpartition("/")[-1]
with requests.get(str(img)) as resp:
resp.raise_for_status()
dest.write_bytes(resp.content)
return PILImage.create(dest)
else:
return PILImage.create(img)
def img_2_embedding(img_path: Union[PathLike, UploadedFile]) -> Tensor:
"""Calculate the embedding of a picture"""
resnet.classify = False
# We need to feed with a Tensor of pixels => we need to transform first
# We use `unsqueeze` to add batch dimension
img = pil2t(Image.open(img_path))
return resnet(img.unsqueeze(0).to(device))
DistFunc = Callable[[Tensor, Tensor], Union[float, Tensor]]
ListReferences = List[Union[Path, UploadedFile]]
def get_img_stem(img: Union[Path, UploadedFile]) -> str:
if isinstance(img, UploadedFile):
img = Path(img.name)
return img.stem
class FacialRecognizer:
# @st.cache(allow_output_mutation=True)
def __init__(
self, references: ListReferences, distance_func: DistFunc = torch.dist
):
self.distance = distance_func
self.references = {
get_img_stem(img): img_2_embedding(img) for img in references
}
def _embedding_2_person(
self, emb: Tensor, alpha: float = 0.2, threshold: float = 1.2
) -> Tuple[str, float]:
if not self.references:
raise ValueError("No reference images loaded")
dist = {p: self.distance(emb, img) for p, img in self.references.items()}
(p1, min_1), (_, min_2) = sorted(dist.items(), key=itemgetter(1))[:2]
if min_1 > threshold:
return "??", threshold
triplet_dist = float(min_1 - min_2 + alpha)
if triplet_dist <= 0:
person = p1
elif triplet_dist <= alpha:
person = f"~{p1}"
else:
person = f"?? ({p1}?)"
return person, triplet_dist
def get_person(self, img_path: PathLike, alpha: float = 0.2) -> Tuple[str, float]:
"""Return most likely person from an image path"""
new_img = img_2_embedding(img_path)
return self._embedding_2_person(new_img, alpha)
def face_2_person(self, face: Tensor, alpha: float = 0.2) -> Tuple[str, float]:
"""Return most likely person from a face"""
# We first need to embed the face for comparison
new_img = resnet(face.unsqueeze(0).to(device))
return self._embedding_2_person(new_img, alpha)
st.sidebar.header("References Images")
select_references = st.sidebar.radio(
"Which references to choose", ["Friends", "Custom"]
)
if select_references == "Friends":
st.sidebar.write(
'*We are using one picture for each one of the "Friends": Ross, Rachel, etc.*'
)
reference_dir = Path(__file__).parent / "friends_reference"
references = list(reference_dir.glob("*.jpg"))
else:
st.sidebar.write(
"Update one picture per person named after the person "
"*(e.g. 'Ada.jpg' for person named Ada)*"
)
references = st.sidebar.file_uploader(
"Upload reference pictures", accept_multiple_files=True
)
recognizer = FacialRecognizer(references=references)
if references:
st.sidebar.write(f"✔️🖼️ {len(references)} reference images found")
else:
st.sidebar.write(f"❌🖼️ No reference image found")
# @st.cache()
def show_persons_in_photos(
img: Image.Image,
recognizer: FacialRecognizer,
keep_all=True,
show_score=False,
min_face_size=20,
threshold=0.8,
ratio_face=0.02,
) -> Tuple[str, Image.Image]:
"""Crop face with MTCNN and return cropped & prewhitened image tensor"""
mtcnn = MTCNN(
image_size=160,
margin=0,
device=device,
keep_all=keep_all,
select_largest=True,
min_face_size=min_face_size,
)
try:
with tempfile.TemporaryDirectory() as tmpdirname:
boxes, probs = mtcnn.detect(img)
if boxes is None or not boxes.size:
return "No face", img
img_draw = img.copy()
draw = ImageDraw.Draw(img_draw)
persons: List[str] = list()
boxes_probs = sorted(zip(boxes, probs), key=lambda t: t[0][0])
for i, (box, prob) in enumerate(boxes_probs, start=1):
face = box.tolist()
ratio_w = (face[2] - face[0]) / img.shape[0]
ratio_h = (face[3] - face[1]) / img.shape[1]
if min(ratio_w, ratio_h) < ratio_face:
continue
if prob >= threshold:
# We need to save the picture and load it again
                    # ... because feeding an extract from the box did not seem to work
# ... or maybe it's because I did not understand how to do it
pic_path = Path(tmpdirname) / f"pic_{i}.jpg"
_ = extract_face(img, box, save_path=pic_path)
person, score = recognizer.get_person(pic_path)
persons.append(person)
draw.rectangle(face, width=5)
name_score = (
f"#{i}.{person}:{score:.2f}" if show_score else f"#{i}.{person}"
)
arial = Path(__file__).with_name("arial.ttf")
font = ImageFont.truetype(str(arial), 20)
draw.text(face, name_score, font=font, fill=(255, 255, 255, 255))
found_persons = ", ".join(persons) if persons else "No face identified"
return found_persons, img_draw
# We want to remove mtcnn at the end to reduce memory usage
finally:
del mtcnn
@st.cache()
def pic_2_predictions(pic):
img = get_image(pic)
return show_persons_in_photos(img, recognizer=recognizer)
def display_prediction(pic):
persons, img_annotation = pic_2_predictions(pic)
col_img, col_pred = st.columns(2)
col_img.image(img_annotation, caption=getattr(pic, "name", None))
col_pred.write(f"### {persons}")
select = st.radio(
"How to load pictures?", ["from files", "from samples of Friends", "from URL"]
)
st.write("---")
if select == "from URL":
url = st.text_input("url")
if url:
display_prediction(url)
elif select == "from samples of Friends":
pictures = sorted(Path(__file__).parent.glob("friends_samples/*.jpg"))
for pic in pictures:
display_prediction(pic)
else:
pictures = st.file_uploader("Choose pictures", accept_multiple_files=True)
for pic in pictures: # type:ignore # this is an iterable
display_prediction(pic)
```
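A minimal sketch of driving `FacialRecognizer` outside the Streamlit app, assuming the class definition above is available in the current scope and that reference JPEGs named after each person exist locally (paths here are hypothetical):
```python
from pathlib import Path

# One reference picture per person, named after that person (e.g. 'Ross.jpg').
refs = sorted(Path("friends_reference").glob("*.jpg"))
recognizer = FacialRecognizer(references=refs)

# Identify the most likely person in a new photo (hypothetical path).
person, triplet_dist = recognizer.get_person("unknown_face.jpg")
print(person, triplet_dist)
```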
|
{
"source": "jfthuong/pydpf-core",
"score": 2
}
|
#### File: core/operators/build.py
```python
import copy
import os
from datetime import datetime
from textwrap import wrap
import black
import chevron
from ansys.dpf import core as dpf
from ansys.dpf.core import common
from ansys.dpf.core.dpf_operator import available_operator_names
from ansys.dpf.core.outputs import _make_printable_type
from ansys.dpf.core.mapping_types import map_types_to_python
def build_docstring(specification):
"""Used to generate class docstrings."""
docstring = ""
if specification.description:
docstring += "\n".join(
wrap(specification.description, subsequent_indent=" ")
)
docstring += "\n\n"
docstring = docstring.rstrip()
return docstring.replace('"', "'")
def map_types(cpp_types):
"""Map C++ object types to Python types."""
types = []
# These types don't get mapped to Python types
types_to_ignore = ["vector", "umap", "enum"]
for cpp_type in cpp_types:
if any(type_name in cpp_type for type_name in types_to_ignore):
continue
else:
types.append(map_types_to_python[cpp_type])
return types
def update_type_names_for_ellipsis(type_names):
# Remove vector and umap types from the Python type
new_types = []
for name in type_names:
if name == "vector<double>" or name == "vector<int32>":
new_types.append(name)
elif "vector" not in name and "umap" not in name:
new_types.append(name)
return new_types
def build_pin_data(pins, output=False):
"""Build pin data for use within template."""
pin_ids = [pin for pin in pins]
pin_ids.sort()
data = []
for id in pin_ids:
specification = pins[id]
type_names = specification.type_names
if specification.ellipsis:
type_names = update_type_names_for_ellipsis(type_names)
docstring_types = map_types(type_names)
parameter_types = " or ".join(docstring_types)
parameter_types = "\n".join(
wrap(parameter_types, subsequent_indent=" ", width=60)
)
pin_name = specification.name
pin_name = pin_name.replace("<", "_")
pin_name = pin_name.replace(">", "_")
main_type = docstring_types[0] if len(docstring_types) >= 1 else ""
built_in_types = ("int", "double", "string", "bool", "float", "str")
# Case where output pin has multiple types.
multiple_types = len(type_names) >= 2
printable_type_names = type_names
if multiple_types and output:
printable_type_names = [_make_printable_type(name) for name in type_names]
pin_data = {
"id": id,
"name": pin_name,
"pin_name": pin_name, # Base pin name, without numbers for when pin is ellipsis
"has_types": len(type_names) >= 1,
"multiple_types": multiple_types,
"printable_type_names": printable_type_names,
"types": type_names,
"types_for_docstring": parameter_types,
"main_type": main_type,
"built_in_main_type": main_type in built_in_types,
"optional": specification.optional,
"document": "\n".join(
wrap(
specification.document.capitalize(),
subsequent_indent=" ",
width=45,
)
),
"ellipsis": 0 if specification.ellipsis else -1,
}
if specification.ellipsis:
# Create two pins for ellipsis field with exactly the same
# properties, just different names, ids, and ellipsis values
pin_data["name"] = pin_name + "1"
data.append(pin_data)
second_pin_data = copy.deepcopy(pin_data)
second_pin_data["name"] = pin_name + "2"
second_pin_data["id"] = id + 1
second_pin_data["ellipsis"] = 1
data.append(second_pin_data)
else:
data.append(pin_data)
return data
def build_operator(
specification, operator_name, class_name, capital_class_name, category
):
input_pins = []
if specification.inputs:
input_pins = build_pin_data(specification.inputs)
output_pins = []
if specification.outputs:
output_pins = build_pin_data(specification.outputs, output=True)
multiple_output_types = any(pin["multiple_types"] for pin in output_pins)
docstring = build_docstring(specification)
specification_description = "\n".join(
wrap(specification.description, subsequent_indent=" ")
)
date_and_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
data = {
"operator_name": operator_name,
"class_name": class_name,
"capital_class_name": capital_class_name,
"docstring": docstring,
"specification_description": specification_description,
"input_pins": input_pins,
"output_pins": output_pins,
"outputs": len(output_pins) >= 1,
"multiple_output_types": multiple_output_types,
"category": category,
"date_and_time": date_and_time,
}
this_path = os.path.dirname(os.path.abspath(__file__))
mustache_file = os.path.join(this_path, "operator.mustache")
with open(mustache_file, "r") as f:
cls = chevron.render(f, data)
return black.format_str(cls, mode=black.FileMode())
if __name__ == "__main__":
this_path = os.path.dirname(os.path.abspath(__file__))
available_operators = available_operator_names()
succeeded = 0
for operator_name in available_operators:
specification = dpf.Operator.operator_specification(operator_name)
category = specification.properties.get("category", "")
if not category:
raise (f"Category not defined for operator {operator_name}.")
scripting_name = specification.properties.get("scripting_name", "")
# Make directory for new category
category_path = os.path.join(this_path, category)
if not os.path.exists(category_path):
os.mkdir(category_path)
# Clean up scripting name
if scripting_name == "":
scripting_name = operator_name
if "::" in scripting_name:
scripting_name = scripting_name.split("::")[-1]
if "." in scripting_name:
scripting_name = scripting_name.split(".")[-1]
# Get python class name from scripting name
capital_class_name = common._snake_to_camel_case(scripting_name)
# Write to operator file
operator_file = os.path.join(category_path, scripting_name + ".py")
with open(operator_file, "w") as f:
try:
operator_str = build_operator(
specification,
operator_name,
scripting_name,
capital_class_name,
category,
)
exec(operator_str)
f.write(operator_str)
succeeded += 1
except SyntaxError as e:
error_message = (
f"Unable to generate {operator_name}, {scripting_name}, {capital_class_name}.\n"
f"Error message: {e}\n"
)
with open(os.path.join(this_path, "failures.txt"), "w") as error_file:
error_file.write(error_message)
error_file.write(f"Class: {operator_str}")
print(error_message)
print(f"Generated {succeeded} out of {len(available_operators)}")
dpf.SERVER.shutdown()
```
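The loop above derives the CamelCase name used for the generated `Inputs*`/`Outputs*` classes: it keeps the last component of the scripting name after any `::` or `.`, then camel-cases it via `common._snake_to_camel_case`. A small standalone sketch of that naming step (the camel-case helper here is a simplified stand-in, not the library function):
```python
def snake_to_camel(name: str) -> str:
    # Simplified stand-in for ansys.dpf.core.common._snake_to_camel_case
    return "".join(part.capitalize() for part in name.split("_"))


def capital_class_name_for(operator_name: str, scripting_name: str = "") -> str:
    """Mimic the scripting-name clean-up done in the build loop above."""
    name = scripting_name or operator_name
    if "::" in name:
        name = name.split("::")[-1]
    if "." in name:
        name = name.split(".")[-1]
    return snake_to_camel(name)


print(capital_class_name_for("add_fc"))               # AddFc
print(capital_class_name_for("field::set_property"))  # SetProperty
```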
#### File: operators/math/add_fc.py
```python
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class add_fc(Operator):
"""Select all fields having the same label space in the input fields
container, and add those together. If fields, doubles, or vectors
of doubles are put in input, they are added to all the fields.
Parameters
----------
fields_container1 : FieldsContainer or Field or float
fields_container2 : FieldsContainer or Field or float
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.math.add_fc()
>>> # Make input connections
>>> my_fields_container1 = dpf.FieldsContainer()
>>> op.inputs.fields_container1.connect(my_fields_container1)
>>> my_fields_container2 = dpf.FieldsContainer()
>>> op.inputs.fields_container2.connect(my_fields_container2)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.math.add_fc(
... fields_container1=my_fields_container1,
... fields_container2=my_fields_container2,
... )
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(
self, fields_container1=None, fields_container2=None, config=None, server=None
):
super().__init__(name="add_fc", config=config, server=server)
self._inputs = InputsAddFc(self)
self._outputs = OutputsAddFc(self)
if fields_container1 is not None:
self.inputs.fields_container1.connect(fields_container1)
if fields_container2 is not None:
self.inputs.fields_container2.connect(fields_container2)
@staticmethod
def _spec():
description = """Select all fields having the same label space in the input fields
container, and add those together. If fields, doubles, or
vectors of doubles are put in input, they are added to all
the fields."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=[
"fields_container",
"field",
"double",
"vector<double>",
],
optional=False,
document="""""",
),
1: PinSpecification(
name="fields_container",
type_names=[
"fields_container",
"field",
"double",
"vector<double>",
],
optional=False,
document="""""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
"""
return Operator.default_config(name="add_fc", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsAddFc
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsAddFc
"""
return super().outputs
class InputsAddFc(_Inputs):
"""Intermediate class used to connect user inputs to
add_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.add_fc()
>>> my_fields_container1 = dpf.FieldsContainer()
>>> op.inputs.fields_container1.connect(my_fields_container1)
>>> my_fields_container2 = dpf.FieldsContainer()
>>> op.inputs.fields_container2.connect(my_fields_container2)
"""
def __init__(self, op: Operator):
super().__init__(add_fc._spec().inputs, op)
self._fields_container1 = Input(add_fc._spec().input_pin(0), 0, op, 0)
self._inputs.append(self._fields_container1)
self._fields_container2 = Input(add_fc._spec().input_pin(1), 1, op, 1)
self._inputs.append(self._fields_container2)
@property
def fields_container1(self):
"""Allows to connect fields_container1 input to the operator.
Parameters
----------
my_fields_container1 : FieldsContainer or Field or float
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.add_fc()
>>> op.inputs.fields_container1.connect(my_fields_container1)
>>> # or
>>> op.inputs.fields_container1(my_fields_container1)
"""
return self._fields_container1
@property
def fields_container2(self):
"""Allows to connect fields_container2 input to the operator.
Parameters
----------
my_fields_container2 : FieldsContainer or Field or float
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.add_fc()
>>> op.inputs.fields_container2.connect(my_fields_container2)
>>> # or
>>> op.inputs.fields_container2(my_fields_container2)
"""
return self._fields_container2
class OutputsAddFc(_Outputs):
"""Intermediate class used to get outputs from
add_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.add_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(add_fc._spec().outputs, op)
self._fields_container = Output(add_fc._spec().output_pin(0), 0, op)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
Returns
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.add_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
""" # noqa: E501
return self._fields_container
```
#### File: operators/utility/set_property.py
```python
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.outputs import _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class set_property(Operator):
"""Set a property to an input field/field container
Parameters
----------
field : Field or FieldsContainer
property_name : str
Property to set
property_value : str or int or float
Property to set
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.utility.set_property()
>>> # Make input connections
>>> my_field = dpf.Field()
>>> op.inputs.field.connect(my_field)
>>> my_property_name = str()
>>> op.inputs.property_name.connect(my_property_name)
>>> my_property_value = str()
>>> op.inputs.property_value.connect(my_property_value)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.utility.set_property(
... field=my_field,
... property_name=my_property_name,
... property_value=my_property_value,
... )
>>> # Get output data
>>> result_field = op.outputs.field()
"""
def __init__(
self,
field=None,
property_name=None,
property_value=None,
config=None,
server=None,
):
super().__init__(name="field::set_property", config=config, server=server)
self._inputs = InputsSetProperty(self)
self._outputs = OutputsSetProperty(self)
if field is not None:
self.inputs.field.connect(field)
if property_name is not None:
self.inputs.property_name.connect(property_name)
if property_value is not None:
self.inputs.property_value.connect(property_value)
@staticmethod
def _spec():
description = """Set a property to an input field/field container"""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="field",
type_names=["field", "fields_container"],
optional=False,
document="""""",
),
1: PinSpecification(
name="property_name",
type_names=["string"],
optional=False,
document="""Property to set""",
),
2: PinSpecification(
name="property_value",
type_names=["string", "int32", "double"],
optional=False,
document="""Property to set""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="field",
type_names=["field", "fields_container"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
"""
return Operator.default_config(name="field::set_property", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsSetProperty
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsSetProperty
"""
return super().outputs
class InputsSetProperty(_Inputs):
"""Intermediate class used to connect user inputs to
set_property operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.set_property()
>>> my_field = dpf.Field()
>>> op.inputs.field.connect(my_field)
>>> my_property_name = str()
>>> op.inputs.property_name.connect(my_property_name)
>>> my_property_value = str()
>>> op.inputs.property_value.connect(my_property_value)
"""
def __init__(self, op: Operator):
super().__init__(set_property._spec().inputs, op)
self._field = Input(set_property._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._field)
self._property_name = Input(set_property._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._property_name)
self._property_value = Input(set_property._spec().input_pin(2), 2, op, -1)
self._inputs.append(self._property_value)
@property
def field(self):
"""Allows to connect field input to the operator.
Parameters
----------
my_field : Field or FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.set_property()
>>> op.inputs.field.connect(my_field)
>>> # or
>>> op.inputs.field(my_field)
"""
return self._field
@property
def property_name(self):
"""Allows to connect property_name input to the operator.
Property to set
Parameters
----------
my_property_name : str
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.set_property()
>>> op.inputs.property_name.connect(my_property_name)
>>> # or
>>> op.inputs.property_name(my_property_name)
"""
return self._property_name
@property
def property_value(self):
"""Allows to connect property_value input to the operator.
Property to set
Parameters
----------
my_property_value : str or int or float
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.set_property()
>>> op.inputs.property_value.connect(my_property_value)
>>> # or
>>> op.inputs.property_value(my_property_value)
"""
return self._property_value
class OutputsSetProperty(_Outputs):
"""Intermediate class used to get outputs from
set_property operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.set_property()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
"""
def __init__(self, op: Operator):
super().__init__(set_property._spec().outputs, op)
self.field_as_field = Output(
_modify_output_spec_with_one_type(
set_property._spec().output_pin(0), "field"
),
0,
op,
)
self._outputs.append(self.field_as_field)
self.field_as_fields_container = Output(
_modify_output_spec_with_one_type(
set_property._spec().output_pin(0), "fields_container"
),
0,
op,
)
self._outputs.append(self.field_as_fields_container)
```
#### File: tests/slow/test_remoteworkflow.py
```python
import numpy as np
import pytest
from ansys.dpf import core
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops
from ansys.dpf.core.check_version import meets_version, get_server_version
from conftest import local_servers
SERVER_VERSION_HIGHER_THAN_3_0 = meets_version(get_server_version(core._global_server()), "3.0")
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_simple_remote_workflow(simple_bar, local_server):
data_sources1 = core.DataSources(simple_bar)
wf = core.Workflow()
op = ops.result.displacement(data_sources=data_sources1)
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_output_name("out", average.outputs.fields_container)
local_wf = core.Workflow()
min_max = ops.min_max.min_max_fc()
local_wf.add_operator(min_max)
local_wf.set_input_name("in", min_max.inputs.fields_container)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(local_server.ip + ":" + str(local_server.port), "grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
local_wf.connect_with(remote_workflow, ("out", "in"))
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [2.52368345e-05])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_remote_workflow():
files = examples.download_distributed_files()
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i])
wf = core.Workflow()
op = ops.result.displacement(data_sources=data_sources1)
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_output_name("distrib" + str(i), average.outputs.fields_container)
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_servers[i].ip + ":" + str(local_servers[i].port), "grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
workflows.append(remote_workflow)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf)
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_connect_remote_workflow():
files = examples.download_distributed_files()
wf = core.Workflow()
op = ops.result.displacement()
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_input_name("data_sources", op.inputs.data_sources)
wf.set_output_name("distrib", average.outputs.fields_container)
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i])
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_servers[i].ip + ":" + str(local_servers[i].port),
"grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
remote_workflow.connect("data_sources", data_sources1)
workflows.append(remote_workflow)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf, ("distrib", "distrib" + str(i)))
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_connect_operator_remote_workflow():
files = examples.download_distributed_files()
wf = core.Workflow()
op = ops.result.displacement()
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_input_name("data_sources", op.inputs.data_sources)
wf.set_output_name("distrib", average.outputs.fields_container)
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i])
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_servers[i].ip + ":" + str(local_servers[i].port),
"grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
forward = ops.utility.forward(data_sources1)
remote_workflow.connect("data_sources", forward, 0)
workflows.append(remote_workflow)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf, ("distrib", "distrib" + str(i)))
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_getoutput_remote_workflow():
files = examples.download_distributed_files()
wf = core.Workflow()
op = ops.result.displacement()
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_input_name("data_sources", op.inputs.data_sources)
wf.set_output_name("distrib", average.outputs.fields_container)
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i])
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_servers[i].ip + ":" + str(local_servers[i].port),
"grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
remote_workflow.connect("data_sources", data_sources1)
workflows.append(remote_workflow)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
tmp = wf.get_output("distrib", core.types.fields_container)
local_wf.connect("distrib" + str(i), tmp)
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
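# Note: the ``True or`` in the following skipif marker disables the chained-workflow test
# unconditionally; the server-version check after it is never evaluated.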
@pytest.mark.skipif(True or not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_chain_remote_workflow():
files = examples.download_distributed_files()
wf = core.Workflow()
op = ops.result.displacement()
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_input_name("data_sources", op.inputs.data_sources)
wf.set_output_name("distrib", average.outputs.fields_container)
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i])
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_servers[i].ip + ":" + str(local_servers[i].port),
"grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
remote_workflow.connect("data_sources", data_sources1)
workflows.append(remote_workflow)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_servers[2].ip + ":" + str(local_servers[2].port),
"grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, local_wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
for i, wf in enumerate(workflows):
remote_workflow.connect_with(wf, ("distrib", "distrib" + str(i)))
max = remote_workflow.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_remote_workflow_info(local_server):
wf = core.Workflow()
op = ops.result.displacement()
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_input_name("data_sources", op.inputs.data_sources)
wf.set_output_name("distrib", average.outputs.fields_container)
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_server.ip + ":" + str(local_server.port),
"grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
assert "data_sources" in remote_workflow.input_names
assert "distrib" in remote_workflow.output_names
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_local_remote_local_remote_workflow():
files = examples.download_distributed_files()
wf = core.Workflow()
average = core.operators.math.norm_fc()
wf.add_operators([average])
wf.set_input_name("u", average.inputs.fields_container)
wf.set_output_name("distrib", average.outputs.fields_container)
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i])
grpc_stream_provider = ops.metadata.streams_provider()
grpc_data_sources = core.DataSources()
grpc_data_sources.set_result_file_path(
local_servers[i].ip + ":" + str(local_servers[i].port),
"grpc")
grpc_stream_provider.inputs.data_sources(grpc_data_sources)
remote_workflow_prov = core.Operator("remote_workflow_instantiate")
remote_workflow_prov.connect(3, grpc_stream_provider, 0)
remote_workflow_prov.connect(0, wf)
remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
first_wf = core.Workflow()
op = ops.result.displacement()
first_wf.add_operator(op)
first_wf.set_input_name("data_sources", op.inputs.data_sources)
first_wf.set_output_name("u", op.outputs.fields_container)
first_wf.connect("data_sources", data_sources1)
remote_workflow.connect_with(first_wf)
workflows.append(remote_workflow)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf, ("distrib", "distrib" + str(i)))
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
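# The "transparent API" tests that follow build each workflow directly on its own server
# by passing ``server=local_servers[i]`` to DataSources, Workflow and the operators,
# instead of instantiating a locally-built workflow through ``remote_workflow_instantiate``.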
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_transparent_api_remote_workflow():
files = examples.download_distributed_files()
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i], server=local_servers[i])
wf = core.Workflow(server=local_servers[i])
op = ops.result.displacement(data_sources=data_sources1, server=local_servers[i])
average = core.operators.math.norm_fc(op, server=local_servers[i])
wf.add_operators([op, average])
wf.set_output_name("distrib" + str(i), average.outputs.fields_container)
workflows.append(wf)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf)
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_with_names_transparent_api_remote_workflow():
files = examples.download_distributed_files()
workflows = []
for i in files:
data_sources1 = core.DataSources(files[i], server=local_servers[i])
wf = core.Workflow(server=local_servers[i])
op = ops.result.displacement(data_sources=data_sources1, server=local_servers[i])
average = core.operators.math.norm_fc(op, server=local_servers[i])
wf.add_operators([op, average])
wf.set_output_name("distrib", average.outputs.fields_container)
workflows.append(wf)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf, ("distrib", "distrib" + str(i)))
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_transparent_api_connect_local_datasources_remote_workflow():
files = examples.download_distributed_files()
workflows = []
for i in files:
wf = core.Workflow(server=local_servers[i])
op = ops.result.displacement(server=local_servers[i])
average = core.operators.math.norm_fc(op, server=local_servers[i])
wf.add_operators([op, average])
wf.set_output_name("distrib" + str(i), average.outputs.fields_container)
wf.set_input_name("ds", op.inputs.data_sources)
workflows.append(wf)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
data_sources1 = core.DataSources(files[i])
wf.connect("ds", data_sources1)
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf)
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_transparent_api_connect_local_op_remote_workflow():
files = examples.download_distributed_files()
workflows = []
for i in files:
wf = core.Workflow(server=local_servers[i])
op = ops.result.displacement(server=local_servers[i])
average = core.operators.math.norm_fc(op, server=local_servers[i])
wf.add_operators([op, average])
wf.set_output_name("distrib" + str(i), average.outputs.fields_container)
wf.set_input_name("ds", op.inputs.data_sources)
workflows.append(wf)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i, wf in enumerate(workflows):
data_sources1 = core.DataSources(files[i])
forward = ops.utility.forward(data_sources1)
wf.connect("ds", forward, 0)
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(wf)
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_transparent_api_create_on_local_remote_workflow():
files = examples.download_distributed_files()
wf = core.Workflow()
op = ops.result.displacement()
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_output_name("distrib", average.outputs.fields_container)
wf.set_input_name("ds", op.inputs.data_sources)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i in files:
data_sources1 = core.DataSources(files[i])
remote_wf = wf.create_on_other_server(server=local_servers[i])
remote_wf.connect("ds", data_sources1)
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(remote_wf, ("distrib", "distrib" + str(i)))
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_multi_process_transparent_api_create_on_local_remote_ith_address_workflow():
files = examples.download_distributed_files()
wf = core.Workflow()
op = ops.result.displacement()
average = core.operators.math.norm_fc(op)
wf.add_operators([op, average])
wf.set_output_name("distrib", average.outputs.fields_container)
wf.set_input_name("ds", op.inputs.data_sources)
local_wf = core.Workflow()
merge = ops.utility.merge_fields_containers()
min_max = ops.min_max.min_max_fc(merge)
local_wf.add_operator(merge)
local_wf.add_operator(min_max)
local_wf.set_output_name("tot_output", min_max.outputs.field_max)
for i in files:
data_sources1 = core.DataSources(files[i])
remote_wf = wf.create_on_other_server(ip=local_servers[i].ip, port=local_servers[i].port)
remote_wf.connect("ds", data_sources1)
local_wf.set_input_name("distrib" + str(i), merge, i)
local_wf.connect_with(remote_wf, ("distrib", "distrib" + str(i)))
max = local_wf.get_output("tot_output", core.types.field)
assert np.allclose(max.data, [10.03242272])
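# ``Workflow.create_on_other_server`` (used in the two tests above) copies a locally-built
# workflow to another server, addressed either through a ``server=`` object or through
# explicit ``ip=`` / ``port=`` arguments.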
```
#### File: pydpf-core/tests/test_cache.py
```python
from ansys.dpf import core as dpf
from ansys.dpf.core.check_version import server_meet_version
def test_unit_mesh_cache(simple_bar):
model = dpf.Model(simple_bar)
mesh = model.metadata.meshed_region
initunit = mesh.unit
assert len(mesh._cache.cached) == 1
assert mesh.unit == initunit
mesh.unit = "cm"
assert len(mesh._cache.cached) == 0
assert mesh.unit == "cm"
assert len(mesh._cache.cached) == 1
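# The mesh caching tests rely on two behaviors visible above: reading a cached property
# (e.g. ``mesh.unit``) adds an entry to ``mesh._cache.cached``, while setting the same
# property clears the cache, so the next read repopulates it.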
def test_named_selections_mesh_cache(simple_bar):
model = dpf.Model(simple_bar)
mesh = model.metadata.meshed_region
init = mesh.available_named_selections
assert len(mesh._cache.cached) == 1
assert mesh.available_named_selections == init
assert len(mesh._cache.cached) == 1
ns = mesh.named_selection(init[0])
assert len(mesh._cache.cached) == 2
def test_mismatch_instances_cache(simple_bar):
model = dpf.Model(simple_bar)
model2 = dpf.Model(simple_bar)
mesh = model.metadata.meshed_region
mesh2 = model2.metadata.meshed_region
initunit = mesh.unit
assert len(mesh._cache.cached) == 1
assert len(mesh2._cache.cached) == 0
assert mesh.unit == initunit
mesh.unit = "cm"
assert len(mesh._cache.cached) == 0
    mesh2.unit  # a bare property access is enough to populate mesh2's cache
assert len(mesh2._cache.cached) == 1
def test_available_results_cache(simple_bar):
model = dpf.Model(simple_bar)
res_info = model.metadata.result_info
for res in res_info:
pass
assert len(res_info._cache.cached) == len(res_info) + 1
def test_physics_type_cache(simple_bar):
ds = dpf.DataSources(simple_bar)
provider = dpf.operators.metadata.result_info_provider(data_sources=ds)
res_info = provider.outputs.result_info()
assert len(res_info._cache.cached) == 0
res_info.unit_system
assert len(res_info._cache.cached) == 1
res_info.physics_type
if server_meet_version("3.0", ds._server):
assert len(res_info._cache.cached) == 2
else:
assert len(res_info._cache.cached) == 1
def test_server_info_cache():
if not dpf.SERVER:
dpf.start_local_server()
dpf.SERVER.info
identifier = dpf.cache.MethodIdentifier("_get_server_info", (), {})
assert identifier in dpf.SERVER._base_service._cache.cached
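# Cache entries are keyed by ``dpf.cache.MethodIdentifier(method_name, args, kwargs)``,
# which is why the identifier built above can be looked up directly in
# ``dpf.SERVER._base_service._cache.cached``.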
```
#### File: pydpf-core/tests/test_code_docstrings.py
```python
import doctest
import os
import pathlib
import pytest
@pytest.mark.skipif(True, reason="examples are created for windows")
def test_doctest_allfiles():
directory = r"../ansys/dpf/core"
actual_path = pathlib.Path(__file__).parent.absolute()
# actual_path = os.getcwd()
print(actual_path)
for filename in os.listdir(os.path.join(actual_path, directory)):
if filename.endswith(".py"):
path = os.path.join(directory, filename)
print(path)
doctest.testfile(path, verbose=True, raise_on_error=True)
else:
continue
@pytest.mark.skipif(True, reason="examples are created for windows")
def test_doctest_allexamples():
directory = r"../examples"
actual_path = pathlib.Path(__file__).parent.absolute()
handled_files = []
for root, subdirectories, files in os.walk(os.path.join(actual_path, directory)):
for subdirectory in subdirectories:
subdir = os.path.join(root, subdirectory)
print(subdir)
for filename in os.listdir(subdir):
if filename.endswith(".py"):
path = os.path.join(subdir, filename)
if ".ipynb_checkpoints" in path:
continue
print(path)
handled_files.append(path)
exec(
open(path, mode="r", encoding="utf8").read(),
globals(),
globals(),
)
else:
continue
print(handled_files)
if __name__ == "__main__":
test_doctest_allfiles()
```
#### File: pydpf-core/tests/test_field.py
```python
import numpy as np
import pytest
from ansys import dpf
from ansys.dpf import core
from ansys.dpf.core import FieldDefinition
from ansys.dpf.core import operators as ops
from ansys.dpf.core.common import locations, shell_layers
@pytest.fixture()
def stress_field(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
return stress.outputs.fields_container()[0]
def test_create_field():
field = dpf.core.Field()
assert field._message.id != 0
def test_create_field_from_helper_scalar():
data = np.random.random(10)
field_a = dpf.core.field_from_array(data)
assert np.allclose(field_a.data, data)
def test_create_field_from_helper_vector():
data = np.random.random((10, 3))
field_a = dpf.core.field_from_array(data)
assert np.allclose(field_a.data, data)
def test_createbycopy_field():
field = dpf.core.Field()
field2 = dpf.core.Field(field=field._message)
assert field._message.id == field2._message.id
def test_set_get_scoping():
field = dpf.core.Field()
scoping = dpf.core.Scoping()
ids = [1, 2, 3, 5, 8, 9, 10]
scoping.ids = ids
field.scoping = scoping
assert field.scoping.ids == ids
def test_set_get_data_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.scalar)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert np.allclose(field.data, data)
def test_set_get_data_array_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
data.append(i + 0.001)
data.append(i + 0.001)
data = np.array(data)
data = data.reshape((20, 3))
scoping.ids = ids
field.scoping = scoping
field.data = data
assert np.allclose(field.data, data)
def test_append_data_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
data = [0.01 + i, 0.02 + i, 0.03 + i]
field.append(data, scopingid)
scopingOut = field.scoping
assert scopingOut.ids == list(range(1, 21))
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
datain = [0.01 + i, 0.02 + i, 0.03 + i]
dataout = field.get_entity_data(scopingindex)
assert np.allclose(dataout, datain)
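# ``Field.append(data, scoping_id)`` grows the field data and its scoping together, as the
# checks above show; entity data can then be read back by index with ``get_entity_data``
# or, in the following tests, by scoping id with ``get_entity_data_by_id``.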
def test_set_get_entity_data_array_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
data = [0.01 + i, 0.02 + i, 0.03 + i]
data = np.array(data)
data = data.reshape((1, 3))
field.append(data, scopingid)
scopingOut = field.scoping
assert scopingOut.ids == list(range(1, 21))
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
datain = [0.01 + i, 0.02 + i, 0.03 + i]
dataout = field.get_entity_data(scopingindex)
assert np.allclose(dataout, datain)
dataout = field.get_entity_data_by_id(scopingid)
assert np.allclose(dataout, datain)
# def test_get_data_ptr_field():
# field= dpf.core.Field(nentities=3, nature=dpf.core.natures.scalar,
# location=dpf.core.locations.elemental_nodal)
# data = [0.01,0.02,0.03]
# field.set_entity_data(data,0,1)
# data = [0.01,0.02,0.03,0.01,0.02,0.03]
# field.set_entity_data(data,1,2)
# data = [0.01,0.02,0.03,0.01]
# field.set_entity_data(data,2,3)
# scopingOut = field.scoping
# assert scopingOut.ids == [1,2,3]
# dataptr = field.data_ptr
# assert dataptr == [0,3,9]
def test_set_get_data_property_field():
field = core.Field(nentities=20, nature=dpf.core.natures.scalar)
scoping = core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert np.allclose(field.data, data)
def test_count_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.scalar)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert field.component_count == 1
assert field.elementary_data_count == 20
assert field.size == 20
def test_resize_field():
field = dpf.core.Field(nentities=1, nature=dpf.core.natures.scalar)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
field.resize(20, 20)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert field.component_count == 1
assert field.elementary_data_count == 20
assert field.size == 20
def test_fromarray_field():
data = np.empty((100, 6))
f = dpf.core.field_from_array(data)
assert f.shape == (100, 6)
def test_field_definition_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
assert f.unit == "m"
assert f.location == dpf.core.locations.nodal
def test_field_definition_modif_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
fielddef = f.field_definition
assert fielddef.unit == "m"
assert fielddef.location == dpf.core.locations.nodal
assert fielddef.dimensionality.nature == dpf.core.natures.vector
assert fielddef.dimensionality.dim == [3]
assert fielddef.shell_layers == dpf.core.shell_layers.layerindependent
fielddef.unit = "mm"
assert fielddef.unit == "mm"
fielddef.location = dpf.core.locations.elemental
assert fielddef.location == dpf.core.locations.elemental
fielddef.dimensionality = dpf.core.Dimensionality.scalar_dim()
assert fielddef.dimensionality.nature == dpf.core.natures.scalar
assert fielddef.dimensionality.dim == [1]
fielddef.dimensionality = dpf.core.Dimensionality.tensor_dim()
assert fielddef.dimensionality.nature == dpf.core.natures.symmatrix
assert fielddef.dimensionality.dim == [3, 3]
fielddef.dimensionality = dpf.core.Dimensionality.vector_3d_dim()
assert fielddef.dimensionality.nature == dpf.core.natures.vector
assert fielddef.dimensionality.dim == [3]
fielddef.dimensionality = dpf.core.Dimensionality.vector_dim(4)
assert fielddef.dimensionality.nature == dpf.core.natures.vector
assert fielddef.dimensionality.dim == [4]
fielddef.shell_layers = dpf.core.shell_layers.bottom
assert fielddef.shell_layers == dpf.core.shell_layers.bottom
def test_field_definition_set_in_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
fielddef = f.field_definition
fielddef.unit = "mm"
fielddef.location = dpf.core.locations.elemental
fielddef.dimensionality = dpf.core.Dimensionality.scalar_dim()
fielddef.shell_layers = dpf.core.shell_layers.bottom
f.field_definition = fielddef
fielddef = f.field_definition
assert fielddef.unit == "mm"
assert fielddef.location == dpf.core.locations.elemental
assert fielddef.dimensionality.nature == dpf.core.natures.scalar
assert fielddef.dimensionality.dim == [1]
assert fielddef.shell_layers == dpf.core.shell_layers.bottom
assert f.unit == "mm"
assert f.location == dpf.core.locations.elemental
assert f.dimensionality.nature == dpf.core.natures.scalar
assert f.dimensionality.dim == [1]
assert f.shell_layers == dpf.core.shell_layers.bottom
def test_change_field_definition_in_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
f.unit = "mm"
f.location = dpf.core.locations.elemental
f.dimensionality = dpf.core.Dimensionality.scalar_dim()
f.shell_layers = dpf.core.shell_layers.bottom
fielddef = f.field_definition
assert fielddef.unit == "mm"
assert fielddef.location == dpf.core.locations.elemental
assert fielddef.dimensionality.nature == dpf.core.natures.scalar
assert fielddef.dimensionality.dim == [1]
assert fielddef.shell_layers == dpf.core.shell_layers.bottom
assert f.unit == "mm"
assert f.location == dpf.core.locations.elemental
assert f.dimensionality.nature == dpf.core.natures.scalar
assert f.dimensionality.dim == [1]
assert f.shell_layers == dpf.core.shell_layers.bottom
def test_create_overall_field():
field_overall = dpf.core.Field(nentities=1, location="overall", nature="vector")
field_overall.scoping.location = "overall"
field_overall.scoping.ids = [0]
field_overall.data = [1.0, 2.0, 3.0]
field = dpf.core.Field(nentities=5, location="nodal")
field.scoping.location = "nodal"
field.scoping.ids = list(range(1, 6))
data = [float(i) for i in range(0, 15)]
field.data = data
add = dpf.core.Operator("add")
add.inputs.fieldA(field)
add.inputs.fieldB(field_overall)
field_added = add.outputs.field()
data_added = field_added.data
for i in range(0, 5):
assert np.allclose(data_added[i], [i * 3.0 + 1.0, i * 3.0 + 3.0, i * 3.0 + 5.0])
def test_data_pointer_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("S")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
data_pointer = fcOut[0]._data_pointer
assert len(data_pointer) == len(fcOut[0].scoping)
assert data_pointer[0] == 0
assert data_pointer[1] == 72
f = fcOut[0]
data_pointer[1] = 40
f._data_pointer = data_pointer
data_pointer = fcOut[0]._data_pointer
assert len(data_pointer) == len(fcOut[0].scoping)
assert data_pointer[0] == 0
assert data_pointer[1] == 40
def test_data_pointer_prop_field():
pfield = dpf.core.PropertyField()
pfield.append([1, 2, 3], 1)
pfield.append([1, 2, 3, 4], 2)
pfield.append([1, 2, 3], 3)
data_pointer = pfield._data_pointer
assert len(data_pointer) == 3
assert data_pointer[0] == 0
assert data_pointer[1] == 3
assert data_pointer[2] == 7
data_pointer[1] = 4
pfield._data_pointer = data_pointer
data_pointer = pfield._data_pointer
assert len(data_pointer) == 3
assert data_pointer[0] == 0
assert data_pointer[1] == 4
assert data_pointer[2] == 7
def test_append_data_elemental_nodal_field(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
assert f.location == "ElementalNodal"
f_new = dpf.core.Field(
f.scoping.size,
nature=dpf.core.natures.symmatrix,
location=dpf.core.locations.elemental_nodal,
)
size = int(f.scoping.size / 100)
for i in range(0, size):
f_new.append(f.get_entity_data(i), f.scoping.id(i))
for i in range(0, size):
assert np.allclose(f_new.get_entity_data(i), f.get_entity_data(i))
def test_str_field(stress_field):
assert "Location" in str(stress_field)
assert "ElementalNodal" in str(stress_field)
assert "Unit" in str(stress_field)
assert "Pa" in str(stress_field)
assert "9255" in str(stress_field)
assert "40016" in str(stress_field)
assert "6" in str(stress_field)
def test_to_nodal(stress_field):
assert stress_field.location == "ElementalNodal"
field_out = stress_field.to_nodal()
assert field_out.location == "Nodal"
def test_mesh_support_field(stress_field):
mesh = stress_field.meshed_region
assert len(mesh.nodes.scoping) == 15129
assert len(mesh.elements.scoping) == 10292
def test_shell_layers_1(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
assert f.shell_layers == shell_layers.topbottommid
model = dpf.core.Model(allkindofcomplexity)
disp = model.results.displacement()
f = disp.outputs.fields_container()[0]
assert f.shell_layers == shell_layers.layerindependent
def test_shell_layers_2(velocity_acceleration):
model = dpf.core.Model(velocity_acceleration)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
assert f.shell_layers == shell_layers.nonelayer
def test_mesh_support_field_model(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
mesh = f.meshed_region
assert len(mesh.nodes.scoping) == 15129
assert len(mesh.elements.scoping) == 10292
def test_delete_auto_field():
field = dpf.core.Field()
field2 = dpf.core.Field(field=field)
del field
with pytest.raises(Exception):
field2.get_ids()
def test_create_and_update_field_definition():
fieldDef = FieldDefinition()
assert fieldDef is not None
with pytest.raises(Exception):
assert fieldDef.location is None
fieldDef.location = locations.nodal
assert fieldDef.location == locations.nodal
def test_set_support_timefreq(simple_bar):
tfq = dpf.core.TimeFreqSupport()
time_frequencies = dpf.core.Field(
nature=dpf.core.natures.scalar, location=dpf.core.locations.time_freq
)
time_frequencies.scoping.location = dpf.core.locations.time_freq_step
time_frequencies.append([0.1, 0.32, 0.4], 1)
tfq.time_frequencies = time_frequencies
model = dpf.core.Model(simple_bar)
disp = model.results.displacement()
fc = disp.outputs.fields_container()
field = fc[0]
# initial_support = field.time_freq_support
# assert initial_support is None
field.time_freq_support = tfq
tfq_to_check = field.time_freq_support
assert np.allclose(tfq.time_frequencies.data, tfq_to_check.time_frequencies.data)
def test_set_support_mesh(simple_bar):
mesh = dpf.core.MeshedRegion()
mesh.nodes.add_node(1, [0.0, 0.0, 0.0])
model = dpf.core.Model(simple_bar)
disp = model.results.displacement()
fc = disp.outputs.fields_container()
field = fc[0]
field.meshed_region = mesh
mesh_to_check = field.meshed_region
assert mesh_to_check.nodes.n_nodes == 1
assert mesh_to_check.elements.n_elements == 0
mesh.nodes.add_node(2, [1.0, 0.0, 0.0])
mesh.nodes.add_node(3, [1.0, 1.0, 0.0])
mesh.nodes.add_node(4, [0.0, 1.0, 0.0])
field.meshed_region = mesh
mesh_to_check_2 = field.meshed_region
assert mesh_to_check_2.nodes.n_nodes == 4
assert mesh_to_check_2.elements.n_elements == 0
def test_local_field_append():
num_entities = 400
field_to_local = dpf.core.fields_factory.create_3d_vector_field(num_entities)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append([0.1 * i, 0.2 * i, 0.3 * i], i)
        assert f._is_set is True
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append([0.1 * i, 0.2 * i, 0.3 * i], i)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == 0
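# ``as_local_field()`` returns a context manager that accumulates data on the client and
# writes it back to the server-side field when the ``with`` block exits; ``_is_set`` flags
# that pending local changes exist. (Behavioral summary inferred from the assertions in
# these local-field tests.)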
def test_local_elemental_nodal_field_append():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]], i)
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]], i)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
# flat data
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append([0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i], i)
        assert f._is_set is True
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
def test_local_array_field_append():
num_entities = 400
field_to_local = dpf.core.fields_factory.create_3d_vector_field(num_entities)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(np.array([0.1 * i, 0.2 * i, 0.3 * i]), i)
assert f._is_set is True
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append(np.array([0.1 * i, 0.2 * i, 0.3 * i]), i)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == 0
def test_local_elemental_nodal_array_field_append():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(
np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
)
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append(
np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
# flat data
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(
np.array([0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i]), i
)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
def test_local_get_entity_data():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(np.array([[0.1 * i, 0.2 * i, 0.3 * i]]), i)
assert np.allclose(f.get_entity_data(i - 1), [[0.1 * i, 0.2 * i, 0.3 * i]])
assert np.allclose(
f.get_entity_data_by_id(i), [[0.1 * i, 0.2 * i, 0.3 * i]]
)
assert hasattr(f, "_is_set") is True
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
assert np.allclose(f.get_entity_data(i - 1), [[0.1 * i, 0.2 * i, 0.3 * i]])
assert np.allclose(
f.get_entity_data_by_id(i), [[0.1 * i, 0.2 * i, 0.3 * i]]
)
assert hasattr(f, "_is_set") is False
def test_local_elemental_nodal_get_entity_data():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(
np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
)
assert np.allclose(
f.get_entity_data(i - 1),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert np.allclose(
f.get_entity_data_by_id(i),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert hasattr(f, "_is_set") is True
assert f._is_set is True
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
assert np.allclose(
f.get_entity_data(i - 1),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert np.allclose(
f.get_entity_data_by_id(i),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert hasattr(f, "_is_set") is False
def test_auto_delete_field_local():
num_entities = 1
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
field_to_local.append([3.0, 4.0, 5.0], 1)
fc = dpf.core.fields_container_factory.over_time_freq_fields_container(
[field_to_local]
)
field_to_local = None
with fc[0].as_local_field() as f:
assert np.allclose(f.get_entity_data(0), [3.0, 4.0, 5.0])
def test_auto_delete_field_local2():
num_entities = 1
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
f = field_to_local.as_local_field()
f.append([3.0, 4.0, 5.0], 1)
del f
with field_to_local.as_local_field() as f:
assert np.allclose(f.get_entity_data(0), [3.0, 4.0, 5.0])
def test_get_set_data_local_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
2, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
f.data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
with field_to_local.as_local_field() as f:
f.data = [0.1, 0.2, 0.3, 0.1, 0.2, 0.3]
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
with field_to_local.as_local_field() as f:
f.data = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
def test_get_set_data_elemental_nodal_local_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
2, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
f.data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
f._data_pointer = [0, 6]
f.scoping_ids = [1, 2]
assert np.allclose(
f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
assert np.allclose(f._data_pointer, [0, 6])
assert np.allclose(f.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])
assert hasattr(f, "_is_set") is True
assert f._is_set is True
assert np.allclose(
field_to_local.data,
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]],
)
assert np.allclose(field_to_local._data_pointer, [0, 6])
assert np.allclose(
field_to_local.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
)
assert np.allclose(
field_to_local.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
with field_to_local.as_local_field() as f:
f.data = [0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.4]
f._data_pointer = [0, 6]
f.scoping_ids = [1, 2]
assert np.allclose(
f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
assert np.allclose(f._data_pointer, [0, 6])
assert np.allclose(f.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])
assert hasattr(f, "_is_set") is True
assert f._is_set is True
assert np.allclose(
field_to_local.data,
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]],
)
assert np.allclose(field_to_local._data_pointer, [0, 6])
assert np.allclose(
field_to_local.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
)
assert np.allclose(
field_to_local.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
with field_to_local.as_local_field() as f:
f.data = np.array(
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
f._data_pointer = [0, 6]
f.scoping_ids = [1, 2]
assert np.allclose(
f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
assert np.allclose(f._data_pointer, [0, 6])
assert np.allclose(f.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])
assert hasattr(f, "_is_set") is True
assert f._is_set is True
assert np.allclose(
field_to_local.data,
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]],
)
assert np.allclose(field_to_local._data_pointer, [0, 6])
assert np.allclose(
field_to_local.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
)
assert np.allclose(
field_to_local.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
def test_get_set_scoping_local_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
2, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
f.data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
f.scoping = dpf.core.Scoping(ids=[3, 4])
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.scoping_ids, [3, 4])
assert np.allclose(f.scoping.ids, [3, 4])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.scoping.ids, [3, 4])
def test_empty_data_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(100)
data = [1.0, 2.0, 3.0]
field_to_local.data = data
assert np.allclose(field_to_local.data, data)
field_to_local.data = []
assert len(field_to_local.data) == 0
def test_set_data_numpy_array_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(100)
arr = np.arange(300).reshape(100, 3)
field_to_local.data = arr
assert np.allclose(field_to_local.data, arr)
def test_field_huge_amount_of_data(allkindofcomplexity):
# set data with a field created from a model
model = dpf.core.Model(allkindofcomplexity)
field = model.results.displacement().outputs.fields_container()[0]
data = field.data
assert len(data) == 15113
field.data = data
new_data = field.data
assert np.allclose(data, new_data)
modif_data = data
modif_data[245] = 45
modif_data[1129] = 69
modif_data[7209] = 2086
modif_data[9046] = 12
modif_data[12897] = 7894
modif_data[15112] = 2789
field.data = modif_data
new_modif_data = field.data
assert np.allclose(new_modif_data, modif_data)
# set data with a field created from scratch
field = dpf.core.Field(nature=dpf.core.natures.scalar)
data = range(1, 1000000)
field.data = data
data_check = field.data
assert np.allclose(data_check, data)
modif_data = data_check
modif_data[245] = 45
modif_data[10046] = 69
modif_data[1999] = 2086
modif_data[50067] = 12
modif_data[999345] = 7894
modif_data[506734] = 2789
modif_data = modif_data.tolist()
field.data = modif_data
new_modif_data = field.data
assert np.allclose(new_modif_data, modif_data)
def test_deep_copy_field():
field = dpf.core.fields_factory.create_3d_vector_field(100)
arr = np.arange(300).reshape(100, 3)
field.data = arr
copy = field.deep_copy()
iden = dpf.core.operators.logic.identical_fields(field, copy)
assert iden.outputs.boolean()
assert field.unit == copy.unit
def test_deep_copy_elemental_nodal_field(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
field = stress.outputs.fields_container()[0]
copy = field.deep_copy()
iden = dpf.core.operators.logic.identical_fields(field, copy)
try:
assert iden.outputs.boolean()
except AssertionError as e:
print(iden.outputs.message())
raise e
mesh = field.meshed_region
copy = copy.meshed_region
assert copy.nodes.scoping.ids == mesh.nodes.scoping.ids
assert copy.elements.scoping.ids == mesh.elements.scoping.ids
assert copy.unit == mesh.unit
assert np.allclose(
copy.nodes.coordinates_field.data, mesh.nodes.coordinates_field.data
)
assert np.allclose(
copy.elements.element_types_field.data, mesh.elements.element_types_field.data
)
assert np.allclose(
copy.elements.connectivities_field.data, mesh.elements.connectivities_field.data
)
assert np.allclose(
copy.nodes.coordinates_field.scoping.ids,
mesh.nodes.coordinates_field.scoping.ids,
)
assert np.allclose(
copy.elements.element_types_field.scoping.ids,
mesh.elements.element_types_field.scoping.ids,
)
assert np.allclose(
copy.elements.connectivities_field.scoping.ids,
mesh.elements.connectivities_field.scoping.ids,
)
def test_deep_copy_over_time_field(velocity_acceleration):
model = dpf.core.Model(velocity_acceleration)
stress = model.results.stress(time_scoping=[1, 2, 3])
min_max = dpf.core.operators.min_max.min_max_fc(stress)
field = min_max.outputs.field_max()
copy = field.deep_copy()
iden = dpf.core.operators.logic.identical_fields(field, copy)
assert iden.outputs.boolean()
tf = field.time_freq_support
copy = copy.time_freq_support
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
def test_deep_copy_spec_ncomp_field():
field = dpf.core.fields_factory.create_vector_field(100, 6, dpf.core.locations.elemental)
arr = np.arange(600).reshape(100, 6)
field.data = arr
copy = field.deep_copy()
assert copy.component_count == 6
assert copy.location == dpf.core.locations.elemental
def test_add_operator_field():
field = dpf.core.fields_factory.create_3d_vector_field(2)
field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
field.scoping.ids = [1, 2]
# field+op
forward = ops.utility.forward_field(field)
add = field + forward
assert isinstance(add, ops.math.add)
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array(field.data) * 2.0)
# field + list
add = field + [0.0, 1.0, 2.0]
assert isinstance(add, ops.math.add)
out = add.outputs.field()
assert len(out) == 6
assert out.scoping.ids == [1, 2]
assert np.allclose(
out.data, field.data + np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])
)
# field + float
add = field + 1.0
assert isinstance(add, ops.math.add)
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
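# Arithmetic on Field instances is overloaded to build DPF operators rather than compute
# eagerly: ``+`` returns an ops.math.add, ``-`` an ops.math.minus and ``*`` an
# ops.math.generalized_inner_product, and the result is only evaluated when
# ``outputs.field()`` is requested, as the tests in this block do.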
def test_minus_operator_field():
field = dpf.core.fields_factory.create_3d_vector_field(2)
field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
field.scoping.ids = [1, 2]
# field-op
forward = ops.utility.forward_field(field)
add = field - forward
assert type(add) == ops.math.minus
out = add.outputs.field()
assert len(out) == 6
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.zeros((2, 3)))
    # field - list
add = field - [0.0, 1.0, 2.0]
assert type(add) == ops.math.minus
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]]))
    # field - float
add = field - 1.0
assert type(add) == ops.math.minus
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([[-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]]))
def test_dot_operator_field():
field = dpf.core.fields_factory.create_3d_vector_field(2)
field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
field.scoping.ids = [1, 2]
# field * op
forward = ops.utility.forward_field(field)
add = field * forward
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([5.0, 50.0]))
# field * field
add = field * field
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([5.0, 50.0]))
# field * list
add = field * [0.0, 1.0, 2.0]
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([5.0, 14.0]))
# field * float
add = field * -1.0
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, -field.data)
if __name__ == "__main__":
test_get_set_data_local_field()
```
#### File: pydpf-core/tests/test_plugins.py
```python
import os
import pytest
from ansys.dpf import core as dpf
def try_load_cff_operators():
try:
if os.name == "posix":
return False
dpf.load_library("Ans.Dpf.CFF.dll", "cff")
return True
except:
return False
# TODO: add loading for linux
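# Each plugin test follows the same pattern: a ``try_load_*`` helper attempts
# ``dpf.load_library(<dll name>, <key>)`` and the test is skipped when the plugin cannot
# be loaded on the current platform.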
@pytest.mark.skipif(not try_load_cff_operators(), reason="Couldn't load cff operators")
def test_cff(cff_data_sources):
m = dpf.Model(cff_data_sources)
assert m.metadata.meshed_region.nodes.n_nodes == 1430
op = dpf.Operator("cff::cas::SV_DENSITY")
op.connect(4, m.metadata.data_sources)
fc = op.get_output(0, dpf.types.fields_container)
assert len(fc[0]) == 1380
def try_load_lsdyna_operators():
try:
dpf.load_library("Ans.Dpf.LSDYNA.dll", "lsdyna")
return True
except:
return False
@pytest.mark.skipif(
not try_load_lsdyna_operators(), reason="Couldn't load lsdyna operators"
)
def test_lsdyna(d3plot):
dpf.load_library("Ans.Dpf.LSDYNA.dll", "lsdyna")
ds = dpf.DataSources()
ds.set_result_file_path(d3plot, "d3plot")
streams = dpf.operators.metadata.streams_provider(ds)
u = dpf.operators.result.displacement()
u.inputs.streams_container(streams)
fc = u.outputs.fields_container()
assert len(fc[0]) == 3195
def try_load_composites_operators():
try:
dpf.load_library("composite_operators.dll", "compo")
dpf.load_library("Ans.Dpf.EngineeringData.dll", "eng")
return True
except:
return False
@pytest.mark.skipif(
not try_load_composites_operators(), reason="Couldn't load composites operators"
)
def test_eng(engineering_data_sources):
dpf.load_library("composite_operators.dll", "compo")
dpf.load_library("Ans.Dpf.EngineeringData.dll", "eng")
m = dpf.Model(engineering_data_sources)
stress_op = dpf.operators.result.stress()
stress_op.inputs.data_sources.connect(engineering_data_sources)
result_info_provider = dpf.operators.metadata.result_info_provider()
result_info_provider.inputs.data_sources.connect(engineering_data_sources)
mat_support_operator = dpf.operators.metadata.material_support_provider()
mat_support_operator.inputs.data_sources.connect(engineering_data_sources)
ans_mat_operator = dpf.Operator("eng_data::ans_mat_material_provider")
ans_mat_operator.connect(0, mat_support_operator, 0)
ans_mat_operator.connect(1, result_info_provider, 0)
ans_mat_operator.connect(4, engineering_data_sources)
field_variable_provider = dpf.Operator(
"composite::inistate_field_variables_provider"
)
field_variable_provider.connect(4, engineering_data_sources)
field_variable_provider.inputs.mesh.connect(m.metadata.mesh_provider)
field_variable_provider.run()
```
#### File: pydpf-core/tests/test_timefreqsupport.py
```python
import numpy as np
import pytest
from ansys import dpf
from ansys.dpf.core import TimeFreqSupport, Model
from ansys.dpf.core import examples
from ansys.dpf.core import fields_factory
from ansys.dpf.core.common import locations
from ansys.dpf.core.check_version import meets_version, get_server_version
SERVER_VERSION_HIGHER_THAN_3_0 = meets_version(get_server_version(dpf.core._global_server()), "3.0")
@pytest.fixture()
def vel_acc_model(velocity_acceleration):
return dpf.core.Model(velocity_acceleration)
def test_get_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
assert res.n_sets == 5
assert res.get_frequency(0, 0) == 0.02
assert res.get_frequency(0, 1) == 0.04
assert res.get_frequency(cumulative_index=2) == 0.06
assert res.get_cumulative_index(0, 0) == 0
assert res.get_cumulative_index(freq=0.06) == 2
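# ``get_frequency`` accepts either a (step, substep) pair or a ``cumulative_index``
# keyword, and ``get_cumulative_index`` performs the reverse lookup from a (step, substep)
# pair or a ``freq`` value, as exercised above.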
def test_model_time_freq_support(vel_acc_model):
timefreq = vel_acc_model.metadata.time_freq_support
assert str(timefreq.n_sets) in str(timefreq)
assert len(timefreq.time_frequencies.data) == timefreq.n_sets
expected_data = [0.02, 0.04, 0.06, 0.08, 0.1]
assert np.allclose(expected_data, timefreq.time_frequencies.data)
def test_get_frequencies_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
freq = res.time_frequencies
assert np.allclose(freq.data, [0.02, 0.04, 0.06, 0.08, 0.1])
assert freq.scoping.ids == [1]
def test_print_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
assert "Number of sets: 5" in str(res)
assert "Time (s)" in str(res)
assert "LoadStep" in str(res)
assert "Substep" in str(res)
def test_delete_timefreqsupport(velocity_acceleration):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(velocity_acceleration)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
res.__del__()
with pytest.raises(Exception):
        res.get_frequency(0, 0)
def test_delete_auto_timefreqsupport(simple_rst):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(simple_rst)
op = dpf.core.Operator("mapdl::rst::TimeFreqSupportProvider")
op.connect(4, dataSource)
res = op.get_output(0, dpf.core.types.time_freq_support)
res1 = dpf.core.TimeFreqSupport(res._message)
res.__del__()
with pytest.raises(Exception):
res1.n_sets
def test_create_time_freq_support():
tfq = TimeFreqSupport()
assert tfq is not None
def test_update_time_freq_support_real_freq():
tfq = TimeFreqSupport()
frequencies = fields_factory.create_scalar_field(3)
frequencies.data = [0.1, 0.32, 0.4]
tfq.time_frequencies = frequencies
frequencies_check = tfq.time_frequencies
assert np.allclose(frequencies.data, frequencies_check.data)
assert tfq.rpms is None
assert tfq.complex_frequencies is None
def test_update_time_freq_support_im_freq():
tfq = TimeFreqSupport()
frequencies = fields_factory.create_scalar_field(3)
frequencies.data = [0.1, 0.32, 0.4]
tfq.complex_frequencies = frequencies
frequencies_check = tfq.complex_frequencies
assert np.allclose(frequencies.data, frequencies_check.data)
assert tfq.rpms is None
assert tfq.time_frequencies is None
def test_update_time_freq_support_rpms():
tfq = TimeFreqSupport()
rpm = fields_factory.create_scalar_field(3)
rpm.data = [0.1, 0.32, 0.4]
tfq.rpms = rpm
rpm_check = tfq.rpms
assert np.allclose(rpm.data, rpm_check.data)
assert tfq.time_frequencies is None
assert tfq.complex_frequencies is None
def test_update_time_freq_support_harmonic_indeces():
tfq = TimeFreqSupport()
harm = fields_factory.create_scalar_field(3)
harm.data = [0.1, 0.32, 0.4]
tfq.set_harmonic_indices(harm)
harm_check = tfq.get_harmonic_indices()
assert np.allclose(harm.data, harm_check.data)
assert tfq.time_frequencies is None
assert tfq.complex_frequencies is None
assert tfq.rpms is None
def test_update_time_freq_support_harmonic_indeces_with_num_stage():
tfq = TimeFreqSupport()
harm = fields_factory.create_scalar_field(3)
harm.data = [0.12, 0.32, 0.8]
tfq.set_harmonic_indices(harm, 2)
harm_check = tfq.get_harmonic_indices(2)
assert np.allclose(harm.data, harm_check.data)
assert tfq.time_frequencies is None
assert tfq.complex_frequencies is None
assert tfq.rpms is None
harm_check_2 = tfq.get_harmonic_indices(3)
assert harm_check_2 is None
harm_check_3 = tfq.get_harmonic_indices(0)
assert harm_check_3 is None
harm_check_4 = tfq.get_harmonic_indices()
assert harm_check_4 is None
def test_update_time_freq_support_real_freq_with_ds(velocity_acceleration):
model = Model(velocity_acceleration)
disp = model.results.displacement()
tfq = disp.outputs.fields_container().time_freq_support
assert tfq.time_frequencies is not None
frequencies = fields_factory.create_scalar_field(3)
frequencies.data = [0.1, 0.32, 0.4]
tfq.time_frequencies = frequencies
frequencies_check = tfq.time_frequencies
assert np.allclose(frequencies.data, frequencies_check.data)
def test_append_step_1():
tfq = TimeFreqSupport()
frequencies = [0.1, 0.21, 1.0]
tfq.append_step(1, frequencies, rpm_value=2.0)
assert len(tfq.rpms.data) == 1
assert len(tfq.time_frequencies.data) == 3
assert tfq.rpms.location == locations.time_freq_step
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose(frequencies, tfq.time_frequencies.data)
assert np.allclose(2.0, tfq.rpms.data)
assert tfq.complex_frequencies is None
assert tfq.get_harmonic_indices() is None
frequencies2 = [1.1, 2.0]
tfq.append_step(1, frequencies2, rpm_value=2.0)
assert len(tfq.rpms.data) == 2
assert len(tfq.time_frequencies.data) == 5
assert tfq.rpms.location == locations.time_freq_step
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose(frequencies + frequencies2, tfq.time_frequencies.data)
assert np.allclose(2.0, tfq.rpms.data)
assert tfq.complex_frequencies is None
assert tfq.get_harmonic_indices() is None
def test_append_step_2():
tfq = TimeFreqSupport()
tfq.append_step(
1, [0.1, 0.21, 1.0], rpm_value=2.0, step_harmonic_indices=[1.0, 2.0, 3.0]
)
tfq.append_step(2, [1.1, 2.0], rpm_value=2.3, step_harmonic_indices=[1.0, 2.0])
tfq.append_step(3, [0.23, 0.25], rpm_value=3.0, step_harmonic_indices=[1.0, 2.0])
assert len(tfq.rpms.data) == 3
assert len(tfq.time_frequencies.data) == 7
assert len(tfq.get_harmonic_indices().data) == 7
assert tfq.rpms.location == locations.time_freq_step
assert tfq.get_harmonic_indices().location == locations.time_freq
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose(
[0.1, 0.21, 1.0, 1.1, 2.0, 0.23, 0.25], tfq.time_frequencies.data
)
assert np.allclose([2.0, 2.3, 3.0], tfq.rpms.data)
assert tfq.complex_frequencies is None
def test_append_step_3():
tfq = TimeFreqSupport()
tfq.append_step(
1,
[0.1, 0.21],
rpm_value=2.0,
step_harmonic_indices={1: [1.0, 2.0], 2: [3.0, 3.1]},
)
assert len(tfq.rpms.data) == 1
assert len(tfq.time_frequencies.data) == 2
assert len(tfq.get_harmonic_indices(1).data) == 2
assert len(tfq.get_harmonic_indices(2).data) == 2
assert tfq.get_harmonic_indices() is None
assert tfq.rpms.location == locations.time_freq_step
assert tfq.get_harmonic_indices(1).location == locations.time_freq
assert tfq.get_harmonic_indices(2).location == locations.time_freq
assert tfq.time_frequencies.location == locations.time_freq
assert np.allclose([1.0, 2.0], tfq.get_harmonic_indices(1).data)
assert np.allclose([3.0, 3.1], tfq.get_harmonic_indices(2).data)
assert tfq.complex_frequencies is None
def test_deep_copy_time_freq_support(velocity_acceleration):
model = Model(velocity_acceleration)
tf = model.metadata.time_freq_support
copy = tf.deep_copy()
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
def test_deep_copy_time_freq_support_harmonic():
model = Model(examples.download_multi_harmonic_result())
tf = model.metadata.time_freq_support
copy = tf.deep_copy()
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
assert tf.time_frequencies.unit == copy.time_frequencies.unit
assert np.allclose(tf.complex_frequencies.data, copy.complex_frequencies.data)
assert tf.complex_frequencies.scoping.ids == copy.complex_frequencies.scoping.ids
assert np.allclose(tf.rpms.data, copy.rpms.data)
assert tf.rpms.scoping.ids == copy.rpms.scoping.ids
def test_deep_copy_time_freq_support_multi_stage():
model = Model(examples.download_multi_stage_cyclic_result())
tf = model.metadata.time_freq_support
copy = tf.deep_copy()
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
assert tf.time_frequencies.unit == copy.time_frequencies.unit
assert np.allclose(
tf.get_harmonic_indices(0).data, copy.get_harmonic_indices(0).data
)
assert (
tf.get_harmonic_indices(0).scoping.ids
== copy.get_harmonic_indices(0).scoping.ids
)
assert np.allclose(
tf.get_harmonic_indices(1).data, copy.get_harmonic_indices(1).data
)
assert (
tf.get_harmonic_indices(1).scoping.ids
== copy.get_harmonic_indices(1).scoping.ids
)
assert len(tf.get_harmonic_indices(0).data) == 6
assert len(tf.get_harmonic_indices(1).data) == 6
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_operator_connect_get_output_time_freq_support(velocity_acceleration):
model = Model(velocity_acceleration)
tf = model.metadata.time_freq_support
op = dpf.core.operators.utility.forward(tf)
tfout = op.get_output(0, dpf.core.types.time_freq_support)
assert np.allclose(tf.time_frequencies.data, tfout.time_frequencies.data)
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_workflow_connect_get_output_time_freq_support(velocity_acceleration):
model = Model(velocity_acceleration)
tf = model.metadata.time_freq_support
wf = dpf.core.Workflow()
op = dpf.core.operators.utility.forward()
wf.set_input_name("tf", op, 0)
wf.set_output_name("tf", op, 0)
wf.connect("tf", tf)
tfout = wf.get_output("tf", dpf.core.types.time_freq_support)
assert np.allclose(tf.time_frequencies.data, tfout.time_frequencies.data)
```
#### File: pydpf-core/tests/test_workflow.py
```python
import numpy as np
import pytest
import ansys.dpf.core.operators as op
import conftest
from ansys import dpf
from ansys.dpf.core.check_version import meets_version, get_server_version
SERVER_VERSION_HIGHER_THAN_3_0 = meets_version(get_server_version(dpf.core._global_server()), "3.0")
def test_create_workflow():
wf = dpf.core.Workflow()
assert wf._message.id
def test_connect_field_workflow():
wf = dpf.core.Workflow()
op = dpf.core.Operator("min_max")
inpt = dpf.core.Field(nentities=3)
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
scop = dpf.core.Scoping()
scop.ids = [1, 2, 3]
inpt.data = data
inpt.scoping = scop
wf.add_operator(op)
wf.set_input_name("field", op, 0)
wf.set_output_name("min", op, 0)
wf.set_output_name("max", op, 1)
wf.connect("field", inpt)
fOut = wf.get_output("min", dpf.core.types.field)
assert np.allclose(fOut.data, [1.0, 2.0, 3.0])
fOut = wf.get_output("max", dpf.core.types.field)
assert np.allclose(fOut.data, [7.0, 8.0, 9.0])
wf = dpf.core.Workflow()
wf.set_input_name("field", op.inputs.field)
wf.set_output_name("min", op.outputs.field_min)
wf.set_output_name("max", op.outputs.field_max)
wf.connect("field", inpt)
fOut = wf.get_output("min", dpf.core.types.field)
assert np.allclose(fOut.data, [1.0, 2.0, 3.0])
fOut = wf.get_output("max", dpf.core.types.field)
assert np.allclose(fOut.data, [7.0, 8.0, 9.0])
def test_connect_list_workflow(velocity_acceleration):
wf = dpf.core.Workflow()
model = dpf.core.Model(velocity_acceleration)
op = model.operator("U")
wf.add_operator(op)
wf.set_input_name("time_scoping", op, 0)
wf.set_output_name("field", op, 0)
wf.connect("time_scoping", [1, 2])
fcOut = wf.get_output("field", dpf.core.types.fields_container)
assert fcOut.get_available_ids_for_label() == [1, 2]
wf.set_input_name("time_scoping", op.inputs.time_scoping)
wf.set_output_name("field", op.outputs.fields_container)
wf.connect("time_scoping", [1, 2])
fcOut = wf.get_output("field", dpf.core.types.fields_container)
assert fcOut.get_available_ids_for_label() == [1, 2]
def test_connect_fieldscontainer_workflow():
wf = dpf.core.Workflow()
op = dpf.core.Operator("min_max_fc")
wf.add_operator(op)
fc = dpf.core.FieldsContainer()
fc.labels = ["time", "complex"]
scop = dpf.core.Scoping()
scop.ids = list(range(1, 11))
for i in range(0, 20):
mscop = {"time": i + 1, "complex": 0}
field = dpf.core.Field(nentities=10)
field.scoping = scop
field.data = np.zeros(len(field.scoping) * 3)
fc.add_field(mscop, field)
wf = dpf.core.Workflow()
wf.set_input_name("fields_container", op, 0)
wf.set_output_name("field", op, 0)
wf.connect("fields_container", fc)
fOut = wf.get_output("field", dpf.core.types.field)
assert fOut.data.size == 60
def test_connect_fieldscontainer_2_workflow():
wf = dpf.core.Workflow()
op = dpf.core.Operator("min_max_fc")
wf.add_operator(op)
fc = dpf.core.FieldsContainer()
fc.labels = ["time", "complex"]
scop = dpf.core.Scoping()
scop.ids = list(range(1, 11))
for i in range(0, 20):
mscop = {"time": i + 1, "complex": 0}
field = dpf.core.Field(nentities=10)
field.scoping = scop
field.data = np.zeros(len(field.scoping) * 3)
fc.add_field(mscop, field)
wf = dpf.core.Workflow()
wf.set_input_name("fields_container", op.inputs.fields_container)
wf.set_output_name("field", op.outputs.field_min)
wf.connect("fields_container", fc)
fOut = wf.get_output("field", dpf.core.types.field)
assert fOut.data.size == 60
def test_connect_bool_workflow():
op = dpf.core.Operator("S")
wf = dpf.core.Workflow()
wf.add_operator(op)
wf.set_input_name("bool", op, 5)
wf.connect("bool", True)
wf = dpf.core.Workflow()
wf.add_operator(op)
wf.set_input_name("bool", op.inputs.bool_rotate_to_global)
wf.connect("bool", True)
def test_connect_scoping_workflow():
op = dpf.core.Operator("Rescope")
scop = dpf.core.Scoping()
scop.ids = list(range(1, 11))
field = dpf.core.Field(nentities=10)
field.scoping = scop
field.data = np.zeros(len(field.scoping) * 3)
scop = dpf.core.Scoping()
scop.ids = list(range(1, 11))
scop2 = dpf.core.Scoping()
scop2.ids = list(range(1, 5))
wf = dpf.core.Workflow()
wf.add_operator(op)
wf.set_input_name("field", op, 0)
wf.connect("field", field)
wf.set_input_name("mesh_scoping", op, 1)
wf.connect("mesh_scoping", scop2)
wf.set_output_name("field", op, 0)
fOut = wf.get_output("field", dpf.core.types.field)
scopOut = fOut.scoping
assert scopOut.ids == list(range(1, 5))
def test_connect_scoping_2_workflow():
op = dpf.core.Operator("Rescope")
scop = dpf.core.Scoping()
scop.ids = list(range(1, 11))
field = dpf.core.Field(nentities=10)
field.scoping = scop
field.data = np.zeros(len(field.scoping) * 3)
scop = dpf.core.Scoping()
scop.ids = list(range(1, 11))
scop2 = dpf.core.Scoping()
scop2.ids = list(range(1, 5))
wf = dpf.core.Workflow()
wf.add_operator(op)
wf.set_input_name("field", op.inputs.fields)
wf.connect("field", field)
wf.set_input_name("mesh_scoping", op.inputs.mesh_scoping)
wf.connect("mesh_scoping", scop2)
wf.set_output_name("field", op, 0)
fOut = wf.get_output("field", dpf.core.types.field)
scopOut = fOut.scoping
assert scopOut.ids == list(range(1, 5))
def test_connect_datasources_workflow(fields_container_csv):
op = dpf.core.Operator("csv_to_field")
data_sources = dpf.core.DataSources()
data_sources.set_result_file_path(fields_container_csv)
wf = dpf.core.Workflow()
wf.add_operator(op)
wf.set_input_name("data_sources", op, 4)
wf.connect("data_sources", data_sources)
wf.set_output_name("fields_container", op, 0)
fcOut = wf.get_output("fields_container", dpf.core.types.fields_container)
assert len(fcOut.get_available_ids_for_label()) == 4
wf = dpf.core.Workflow()
wf.add_operator(op)
wf.set_input_name("data_sources", op.inputs.data_sources)
wf.connect("data_sources", data_sources)
wf.set_output_name("fields_container", op, 0)
fcOut = wf.get_output("fields_container", dpf.core.types.fields_container)
assert len(fcOut.get_available_ids_for_label()) == 4
def test_connect_operator_workflow():
op = dpf.core.Operator("norm")
inpt = dpf.core.Field(nentities=3)
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
scop = dpf.core.Scoping()
scop.ids = [1, 2, 3]
inpt.data = data
inpt.scoping = scop
op.connect(0, inpt)
op2 = dpf.core.Operator("component_selector")
wf = dpf.core.Workflow()
wf.add_operator(op2)
wf.set_input_name("fields_container", op2, 0)
wf.set_input_name("comp", op2, 1)
wf.connect("fields_container", op, 0)
wf.set_output_name("field", op, 0)
wf.connect("comp", 0)
fOut = wf.get_output("field", dpf.core.types.field)
assert len(fOut.data) == 3
def test_connect_operator_2_workflow():
op = dpf.core.Operator("norm")
inpt = dpf.core.Field(nentities=3)
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
scop = dpf.core.Scoping()
scop.ids = [1, 2, 3]
inpt.data = data
inpt.scoping = scop
op.connect(0, inpt)
op2 = dpf.core.Operator("component_selector")
wf = dpf.core.Workflow()
wf.add_operator(op2)
wf.set_input_name("field", op2.inputs.field)
wf.set_input_name("comp", op2.inputs.component_number)
wf.connect("field", op.outputs.field)
wf.set_output_name("field", op, 0)
wf.connect("comp", 0)
fOut = wf.get_output("field", dpf.core.types.field)
assert len(fOut.data) == 3
def test_output_mesh_workflow(cyclic_lin_rst, cyclic_ds):
data_sources = dpf.core.DataSources(cyclic_lin_rst)
data_sources.add_file_path(cyclic_ds)
model = dpf.core.Model(data_sources)
op = model.operator("mapdl::rst::U")
assert "data_sources" in str(op.inputs)
assert "fields_container" in str(op.outputs)
support = model.operator("mapdl::rst::support_provider_cyclic")
expand = model.operator("cyclic_expansion")
wf = dpf.core.Workflow()
wf.add_operators([support, expand])
wf.set_input_name("support", expand.inputs.cyclic_support)
wf.set_input_name("fields", expand.inputs.fields_container)
wf.connect("fields", op.outputs.fields_container)
wf.connect("support", support.outputs.cyclic_support)
wf.set_output_name("fields", op, 0)
mesh = model.operator("cyclic_expansion_mesh")
wf.add_operator(mesh)
wf.set_input_name("support", mesh.inputs.cyclic_support)
wf.connect("support", support.outputs.cyclic_support)
wf.set_output_name("mesh", mesh, 0)
meshed_region = wf.get_output("mesh", dpf.core.types.meshed_region)
coord = meshed_region.nodes.coordinates_field
assert coord.shape == (meshed_region.nodes.n_nodes, 3)
assert (
meshed_region.elements.connectivities_field.data.size
== meshed_region.elements.connectivities_field.size
)
fields = wf.get_output("fields", dpf.core.types.fields_container)
def test_outputs_bool_workflow():
inpt = dpf.core.Field(nentities=3)
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
scop = dpf.core.Scoping()
scop.ids = [1, 2, 3]
inpt.data = data
inpt.scoping = scop
op = dpf.core.Operator("AreFieldsIdentical")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("fieldA", op.inputs.fieldA)
wf.set_input_name("fieldB", op.inputs.fieldB)
wf.connect("fieldA", inpt)
wf.connect("fieldB", inpt)
wf.set_output_name("bool", op, 0)
out = wf.get_output("bool", dpf.core.types.bool)
assert out == True
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_connect_get_output_int_list_workflow():
d = list(range(0, 1000000))
wf = dpf.core.Workflow()
op = dpf.core.operators.utility.forward(d)
wf.add_operators([op])
wf.set_input_name("in", op, 0)
wf.set_output_name("out", op, 0)
dout = wf.get_output("out", dpf.core.types.vec_int)
assert np.allclose(d, dout)
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_connect_get_output_double_list_workflow():
d = list(np.ones(500000))
wf = dpf.core.Workflow()
op = dpf.core.operators.utility.forward(d)
wf.add_operators([op])
wf.set_input_name("in", op, 0)
wf.set_output_name("out", op, 0)
dout = wf.get_output("out", dpf.core.types.vec_double)
assert np.allclose(d, dout)
def test_inputs_outputs_inputs_outputs_scopings_container_workflow(allkindofcomplexity):
data_sources = dpf.core.DataSources(allkindofcomplexity)
model = dpf.core.Model(data_sources)
op = dpf.core.Operator("scoping::by_property")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("mesh", op.inputs.mesh)
wf.set_input_name("prop", op.inputs.label1)
wf.connect("mesh", model.metadata.meshed_region)
wf.connect("prop", "elshape")
wf.set_output_name("scopings", op, 0)
sc = wf.get_output("scopings", dpf.core.types.scopings_container)
op = dpf.core.Operator("forward")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("a", op, 0)
wf.set_output_name("a", op, 0)
wf.connect("a", sc)
out = wf.get_output("a", dpf.core.types.scopings_container)
assert len(out) == len(sc)
def test_inputs_outputs_inputs_outputs_meshes_container_workflow(allkindofcomplexity):
data_sources = dpf.core.DataSources(allkindofcomplexity)
model = dpf.core.Model(data_sources)
op = dpf.core.Operator("split_mesh")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("mesh", op.inputs.mesh)
wf.set_input_name("prop", op.inputs.property)
wf.connect("mesh", model.metadata.meshed_region)
wf.connect("prop", "elshape")
wf.set_output_name("meshes", op, 0)
mc = wf.get_output("meshes", dpf.core.types.meshes_container)
op = dpf.core.Operator("forward")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("a", op, 0)
wf.set_output_name("a", op, 0)
wf.connect("a", mc)
out = wf.get_output("a", dpf.core.types.meshes_container)
assert len(out) == len(mc)
def test_record_workflow(allkindofcomplexity):
data_sources = dpf.core.DataSources(allkindofcomplexity)
model = dpf.core.Model(data_sources)
op = dpf.core.Operator("scoping::by_property")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("mesh", op.inputs.mesh)
wf.set_input_name("prop", op.inputs.label1)
wf.connect("mesh", model.metadata.meshed_region)
wf.connect("prop", "elshape")
wf.set_output_name("scopings", op, 0)
id = wf.record()
op = dpf.core.Operator("forward")
wf2 = dpf.core.Workflow()
wf2.add_operators([op])
wf2.set_input_name("a", op, 0)
wf2.set_output_name("a", op, 0)
id2 = wf2.record()
wf_copy = dpf.core.Workflow.get_recorded_workflow(id)
wf2_copy = dpf.core.Workflow.get_recorded_workflow(id2)
sc = wf_copy.get_output("scopings", dpf.core.types.scopings_container)
wf2_copy.connect("a", sc)
out = wf2_copy.get_output("a", dpf.core.types.scopings_container)
assert len(out) == len(sc)
def test_transfer_owner_workflow(allkindofcomplexity):
data_sources = dpf.core.DataSources(allkindofcomplexity)
model = dpf.core.Model(data_sources)
op = dpf.core.Operator("scoping::by_property")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("mesh", op.inputs.mesh)
wf.set_input_name("prop", op.inputs.label1)
wf.connect("mesh", model.metadata.meshed_region)
wf.connect("prop", "elshape")
wf.set_output_name("scopings", op, 0)
id = wf.record(transfer_ownership=True)
wf_copy = dpf.core.Workflow.get_recorded_workflow(id)
with pytest.raises(Exception):
wf_copy = dpf.core.Workflow.get_recorded_workflow(id)
id = wf.record(transfer_ownership=False)
wf_copy = dpf.core.Workflow.get_recorded_workflow(id)
wf_copy = dpf.core.Workflow.get_recorded_workflow(id)
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_connect_with_workflow(cyclic_lin_rst, cyclic_ds):
data_sources = dpf.core.DataSources(cyclic_lin_rst)
data_sources.add_file_path(cyclic_ds)
model = dpf.core.Model(data_sources)
support = model.operator("mapdl::rst::support_provider_cyclic")
mesh = model.operator("cyclic_expansion_mesh")
wf = dpf.core.Workflow()
wf.add_operators([support, mesh])
wf.set_input_name("support", mesh.inputs.cyclic_support)
wf.connect("support", support.outputs.cyclic_support)
wf.set_output_name("mesh_expand", mesh, 0)
wf.set_output_name("support", mesh, 1)
op = model.operator("mapdl::rst::U")
expand = model.operator("cyclic_expansion")
expand.connect(0, op, 0)
wf2 = dpf.core.Workflow()
wf2.add_operators([op, expand])
wf2.set_input_name("support", expand.inputs.cyclic_support)
wf2.set_output_name("u", op, 0)
wf2.connect_with(wf)
meshed_region = wf2.get_output("mesh_expand", dpf.core.types.meshed_region)
fc = wf2.get_output("u", dpf.core.types.fields_container)
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_connect_with_2_workflow(cyclic_lin_rst, cyclic_ds):
data_sources = dpf.core.DataSources(cyclic_lin_rst)
data_sources.add_file_path(cyclic_ds)
model = dpf.core.Model(data_sources)
support = model.operator("mapdl::rst::support_provider_cyclic")
mesh = model.operator("cyclic_expansion_mesh")
wf = dpf.core.Workflow()
wf.add_operators([support, mesh])
wf.set_input_name("support", mesh.inputs.cyclic_support)
wf.connect("support", support.outputs.cyclic_support)
wf.set_output_name("mesh_expand", mesh, 0)
wf.set_output_name("support1", mesh, 1)
op = model.operator("mapdl::rst::U")
expand = model.operator("cyclic_expansion")
expand.connect(0, op, 0)
wf2 = dpf.core.Workflow()
wf2.add_operators([op, expand])
wf2.set_input_name("support2", expand.inputs.cyclic_support)
wf2.set_output_name("u", op, 0)
wf2.connect_with(wf, ("support1", "support2"))
meshed_region = wf2.get_output("mesh_expand", dpf.core.types.meshed_region)
fc = wf2.get_output("u", dpf.core.types.fields_container)
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_connect_with_dict_workflow(cyclic_lin_rst, cyclic_ds):
data_sources = dpf.core.DataSources(cyclic_lin_rst)
data_sources.add_file_path(cyclic_ds)
model = dpf.core.Model(data_sources)
support = model.operator("mapdl::rst::support_provider_cyclic")
mesh = model.operator("cyclic_expansion_mesh")
wf = dpf.core.Workflow()
wf.add_operators([support, mesh])
wf.set_input_name("support", mesh.inputs.cyclic_support)
wf.connect("support", support.outputs.cyclic_support)
wf.set_output_name("mesh_expand", mesh, 0)
wf.set_output_name("support1", mesh, 1)
op = model.operator("mapdl::rst::U")
expand = model.operator("cyclic_expansion")
expand.connect(0, op, 0)
wf2 = dpf.core.Workflow()
wf2.add_operators([op, expand])
wf2.set_input_name("support2", expand.inputs.cyclic_support)
wf2.set_output_name("u", op, 0)
wf2.connect_with(wf, {"support1": "support2"})
meshed_region = wf2.get_output("mesh_expand", dpf.core.types.meshed_region)
fc = wf2.get_output("u", dpf.core.types.fields_container)
def test_info_workflow(allkindofcomplexity):
data_sources = dpf.core.DataSources(allkindofcomplexity)
model = dpf.core.Model(data_sources)
op = dpf.core.Operator("scoping::by_property")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("mesh", op.inputs.mesh)
wf.set_input_name("prop", op.inputs.label1)
wf.connect("mesh", model.metadata.meshed_region)
wf.connect("prop", "elshape")
wf.set_output_name("scopings", op, 0)
assert wf.info["operator_names"] == ["scoping::by_property"]
assert wf.info["input_names"] == ["mesh", "prop"]
assert wf.info["output_names"] == ["scopings"]
assert wf.operator_names == ["scoping::by_property"]
assert wf.input_names == ["mesh", "prop"]
assert wf.output_names == ["scopings"]
def test_print_workflow():
inpt = dpf.core.Field(nentities=3)
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
scop = dpf.core.Scoping()
scop.ids = [1, 2, 3]
inpt.data = data
inpt.scoping = scop
op = dpf.core.Operator("AreFieldsIdentical")
wf = dpf.core.Workflow()
wf.add_operators([op])
wf.set_input_name("fieldA", op.inputs.fieldA)
wf.set_input_name("fieldB", op.inputs.fieldB)
wf.connect("fieldA", inpt)
wf.connect("fieldB", inpt)
wf.set_output_name("bool", op, 0)
assert "AreFieldsIdentical" in str(wf)
assert "input pins" in str(wf)
assert "fieldA" in str(wf)
assert "fieldB" in str(wf)
assert "output pins" in str(wf)
assert "bool" in str(wf)
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_throws_error(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
wf = dpf.core.Workflow()
op = model.results.stress()
op.inputs.read_cyclic(3)
opnorm = dpf.core.operators.averaging.to_nodal_fc(op)
add = dpf.core.operators.math.add_fc(opnorm, opnorm)
add2 = dpf.core.operators.math.add_fc(add, add)
add3 = dpf.core.operators.math.add_fc(add2)
add4 = dpf.core.operators.math.add_fc(add3, add3)
wf.add_operators([op, opnorm, add, add2, add3, add4])
wf.set_output_name("output", add4, 0)
fc = wf.get_output("output", dpf.core.types.fields_container)
assert len(fc) == 2
add4.connect(1, 1)
with pytest.raises(Exception):
fc = wf.get_output("output", dpf.core.types.fields_container)
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_flush_workflows_session(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
wf = dpf.core.Workflow()
op = model.results.stress()
op.inputs.read_cyclic(3)
opnorm = dpf.core.operators.averaging.to_nodal_fc(op)
add = dpf.core.operators.math.add_fc(opnorm, opnorm)
add2 = dpf.core.operators.math.add_fc(add, add)
add3 = dpf.core.operators.math.add_fc(add2)
add4 = dpf.core.operators.math.add_fc(add3, add3)
wf.add_operators([op, opnorm, add, add2, add3, add4])
wf.set_output_name("output", add4, 0)
fc = wf.get_output("output", dpf.core.types.fields_container)
assert len(fc) == 2
wf = dpf.core.Workflow()
op = model.results.stress()
op.inputs.read_cyclic(3)
opnorm = dpf.core.operators.averaging.to_nodal_fc(op)
add = dpf.core.operators.math.add_fc(opnorm, opnorm)
add2 = dpf.core.operators.math.add_fc(add, add)
add3 = dpf.core.operators.math.add_fc(add2)
add4 = dpf.core.operators.math.add_fc(add3, add3)
wf.add_operators([op, opnorm, add, add2, add3, add4])
wf.set_output_name("output", add4, 0)
fc = wf.get_output("output", dpf.core.types.fields_container)
assert len(fc) == 2
wf._server._session.flush_workflows()
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_create_on_other_server_workflow(local_server):
disp_op = op.result.displacement()
max_fc_op = op.min_max.min_max_fc(disp_op)
workflow = dpf.core.Workflow()
workflow.add_operators([disp_op, max_fc_op])
workflow.set_input_name("data_sources", disp_op.inputs.data_sources)
workflow.set_output_name("min", max_fc_op.outputs.field_min)
workflow.set_output_name("max", max_fc_op.outputs.field_max)
new_workflow = workflow.create_on_other_server(local_server)
assert new_workflow.input_names == ['data_sources']
assert new_workflow.output_names == ['max', 'min']
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_create_on_other_server2_workflow(local_server):
disp_op = op.result.displacement()
max_fc_op = op.min_max.min_max_fc(disp_op)
workflow = dpf.core.Workflow()
workflow.add_operators([disp_op, max_fc_op])
workflow.set_input_name("data_sources", disp_op.inputs.data_sources)
workflow.set_output_name("min", max_fc_op.outputs.field_min)
workflow.set_output_name("max", max_fc_op.outputs.field_max)
new_workflow = workflow.create_on_other_server(server=local_server)
assert new_workflow.input_names == ['data_sources']
assert new_workflow.output_names == ['max', 'min']
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_create_on_other_server_with_ip_workflow(local_server):
disp_op = op.result.displacement()
max_fc_op = op.min_max.min_max_fc(disp_op)
workflow = dpf.core.Workflow()
workflow.add_operators([disp_op, max_fc_op])
workflow.set_input_name("data_sources", disp_op.inputs.data_sources)
workflow.set_output_name("min", max_fc_op.outputs.field_min)
workflow.set_output_name("max", max_fc_op.outputs.field_max)
new_workflow = workflow.create_on_other_server(
ip=local_server.ip,
port=local_server.port)
assert new_workflow.input_names == ['data_sources']
assert new_workflow.output_names == ['max', 'min']
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_create_on_other_server_with_address_workflow(local_server):
disp_op = op.result.displacement()
max_fc_op = op.min_max.min_max_fc(disp_op)
workflow = dpf.core.Workflow()
workflow.add_operators([disp_op, max_fc_op])
workflow.set_input_name("data_sources", disp_op.inputs.data_sources)
workflow.set_output_name("min", max_fc_op.outputs.field_min)
workflow.set_output_name("max", max_fc_op.outputs.field_max)
new_workflow = workflow.create_on_other_server(
address=local_server.ip + ":" + str(local_server.port))
assert new_workflow.input_names == ['data_sources']
assert new_workflow.output_names == ['max', 'min']
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_create_on_other_server_with_address2_workflow(local_server):
disp_op = op.result.displacement()
max_fc_op = op.min_max.min_max_fc(disp_op)
workflow = dpf.core.Workflow()
workflow.add_operators([disp_op, max_fc_op])
workflow.set_input_name("data_sources", disp_op.inputs.data_sources)
workflow.set_output_name("min", max_fc_op.outputs.field_min)
workflow.set_output_name("max", max_fc_op.outputs.field_max)
new_workflow = workflow.create_on_other_server(
local_server.ip + ":" + str(local_server.port))
assert new_workflow.input_names == ['data_sources']
assert new_workflow.output_names == ['max', 'min']
@pytest.mark.skipif(not SERVER_VERSION_HIGHER_THAN_3_0,
reason='Requires server version higher than 3.0')
def test_create_on_other_server_and_connect_workflow(allkindofcomplexity, local_server):
disp_op = op.result.displacement()
max_fc_op = op.min_max.min_max_fc(disp_op)
workflow = dpf.core.Workflow()
workflow.add_operators([disp_op, max_fc_op])
workflow.set_input_name("data_sources", disp_op.inputs.data_sources)
workflow.set_output_name("min", max_fc_op.outputs.field_min)
workflow.set_output_name("max", max_fc_op.outputs.field_max)
new_workflow = workflow.create_on_other_server(local_server)
new_workflow.connect("data_sources", dpf.core.DataSources(allkindofcomplexity))
max = new_workflow.get_output("max", dpf.core.types.field)
assert np.allclose(max.data, [[8.50619058e+04, 1.04659292e+01, 3.73620870e+05]])
def main():
test_connect_field_workflow()
velocity_acceleration = conftest.resolve_test_file(
"velocity_acceleration.rst", "rst_operators"
)
test_connect_list_workflow(velocity_acceleration)
test_connect_fieldscontainer_workflow()
test_connect_fieldscontainer_2_workflow()
test_connect_bool_workflow()
test_connect_scoping_workflow()
test_connect_scoping_2_workflow()
fields_container_csv = conftest.resolve_test_file(
"fields_container.csv", "csvToField"
)
test_connect_datasources_workflow(fields_container_csv)
test_connect_operator_workflow()
test_connect_operator_2_workflow()
if __name__ == "__main__":
main()
```
|
{
"source": "jfthuong/toshling",
"score": 2
}
|
#### File: toshling/toshling/_client.py
```python
from . import _endpoints as endpoints
import requests
import json
from statham.schema.elements import Object
from statham.schema.constants import NotPassed
class StathamJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Object):
return {type(o).properties[k].source: v for k, v in o._dict.items() if not isinstance(v, NotPassed)}
return json.JSONEncoder.default(self, o)
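# Illustrative note: request() below serialises statham objects with this
# encoder, e.g.
#
#     payload = json.dumps(argument, cls=StathamJSONEncoder)
#
# so each Object is written under its source property names and NotPassed
# placeholders are dropped.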
class Client:
def __init__(self, api_key, api_endpoint_base='https://api2.toshl.com'):
self.api_key = api_key
self.api_endpoint_base = api_endpoint_base
self.accounts = endpoints.Accounts(self)
self.budgets = endpoints.Budgets(self)
self.categories = endpoints.Categories(self)
self.currencies = endpoints.Currencies(self)
self.entries = endpoints.Entries(self)
self.exports = endpoints.Exports(self)
self.images = endpoints.Images(self)
self.me = endpoints.Me(self)
self.tags = endpoints.Tags(self)
def request(self, href, method, argument_type=None, return_type=None, **kwargs):
options = {}
if argument_type:
# Remap kwargs (which are modified to avoid Python reserved keywords) back into
# the source keys of the argument object.
remap = {}
for k, v in kwargs.items():
remap[argument_type.properties[k].source] = v
# Construct the argument, which will validate all kwargs.
argument = argument_type(remap)
# If we GET, use the original remap, otherwise, JSON encode the argument.
if method == 'GET':
options['params'] = remap
else:
options['data'] = json.dumps(argument, cls=StathamJSONEncoder)
options['headers'] = {'Content-Type': 'application/json'}
# Do the request.
response = requests.request(method,
self.api_endpoint_base + href.format(**kwargs),
auth=(self.api_key, ''),
**options)
# Check if the response is OK.
if response.ok:
# Attempt to construct the return type, handling lists, and some
# dicts especially (Toshl decided that on some endpoints such as
# the currencies list that they'd actually return a dict).
if return_type:
plain = response.json()
if isinstance(plain, list):
return [return_type(p) for p in plain]
elif set(plain.keys()).issubset(set(p.source for p in return_type.properties.values())):
return return_type(response.json())
elif isinstance(plain, dict):
return {k: return_type(v) for k, v in plain.items()}
else:
return plain
else:
response.raise_for_status()
```
|
{
"source": "JFT-Inc/MetaDiamond",
"score": 2
}
|
#### File: MetaDiamond/example/glumpy_example_earth_rendering.py
```python
import csv
import numpy as np
from glumpy import app, gl, glm, gloo, data
from glumpy.geometry.primitives import sphere
from glumpy.transforms import Arcball, Viewport, Position
from glumpy.graphics.text import FontManager
from glumpy.graphics.collections import GlyphCollection
from glumpy.graphics.collections import PathCollection, MarkerCollection
def spheric_to_cartesian(phi, theta, rho):
""" Spheric to cartesian coordinates """
if hasattr(phi, '__iter__'):
n = len(phi)
elif hasattr(theta, '__iter__'):
n = len(theta)
elif hasattr(rho, '__iter__'):
n = len(rho)
P = np.empty((n, 3), dtype=np.float32)
sin_theta = np.sin(theta)
P[:, 0] = sin_theta * np.sin(phi) * rho
P[:, 1] = sin_theta * np.cos(phi) * rho
P[:, 2] = np.cos(theta) * rho
return P
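# Quick illustrative check: a point at phi = 0, theta = pi / 2, rho = 1 lies on
# the +Y axis, so (up to floating-point rounding)
#
#     spheric_to_cartesian([0.0], [np.pi / 2], 1.0)  # ~ array([[0., 1., 0.]])
#
# At least one of phi, theta or rho must be a sequence so that n is defined.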
vertex = """
uniform mat4 model, view, projection;
attribute vec3 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
v_texcoord = texcoord;
gl_Position = <transform(position)>;
}
"""
fragment = """
const vec4 blue = vec4(0.80,0.80,1.00,1.00);
const vec4 white = vec4(1.00,1.00,1.00,1.00);
uniform sampler2D texture;
varying vec2 v_texcoord;
void main()
{
float v = texture2D(texture, v_texcoord).r;
gl_FragColor = mix(white,blue,v);
}
"""
transform = Arcball(Position(), znear=1, zfar=10)
viewport = Viewport()
radius = 1.5
vertices, indices = sphere(radius, 64, 64)
earth = gloo.Program(vertex, fragment)
earth.bind(vertices)
earth['texture'] = data.get("earth-black.jpg")
earth['texture'].interpolation = gl.GL_LINEAR
earth['transform'] = transform
paths = PathCollection(mode="agg+", color="global", linewidth="global",
viewport=viewport, transform=transform)
paths["color"] = 0, 0, 0, 0.5
paths["linewidth"] = 1.0
theta = np.linspace(0, 2 * np.pi, 64, endpoint=True)
for phi in np.linspace(0, np.pi, 12, endpoint=False):
paths.append(spheric_to_cartesian(phi, theta, radius * 1.01), closed=True)
phi = np.linspace(0, 2 * np.pi, 64, endpoint=True)
for theta in np.linspace(0, np.pi, 19, endpoint=True)[1:-1]:
paths.append(spheric_to_cartesian(phi, theta, radius * 1.01), closed=True)
vertex = """
#include "math/constants.glsl"
varying float v_size;
varying vec4 v_fg_color;
varying vec4 v_bg_color;
varying vec2 v_orientation;
varying float v_antialias;
varying float v_linewidth;
void main (void)
{
fetch_uniforms();
v_linewidth = linewidth;
v_antialias = antialias;
v_fg_color = fg_color;
v_bg_color = bg_color;
v_orientation = vec2(cos(orientation), sin(orientation));
gl_Position = <transform(position)>;
float scale = (3.5 - length(gl_Position.xyz)/length(vec3(1.5)));
v_fg_color.a = scale;
v_bg_color.a = scale;
scale=1;
v_size = scale * size;
gl_PointSize = M_SQRT2 * size * scale + 2.0 * (linewidth + 1.5*antialias);
<viewport.transform>;
}
"""
markers = MarkerCollection(marker="disc", vertex=vertex,
viewport=viewport, transform=transform)
C, La, Lo = [], [], []
with open(data.get("capitals.csv"), 'r') as file:
reader = csv.reader(file, delimiter=',')
next(reader, None) # skip the header
for row in reader:
capital = row[1]
latitude = np.pi / 2 + float(row[2]) * np.pi / 180
longitude = np.pi + float(row[3]) * np.pi / 180
C.append(capital)
La.append(latitude)
Lo.append(longitude)
P = spheric_to_cartesian(Lo, La, radius * 1.01)
markers.append(P, bg_color=(1, 1, 1, 1), fg_color=(.25, .25, .25, 1), size=10)
vertex = """
varying vec4 v_color;
varying float v_offset;
varying vec2 v_texcoord;
// Main
// ------------------------------------
void main()
{
fetch_uniforms();
gl_Position = <transform(origin)>;
v_color = color;
v_texcoord = texcoord;
<viewport.transform>;
float scale = (3.5 - length(gl_Position.xyz)/length(vec3(1.5)));
v_color.a = scale;
// We set actual position after transform
v_offset = 3.0*(offset + origin.x - int(origin.x));
gl_Position /= gl_Position.w;
gl_Position = gl_Position + vec4(2.0*position/<viewport.viewport_global>.zw,0,0);
}
"""
labels = GlyphCollection('agg', vertex=vertex,
transform=transform, viewport=viewport)
font = FontManager.get("OpenSans-Regular.ttf", size=16, mode='agg')
for i in range(len(P)):
labels.append(C[i], font, origin=P[i])
labels["position"][:, 1] -= 20
window = app.Window(width=1024, height=1024, color=(.2, .2, .35, 1))
window.attach(transform)
window.attach(viewport)
@window.event
def on_draw(dt):
window.clear()
gl.glEnable(gl.GL_DEPTH_TEST)
earth.draw(gl.GL_TRIANGLES, indices)
paths.draw()
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glEnable(gl.GL_BLEND)
markers.draw()
labels.draw()
@window.event
def on_init():
gl.glEnable(gl.GL_DEPTH_TEST)
transform.phi = 125
transform.theta = -150
transform.zoom = 15
app.run()
```
#### File: MetaDiamond/example/pyside2_opengl_example.py
```python
from PySide2.QtWidgets import QApplication, QPushButton, QGridLayout, QWidget, QOpenGLWidget, QMainWindow
from OpenGL.GL import *
import sys
class GLWidget(QOpenGLWidget):
    def initializeGL(self):
        self.f = self.context().functions()
        self.f.glViewport(self.x(), self.y(), self.width(), self.height())
        self.f.glClear(GL_COLOR_BUFFER_BIT)
        # Read the GLSL sources shipped alongside this example.
        with open('vertex_program.glsl') as filestream:
            program_vertex = filestream.read()
        with open('fragment_program.glsl') as filestream:
            program_fragment = filestream.read()
        # Compile and link the GLSL program using the PyOpenGL bindings
        # imported above (the context is current inside initializeGL).
        vertex_shader = glCreateShader(GL_VERTEX_SHADER)
        glShaderSource(vertex_shader, program_vertex)
        glCompileShader(vertex_shader)
        fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)
        glShaderSource(fragment_shader, program_fragment)
        glCompileShader(fragment_shader)
        self.program = glCreateProgram()
        glAttachShader(self.program, vertex_shader)
        glAttachShader(self.program, fragment_shader)
        glLinkProgram(self.program)
def paintGL(self):
self.f.glClear(GL_COLOR_BUFFER_BIT)
class MainScreen(QMainWindow):
def switch_flag(self):
self.flag = not self.flag
def __init__(self, qt_app: QApplication):
super(MainScreen, self).__init__()
        # Centre a window that is 40% of the primary screen's size;
        # setGeometry() expects integer pixel values.
        screen = qt_app.primaryScreen().geometry()
        width = screen.width() * 40 // 100
        height = screen.height() * 40 // 100
        self.setGeometry(
            (screen.width() - width) // 2,
            (screen.height() - height) // 2,
            width,
            height
        )
self.gl_widget = GLWidget()
self.flag = True
self.stop_go_button = QPushButton("Stop")
self.stop_go_button.clicked.connect(self.switch_flag)
self.the_layout = QGridLayout()
self.the_layout.addWidget(self.gl_widget, 1, 1)
self.the_layout.addWidget(self.stop_go_button, 2, 1)
self.center_screen = QWidget()
self.center_screen.setLayout(self.the_layout)
self.setCentralWidget(self.center_screen)
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainScreen(app)
window.show()
app.exec_()
```
|
{
"source": "jftreanor/IRRMonteCarlo",
"score": 3
}
|
#### File: jftreanor/IRRMonteCarlo/IRRMonteCarlo.py
```python
import numpy as np
from matplotlib import pyplot as plt
import random as rd
import pandas as pd
# Fund class, with attributes initialcapital, portfolio and value
class Fund:
def __init__(self, Capital, Portfolio):
self.initialcapital = Capital
self.portfolio = Portfolio
self.value = sum(asset.price for asset in Portfolio)
# Asset class with attributes price, year-on-year return, currency and daily return
class Asset:
def __init__(self, Price, Returnyoy, Currency):
self.price = Price
self.returnyoy = Returnyoy
self.currency = Currency
self.daily_return = np.exp(np.log(Returnyoy)/252)
# Scales asset prices to fit portfolio value
def portfolioScaling(prices, val):
scaledPrices = []
a = sum(prices)
sf = val/a
for i in prices:
scaledPrices.append(i * sf)
return scaledPrices
# Randomly generates a portfolio whose assets have a mean annual return of 8%.
def portfolioGenerator(portfolioVal, noAssets, US, EUR, GB, usAssets, eurAssets, gbAssets):
portfolio = []
usp = []
eurp = []
gbp = []
usa = usAssets
eura = eurAssets
gba = gbAssets
usval = portfolioVal * US
eurval = portfolioVal * EUR
gbval = portfolioVal * GB
for i in range(usa):
price = rd.randint(0,usval)
usp.append(price)
for i in range(eura):
price = rd.randint(0,eurval)
eurp.append(price)
for i in range(gba):
price = rd.randint(0,gbval)
gbp.append(price)
usp = portfolioScaling(usp, usval)
    eurp = portfolioScaling(eurp, eurval)
    gbp = portfolioScaling(gbp, gbval)
p = usp + eurp + gbp
for i in range(len(p)):
ret = rd.gauss(1.08,0.04)
        if i <= usAssets - 1:
            portfolio.append(Asset(p[i], ret, "US"))
        elif i <= usAssets + eurAssets - 1:
            portfolio.append(Asset(p[i], ret, "EUR"))
        else:
            portfolio.append(Asset(p[i], ret, "GB"))
return portfolio
# Returns the portfolio with each asset's price expressed in its respective currency.
def portfolioValue(portfolio, eur_price, gbp_price):
for i in range(len(portfolio)):
if(portfolio[i].currency == "EUR"):
portfolio[i].price = portfolio[i].price / eur_price
elif(portfolio[i].currency == "GBP"):
portfolio[i].price = portfolio[i].price / gbp_price
return portfolio
# Runs a Monte Carlo simulation of a foreign exchange price series using the normal distribution and daily volatility.
def MC(num_sims, eurusd):
returns = eurusd.pct_change()
last_price = eurusd[-1]
num_days = 252
simulation_df = pd.DataFrame()
for x in range(num_sims):
count = 0
daily_vol = returns.std()
price_series = []
price = last_price * (1 + np.random.normal(0, daily_vol))
price_series.append(price)
for y in range(num_days):
if count == 251:
break
price = price_series[count] * (1 + np.random.normal(0, daily_vol))
price_series.append(price)
count += 1
simulation_df[x] = price_series
return simulation_df
# Runs Monte Carlo simulations of two FOREX price series using normal distributions and daily volatility.
# Returns the result in a dataframe whose first num_sims columns are the eurusd simulations and whose next num_sims columns are the gbpusd simulations.
def multiMC(num_sims, eurusd, gbpusd):
eur_returns = eurusd.pct_change()
gbp_returns = gbpusd.pct_change()
eur_last_price = eurusd[-1]
gbp_last_price = gbpusd[-1]
num_days = 252
eur_simulation_df = pd.DataFrame()
gbp_simulation_df = pd.DataFrame()
for x in range(num_sims):
count = 0
eur_daily_vol = eur_returns.std()
gbp_daily_vol = gbp_returns.std()
eur_price_series = []
gbp_price_series = []
eur_price = eur_last_price * (1 + np.random.normal(0, eur_daily_vol))
eur_price_series.append(eur_price)
gbp_price = gbp_last_price * (1 + np.random.normal(0, gbp_daily_vol))
gbp_price_series.append(gbp_price)
for y in range(num_days):
if count == 251:
break
eur_price = eur_price_series[count] * (1 + np.random.normal(0, eur_daily_vol))
eur_price_series.append(eur_price)
gbp_price = gbp_price_series[count] * (1 + np.random.normal(0, gbp_daily_vol))
gbp_price_series.append(gbp_price)
count += 1
eur_simulation_df[x] = eur_price_series
        gbp_simulation_df[x + num_sims] = gbp_price_series
simulation_df = pd.concat([eur_simulation_df, gbp_simulation_df], axis = 1)
return simulation_df
# Plots the result of a FOREX monte carlo simulation.
def plotMC(currency_series, num_sims):
sim_df = MC(num_sims, currency_series)
fig = plt.figure()
fig.suptitle('Monte Carlo Simulation')
plt.plot(sim_df)
plt.axhline(y = currency_series[0], color = 'r', linestyle = '-')
plt.xlabel('Day')
plt.ylabel('Price')
plt.show()
return
# Uses above FOREX Monte Carlo simulation to simulate the value of a portfolio 1 year (or time period) from now.
def portfolioMC(portfolio, portfolio_init, eurusd, gbpusd, num_sims):
eur_initial = eurusd[0]
gbp_initial = gbpusd[0]
sim_df = multiMC(num_sims, eurusd, gbpusd)
port_series = []
for i in range(num_sims):
portfolio_value = 0
eur_last = sim_df.iloc[251,i]
        gbp_last = sim_df.iloc[251, i + num_sims]
for i in range(len(portfolio)):
if(portfolio[i].currency == "EUR"):
portfolio[i].price = portfolio[i].price/eur_initial * eur_last * portfolio[i].returnyoy
portfolio_value += portfolio[i].price
portfolio[i].price = portfolio_init[i].price
elif(portfolio[i].currency == "GBP"):
portfolio[i].price = portfolio[i].price/gbp_initial * gbp_last * portfolio[i].returnyoy
portfolio_value += portfolio[i].price
portfolio[i].price = portfolio_init[i].price
elif(portfolio[i].currency =="US"):
portfolio[i].price = portfolio[i].price * portfolio[i].returnyoy
portfolio_value += portfolio[i].price
portfolio[i].price = portfolio_init[i].price
port_series.append(portfolio_value)
portfolio_value = 0
return port_series
# Computes the mean IRR implied by a series of simulated portfolio values.
def meanIRR(port_series, Fund):
irr_series = []
for i in range(len(port_series)):
irr = (port_series[i] / Fund.value) -1
irr_series.append(irr)
return np.mean(irr_series)
# Returns the mean IRR for a portfolio using the FOREX Monte Carlo simulation.
def IRRMonteCarlo(num_sims, Fund, portfolio_init, portfolio, eurusd, gbpusd):
port_mc = portfolioMC(portfolio, portfolio_init, eurusd, gbpusd, num_sims)
irr_avg = meanIRR(port_mc, Fund)
return irr_avg
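# Minimal usage sketch: the synthetic EURUSD series below is an assumed
# stand-in for real market data; plotMC() only needs a price series and a
# number of simulations.
if __name__ == "__main__":
    idx = pd.date_range("2020-01-01", periods=252, freq="B")
    eurusd = pd.Series(1.10 + np.random.normal(0, 0.005, 252).cumsum(), index=idx)
    plotMC(eurusd, 50)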
```
|
{
"source": "jftsang/dfhist",
"score": 3
}
|
#### File: jftsang/dfhist/tests.py
```python
import os
import unittest
import tempfile
import time
from unittest.mock import MagicMock
import pandas as pd
from dfhist import DFHist
TEST_DF = pd.DataFrame(
{
"ints": [1, 2, 3],
"floats": [1.23, 4.56, 7.89],
"bools": [True, False, True],
"strs": ["cake", "ham", "eggs"],
}
)
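# Illustrative usage sketch (the directory and expiry values are assumptions,
# not taken from this test suite): DFHist is applied as a decorator to a
# DataFrame-producing function and caches its result under `directory`, using
# a file name built from `format`, which must contain "{timestamp}".
#
#     cache = DFHist(directory="/tmp/dfcache", format="{timestamp}.csv",
#                    expire=3600, method="csv")
#
#     @cache
#     def load_data():
#         return TEST_DF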
# noinspection PyArgumentList
class TestDfhistInitialisation(unittest.TestCase):
def test_dfhist_rejects_no_args(self):
with self.assertRaises(TypeError):
DFHist()
def test_dfhist_rejects_no_directory(self):
with self.assertRaises(TypeError):
DFHist(format='{timezone}.csv')
def test_dfhist_rejects_no_format(self):
with tempfile.TemporaryDirectory() as td:
with self.assertRaises(TypeError):
DFHist(directory=td)
def test_dfhist_rejects_improper_format(self):
with tempfile.TemporaryDirectory() as td:
with self.assertRaises(ValueError):
DFHist(directory=td, format='improper.csv')
def test_dfhist_creates_missing_directory(self):
with tempfile.TemporaryDirectory() as td:
cachedir = os.path.join(td, 'subdir')
DFHist(
directory=cachedir, format='{timestamp}.csv'
)
self.assertTrue(os.path.isdir(cachedir))
class TestDfhist(unittest.TestCase):
def setUp(self):
self.td = tempfile.TemporaryDirectory()
self.addCleanup(self.td.cleanup)
self.count = 0
self.dfhist = DFHist(
directory=self.td.name,
format='{timestamp}.csv',
expire=None,
tsformatter=self.counter,
method="csv",
)
def counter(self):
"""Use this for tsformatter"""
self.count += 1
return str(self.count)
def assertIsFile(self, filename):
self.assertTrue(os.path.isfile(os.path.join(self.td.name, filename)))
def assertIsNotFile(self, filename):
self.assertFalse(os.path.isfile(os.path.join(self.td.name, filename)))
def test_csv_marshal_round_trip(self):
path = self.dfhist.marshal(TEST_DF)
self.assertTrue(os.path.isfile(path))
restored_df = self.dfhist.unmarshal(path)
self.assertTrue(TEST_DF.equals(restored_df))
def test_handles_no_cache_yet(self):
# given no cache file yet
@self.dfhist
def fn():
return TEST_DF
# when
df = fn()
# then
self.assertTrue(TEST_DF.equals(df))
self.assertTrue(os.path.isfile(os.path.join(self.td.name, "1.csv")))
def test_uses_cache(self):
m = MagicMock(return_value=TEST_DF)
@self.dfhist
def fn():
return m()
# when
fn()
# then
m.assert_called()
self.assertIsFile("1.csv")
# when
m.reset_mock()
fn()
# then
m.assert_not_called()
self.assertIsNotFile("2.csv")
def test_cache_instant_expiry(self):
m = MagicMock(return_value=TEST_DF)
self.dfhist.expiry = 0
@self.dfhist
def fn():
return m()
# when
df = fn()
# then
m.assert_called()
self.assertTrue(TEST_DF.equals(df))
self.assertIsFile("1.csv")
# when
m.reset_mock()
df2 = fn()
# then
m.assert_called()
self.assertTrue(TEST_DF.equals(df2))
self.assertIsFile("2.csv")
def test_cache_expiry(self):
m = MagicMock(return_value=TEST_DF)
self.dfhist.expiry = 1
@self.dfhist
def fn():
return m()
# when
df = fn()
# then
m.assert_called()
self.assertTrue(TEST_DF.equals(df))
self.assertIsFile("1.csv")
# when
time.sleep(1.5)
m.reset_mock()
df2 = fn()
# then
m.assert_called()
self.assertTrue(TEST_DF.equals(df2))
self.assertIsFile("2.csv")
def test_retrieve(self):
# given
self.dfhist.unmarshal = MagicMock(side_effect=self.dfhist.unmarshal)
@self.dfhist
def fn():
return TEST_DF
fn.force()
# when
df = fn()
# then
self.dfhist.unmarshal.assert_called()
self.assertTrue(TEST_DF.equals(df))
def test_force_rewrite(self):
m = MagicMock(return_value=TEST_DF)
@self.dfhist
def fn():
return m()
# when
fn()
# then
m.assert_called()
self.assertIsFile("1.csv")
# when
m.reset_mock()
fn.force()
# then
m.assert_called()
self.assertIsFile("2.csv")
```
|
{
"source": "jftsang/pylint-errors",
"score": 3
}
|
#### File: pylint-errors/plerr/cli.py
```python
import argparse
import pathlib
import sys
from pygments import highlight
from pygments.lexers import MarkdownLexer
from pygments.formatters import TerminalFormatter
from plerr import __version__
def main():
"""Get a pylint error description by an error code."""
parser = argparse.ArgumentParser(
description=(
'Get a verbose description of a pylint error by an error code.'
)
)
parser.add_argument(
'code',
metavar='error code',
type=str,
help='a pylint error code either r1710 or R1710'
)
parser.add_argument(
'-v',
'--version',
action='version',
version='plerr v{}'.format(__version__)
)
args = parser.parse_args()
root = pathlib.Path(__file__).resolve().parent
try:
error = next(root.rglob('*{}.md'.format(args.code.upper())))
content = error.read_bytes()
print(highlight(content, MarkdownLexer(), TerminalFormatter()))
sys.exit(0)
except StopIteration:
print(
'Cannot find {} pylint error by such error code.'.format(
args.code
),
file=sys.stderr
)
sys.exit(1)
```
|
{
"source": "jftsang/pypew",
"score": 2
}
|
#### File: jftsang/pypew/models.py
```python
import datetime as dt # avoid namespace conflict over 'date'
import os
import typing
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional
import jinja2
import pandas as pd
from attr import field
from dateutil.easter import easter
from docx import Document
from docxtpl import DocxTemplate
from models_base import model, AllGetMixin, nullable_field
if typing.TYPE_CHECKING:
from forms import PewSheetForm
from utils import get_neh_df, advent, closest_sunday_to
feasts_fields = ['name', 'month', 'day', 'coeaster', 'coadvent',
'introit', 'collect', 'epistle_ref', 'epistle',
'gat', 'gradual', 'alleluia', 'tract', 'gospel_ref',
'gospel', 'offertory', 'communion']
FEASTS_CSV = Path(os.path.dirname(__file__)) / 'data' / 'feasts.csv'
PEW_SHEET_TEMPLATE = os.path.join('templates', 'pewSheetTemplate.docx')
@model
class Feast(AllGetMixin):
_df = pd.read_csv(FEASTS_CSV)
# Int64, not int, to allow null values (rather than casting them to 0)
_df = _df.astype(
{
'month': 'Int64',
'day': 'Int64',
'coeaster': 'Int64',
'coadvent': 'Int64'
}
)
assert list(_df.columns) == feasts_fields
name: str = field()
# Specified for the fixed holy days, None for the movable feasts.
# TODO - what about Remembrance Sunday and Advent Sunday? Not fixed
# days but also not comoving with Easter. As a hack go with 11 Nov
# and 30 Nov respectively but the exact dates are
month: Optional[int] = nullable_field()
day: Optional[int] = nullable_field()
# For the feasts synced with Easter, the number of days since Easter
coeaster: Optional[int] = nullable_field()
coadvent: Optional[int] = nullable_field()
introit: Optional[str] = nullable_field()
collect: Optional[str] = nullable_field()
epistle_ref: Optional[str] = nullable_field()
epistle: Optional[str] = nullable_field()
gat: Optional[str] = nullable_field()
gradual: Optional[str] = nullable_field()
alleluia: Optional[str] = nullable_field()
tract: Optional[str] = nullable_field()
gospel_ref: Optional[str] = nullable_field()
gospel: Optional[str] = nullable_field()
offertory: Optional[str] = nullable_field()
communion: Optional[str] = nullable_field()
def get_date(self, year=None) -> Optional[dt.date]:
if year is None:
year = datetime.now().year
if self.month is not None and self.day is not None:
# TODO Check this definition
if self.name == 'Remembrance Sunday':
return closest_sunday_to(dt.date(year, self.month, self.day))
return dt.date(year, self.month, self.day)
assert not (self.coeaster is not None and self.coadvent is not None)
if self.coeaster is not None:
return easter(year) + timedelta(days=self.coeaster)
if self.coadvent is not None:
return advent(year) + timedelta(days=self.coadvent)
return None
@property
def date(self):
"""The date of the feast in the present year."""
return self.get_date()
def get_next_date(self, d: Optional[dt.date] = None) -> Optional[dt.date]:
"""Returns the next occurrence of this feast from the specified
date, which may be in the next calendar year.
"""
if d is None:
d = dt.date.today()
next_occurrence = self.get_date(year=d.year)
if next_occurrence is None:
return None
if (next_occurrence - d).days < 0:
next_occurrence = self.get_date(year=d.year + 1)
return next_occurrence
@property
def next_date(self):
"""The next occurrence of the feast, which may be in the next
calendar year.
"""
return self.get_next_date()
def create_docx(self, path):
document = Document()
document.add_heading(self.name, 0)
document.save(path)
@model
class Music:
hymns_df = get_neh_df()
title: str = field()
category: str = field() # Anthem or Hymn or Plainsong
composer: Optional[str] = field()
lyrics: Optional[str] = field()
ref: Optional[str] = field()
@classmethod
def neh_hymns(cls) -> List['Music']:
return [
Music(
title=record.firstLine,
category='Hymn',
composer=None,
lyrics=None,
ref=f'NEH: {record.number}'
) for record in get_neh_df().itertuples()
]
@classmethod
def get_neh_hymn_by_ref(cls, ref: str) -> Optional['Music']:
try:
return next(filter(lambda h: h.ref == ref, cls.neh_hymns()))
except StopIteration:
return None
def __str__(self):
if self.category == 'Hymn':
return f'{self.ref}, {self.title}'
return super().__str__()
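# Illustrative lookup sketch (whether "NEH: 271" exists depends on the bundled
# NEH data, so the specific number is an assumption):
#
#     hymn = Music.get_neh_hymn_by_ref("NEH: 271")
#     if hymn is not None:
#         print(hymn)  # e.g. "NEH: 271, <first line of the hymn>"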
@model
class Service:
# Mandatory fields first, then fields with default values.
title: str = field()
date: datetime.date = field()
primary_feast: Feast = field()
secondary_feast: Optional[Feast] = field(default=None)
celebrant: str = field(default='')
preacher: str = field(default='')
introit_hymn: Optional[Music] = field(default=None)
offertory_hymn: Optional[Music] = field(default=None)
recessional_hymn: Optional[Music] = field(default=None)
anthem: Optional[Music] = field(default=None)
# One can't call methods in jinja2 templates, so one must provide
# everything as member properties instead.
@property
def collects(self) -> List[str]:
out = []
if self.primary_feast.collect:
out.append(self.primary_feast.collect)
if self.secondary_feast and self.secondary_feast.collect:
out.append(self.secondary_feast.collect)
# Collects for Advent I and Ash Wednesday are repeated
# throughout Advent and Lent respectively.
advent1 = Feast.get(name='Advent I')
ash_wednesday = Feast.get(name='Ash Wednesday')
if 'Advent' in self.primary_feast.name and self.primary_feast != advent1:
out.append(advent1.collect)
if 'Lent' in self.primary_feast.name:
out.append(ash_wednesday.collect)
return out
# TODO primary or secondary?
@property
def introit_proper(self) -> str:
return self.primary_feast.introit
@property
def gat(self) -> str:
return self.primary_feast.gat
@property
def gat_propers(self) -> List[str]:
propers = []
if 'Gradual' in self.primary_feast.gat:
propers.append(self.primary_feast.gradual)
if 'Alleluia' in self.primary_feast.gat:
propers.append(self.primary_feast.alleluia)
if 'Tract' in self.primary_feast.gat:
propers.append(self.primary_feast.tract)
return propers
@property
def offertory_proper(self) -> str:
return self.primary_feast.offertory
@property
def communion_proper(self) -> str:
return self.primary_feast.communion
@property
def epistle_ref(self) -> str:
return self.primary_feast.epistle_ref
@property
def epistle(self) -> str:
return self.primary_feast.epistle
@property
def gospel_ref(self) -> str:
return self.primary_feast.gospel_ref
@property
def gospel(self) -> str:
return self.primary_feast.gospel
@classmethod
def from_form(cls, form: 'PewSheetForm') -> 'Service':
primary_feast = Feast.get(name=form.primary_feast_name.data)
if form.secondary_feast_name.data:
secondary_feast = Feast.get(name=form.secondary_feast_name.data)
else:
secondary_feast = None
if form.anthem_title.data or form.anthem_composer.data or form.anthem_lyrics.data:
anthem = Music(
title=form.anthem_title.data,
composer=form.anthem_composer.data,
lyrics=form.anthem_lyrics.data,
category='Anthem',
ref=None
)
else:
anthem = None
return Service(
title=form.title.data,
date=form.date.data,
celebrant=form.celebrant.data,
preacher=form.preacher.data,
primary_feast=primary_feast,
secondary_feast=secondary_feast,
introit_hymn=Music.get_neh_hymn_by_ref(form.introit_hymn.data),
offertory_hymn=Music.get_neh_hymn_by_ref(form.offertory_hymn.data),
recessional_hymn=Music.get_neh_hymn_by_ref(form.recessional_hymn.data),
anthem=anthem,
)
def create_docx(self, path):
doc = DocxTemplate(PEW_SHEET_TEMPLATE)
jinja_env = jinja2.Environment(autoescape=True)
jinja_env.globals['len'] = len
# local import to avoid circular import
from filters import filters_context
jinja_env.filters.update(filters_context)
doc.render({'service': self}, jinja_env)
doc.save(path)
```
|
{
"source": "jftuga/csv_word_merge",
"score": 3
}
|
#### File: jftuga/csv_word_merge/csv_word_merge.py
```python
r"""
csv_word_merge.py
-<NAME>
May-3-2021
Merge CSV fields into a MS Word template
References: https://stackoverflow.com/a/61516850/452281
"""
import argparse
import csv
import concurrent.futures
import os
import time
from docx import Document
from docx2pdf import convert
pgm_name = "csv_word_merge"
pgm_version = "1.0.0"
pgm_url = "https://github.com/jftuga/csv_word_merge"
def get_csv_data(fname: str) -> list:
all_rows = []
with open(fname, newline="") as c:
reader = csv.DictReader(c)
for row in reader:
all_rows.append(row)
return all_rows
def get_docx_name(row: dict, output: str) -> str:
return "%s.docx" % row[output]
# https://stackoverflow.com/a/55733040/452281
def docx_replace(doc, data) -> int:
count = 0
paragraphs = list(doc.paragraphs)
for t in doc.tables:
for row in t.rows:
for cell in row.cells:
for paragraph in cell.paragraphs:
paragraphs.append(paragraph)
for p in doc.paragraphs:
for key, val in data.items():
key = key.strip()
val = val.strip()
key_name = f"_{key}_"
if key_name in p.text:
count += 1
inline = p.runs
# Replace strings and retain the same style.
# The text to be replaced can be split over several runs so
# search through, identify which runs need to have text replaced
# then replace the text in those identified
started = False
key_index = 0
# found_runs is a list of (inline index, index of match, length of match)
found_runs = list()
found_all = False
replace_done = False
for i in range(len(inline)):
# case 1: found in single run so short circuit the replace
if key_name in inline[i].text and not started:
found_runs.append((i, inline[i].text.find(key_name), len(key_name)))
text = inline[i].text.replace(key_name, str(val))
inline[i].text = text
replace_done = True
found_all = True
break
if key_name[key_index] not in inline[i].text and not started:
# keep looking ...
continue
# case 2: search for partial text, find first run
if key_name[key_index] in inline[i].text and inline[i].text[-1] in key_name and not started:
# check sequence
start_index = inline[i].text.find(key_name[key_index])
check_length = len(inline[i].text)
for text_index in range(start_index, check_length):
if inline[i].text[text_index] != key_name[key_index]:
# no match so must be false positive
break
if key_index == 0:
started = True
chars_found = check_length - start_index
key_index += chars_found
found_runs.append((i, start_index, chars_found))
if key_index != len(key_name):
continue
else:
# found all chars in key_name
found_all = True
break
# case 2: search for partial text, find subsequent run
if key_name[key_index] in inline[i].text and started and not found_all:
# check sequence
chars_found = 0
check_length = len(inline[i].text)
for text_index in range(0, check_length):
if inline[i].text[text_index] == key_name[key_index]:
key_index += 1
chars_found += 1
else:
break
# no match so must be end
found_runs.append((i, 0, chars_found))
if key_index == len(key_name):
found_all = True
break
if found_all and not replace_done:
for i, item in enumerate(found_runs):
index, start, length = [t for t in item]
if i == 0:
text = inline[index].text.replace(inline[index].text[start:start + length], str(val))
inline[index].text = text
else:
text = inline[index].text.replace(inline[index].text[start:start + length], '')
inline[index].text = text
return count
def create_dest(dest: str):
if not os.path.exists(dest):
os.mkdir(dest, 0o755)
def process_row(row: tuple, wordfile: str, col: str, dest: str):
document = Document(wordfile)
changes = 0
if len(row[col]) == 0:
print("Skipping row, invalid data: ", row)
return
changes += docx_replace(document, row)
print(f"{row=}; number of changes: {changes}")
if changes > 0:
docx_name = get_docx_name(row, col)
docx_name = os.path.join(dest, docx_name)
document.save(docx_name)
time.sleep(0.1)
print(f"{docx_name=}")
convert(docx_name)
else:
print(f"No changes found for {row}")
def main():
version_string = f"{pgm_name}, v{pgm_version}, {pgm_url}"
parser = argparse.ArgumentParser(description="Merge CSV fields into a MS Word template")
parser.add_argument("--csv", "-c", help="csv file containing macros", required=True)
parser.add_argument("--col", "-C", help="column name for output PDF", required=True)
parser.add_argument("--dest", "-d", help="destination folder", required=True)
parser.add_argument("--version", "-v", help="display version and then exit", action="version",
version=version_string)
parser.add_argument("wordfile", metavar="wordfile", help="MS Word file with macros")
args = parser.parse_args()
create_dest(args.dest)
csv_data = get_csv_data(args.csv)
max_workers = os.cpu_count()
with concurrent.futures.ProcessPoolExecutor(max_workers) as executor:
result = {executor.submit(process_row, row, args.wordfile, args.col, args.dest): row for row in csv_data}
if "__main__" == __name__:
main()
# end of script
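# Illustrative note (not part of the original script): docx_replace() looks for each CSV
# column name wrapped in underscores inside the Word template, and get_docx_name() names
# the output after the --col column. For a hypothetical CSV with columns "name" and
# "invoice", the template would contain the placeholders "_name_" and "_invoice_", and a
# typical invocation might look like:
#
#     python csv_word_merge.py --csv people.csv --col invoice --dest out template.docx
#
# The file names above are made up; only the placeholder convention follows from the code.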
```
|
{
"source": "jfuechsl/simplesvc",
"score": 2
}
|
#### File: simplesvc/simplesvc/__init__.py
```python
from flask import Flask, render_template
from .volumes import get_mounts
from .envvars import get_environment_variables
from .xkcd import get_current_xkcd
app = Flask(__name__)
@app.route('/')
def root_route():
mounts = get_mounts()
env_vars = get_environment_variables('EY_')
xkcd = get_current_xkcd()
return render_template('svcinfo.html', mounts=mounts, env_vars=env_vars, xkcd=xkcd)
```
#### File: simplesvc/volumes/__init__.py
```python
def get_mounts():
try:
with open('/proc/mounts', 'r') as f:
mounts = [line.split()[1] for line in f.readlines()]
return mounts
except:
return []
```
|
{
"source": "JFU-GIT/bluebutton-web-server",
"score": 2
}
|
#### File: apps/mymedicare_cb/loggers.py
```python
import json
import logging
from django.conf import settings
"""
Logger functions for mymedicare_cb module
"""
authenticate_logger = logging.getLogger('audit.authenticate.sls')
mymedicare_cb_logger = logging.getLogger('audit.authenticate.mymedicare_cb')
# For use in models.get_and_update_user()
def log_get_and_update_user(auth_flow_dict, status, user, fhir_id, mbi_hash, hicn_hash, hash_lookup_type, mesg):
'''
Logging for info or issue
used in get_and_update_user()
mesg = Description text.
'''
log_dict = {
"type": "mymedicare_cb:get_and_update_user",
"status": status,
"fhir_id": fhir_id,
"mbi_hash": mbi_hash,
"hicn_hash": hicn_hash,
"hash_lookup_type": hash_lookup_type,
"crosswalk": {
"id": user.crosswalk.id,
"user_hicn_hash": user.crosswalk.user_hicn_hash,
"user_mbi_hash": user.crosswalk.user_mbi_hash,
"fhir_id": user.crosswalk.fhir_id,
"user_id_type": user.crosswalk.user_id_type,
},
"mesg": mesg,
}
# Update with auth flow session info
if auth_flow_dict:
log_dict.update(auth_flow_dict)
if settings.LOG_JSON_FORMAT_PRETTY:
mymedicare_cb_logger.info(json.dumps(log_dict, indent=2))
else:
mymedicare_cb_logger.info(json.dumps(log_dict))
# For use in models.create_beneficiary_record()
def log_create_beneficiary_record(auth_flow_dict, status, username, fhir_id, user_mbi_hash, user_hicn_hash, mesg):
'''
Logging for info or issue
used in create_beneficiary_record()
mesg = Description text.
'''
log_dict = {
"type": "mymedicare_cb:create_beneficiary_record",
"status": status,
"username": username,
"fhir_id": fhir_id,
"user_mbi_hash": user_mbi_hash,
"user_hicn_hash": user_hicn_hash,
"mesg": mesg,
}
# Update with auth flow session info
if auth_flow_dict:
log_dict.update(auth_flow_dict)
if settings.LOG_JSON_FORMAT_PRETTY:
mymedicare_cb_logger.info(json.dumps(log_dict, indent=2))
else:
mymedicare_cb_logger.info(json.dumps(log_dict))
# For use in views.authenticate()
def log_authenticate_start(auth_flow_dict, sls_status, sls_status_mesg, sls_subject=None,
sls_mbi_format_valid=None, sls_mbi_format_msg=None,
sls_mbi_format_synthetic=None, sls_hicn_hash=None,
sls_mbi_hash=None, slsx_client=None):
log_dict = {
"type": "Authentication:start",
"sls_status": sls_status,
"sls_status_mesg": sls_status_mesg,
"sls_signout_status_code": slsx_client.signout_status_code,
"sls_signout_status_mesg": str(slsx_client.signout_status_mesg),
"sls_token_status_code": slsx_client.token_status_code,
"sls_token_status_mesg": str(slsx_client.token_status_mesg),
"sls_userinfo_status_code": slsx_client.userinfo_status_code,
"sls_userinfo_status_mesg": str(slsx_client.userinfo_status_mesg),
"sls_validate_signout_status_code": slsx_client.validate_signout_status_code,
"sls_validate_signout_status_mesg": str(slsx_client.validate_signout_status_mesg),
"sub": sls_subject,
"sls_mbi_format_valid": sls_mbi_format_valid,
"sls_mbi_format_msg": sls_mbi_format_msg,
"sls_mbi_format_synthetic": sls_mbi_format_synthetic,
"sls_hicn_hash": sls_hicn_hash,
"sls_mbi_hash": sls_mbi_hash,
}
# Update with auth flow session info
if auth_flow_dict:
log_dict.update(auth_flow_dict)
if settings.LOG_JSON_FORMAT_PRETTY:
authenticate_logger.info(json.dumps(log_dict, indent=2))
else:
authenticate_logger.info(json.dumps(log_dict))
# For use in views.authenticate()
def log_authenticate_success(auth_flow_dict, sls_subject, user):
log_dict = {
"type": "Authentication:success",
"sub": sls_subject,
"user": {
"id": user.id,
"username": user.username,
"crosswalk": {
"id": user.crosswalk.id,
"user_hicn_hash": user.crosswalk.user_hicn_hash,
"user_mbi_hash": user.crosswalk.user_mbi_hash,
"fhir_id": user.crosswalk.fhir_id,
"user_id_type": user.crosswalk.user_id_type,
},
},
}
# Update with auth flow session info
if auth_flow_dict:
log_dict.update(auth_flow_dict)
if settings.LOG_JSON_FORMAT_PRETTY:
authenticate_logger.info(json.dumps(log_dict, indent=2))
else:
authenticate_logger.info(json.dumps(log_dict))
```
|
{
"source": "jfuhrer/black-dashboard-django",
"score": 3
}
|
#### File: black-dashboard-django/app/services.py
```python
import requests
from django.shortcuts import render
# ****************************** GET ********************************
# get the list of all events of a client
def getEvents(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/events/'
# https://hzmplaceholder.ch/events/<clientId>/
response = requests.get(url+clientId)
events = response.json()
# extract the events of the list object called "events" in the JSON file
events_list = {'events': events['events']}
return events_list
# get the client profile
def getProfile(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/clients/'
# https://hzmplaceholder.ch/clients/<clientId>/profile
response = requests.get(url+clientId+'/profile')
userProfile = response.json()
return userProfile
# get the investment profile
def getInvestmentProfile(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/clients/'
# https://hzmplaceholder.ch/clients/<clientId>/investmentProfile
response = requests.get(url+clientId+'/investmentProfile')
invProfile = response.json()
return invProfile
# get an event
def getEvent(request, eventId):
url = 'https://hzmEndpoint.ch/events/'
# https://hzmplaceholder.ch/events/<eventId>/
response = requests.get(url+eventId)
evt = response.json()
return evt
# get the list of all notes of a client
def getNotes(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/notes/<clientId>/
response = requests.get(url+clientId)
ns = response.json()
# extract the notes of the list object called "notes" in the JSON file
notes_list = {'notes': ns['notes']}
return notes_list
# get the list of all notes of an event
def getNotesByEvent(request, eventId):
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/notes/<eventId>/
response = requests.get(url+eventId)
ns = response.json()
notes_list = {'notes': ns['notes']}
return notes_list
# get a note
def getNote(request, noteId):
url = 'https://hzmEndpoint.ch/notes/'
# https://hzmplaceholder.ch/events/<eventId>/
response = requests.get(url+noteId)
nt = response.json()
return nt
# get the list of all documents of a client
def getDocuments(request):
clientId = request.user
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<clientId>/
response = requests.get(url+clientId)
docs = response.json()
# extract the documents of the list object called "documents" in the JSON file
docs_list = {'docs': docs['docs']}
return docs_list
# get the list of all documents of an event
def getDocumentsByEvent(request, eventId):
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<eventId>/
response = requests.get(url+eventId)
docs = response.json()
docs_list = {'docs': docs['docs']}
return docs_list
# get a document
def getDocument(request, documentId):
url = 'https://hzmEndpoint.ch/documents/'
# https://hzmplaceholder.ch/documents/<documentId>/
response = requests.get(url+documentId)
doc = response.json()
return doc
# ****************************** POST ********************************
def postNote(request, json):
url = 'https://hzmEndpoint.ch/notes/'
# json would be filled by form data
"""
json = {'clientId': "123",
"consultationId": "123-3456-3454-1",
"title": "Neue Notiz",
"evtCreated": "2021-01-17 10:00",
"evtModified": "2021-01-21 10:00",
"evtDue": "2021-05-17",
"reminder": True,
"textHtml": "Dies ist eine neue Notiz" } """
response = requests.post(url, data=json)
if response.status_code == 200:
return print('POST successful, note stored', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
def postDocument(request, json):
url = 'https://hzmEndpoint.ch/documents/'
# json would filled by form data / upload
"""
json = {"clientId": 012342223,
"consultationId": "123-3456-3454-1",
"title": "Risikoaufklärung 2.0",
"evtCreated": "2021-01-17 10:00",
"format": "pdf",
"status": "Neu",
"pdfResource":PDF resource } """
response = requests.post(url, data=json)
if response.status_code == 200:
return print('POST successful, document stored', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
# ****************************** DELETE ********************************
def deleteNote(request, noteId):
url = 'https://hzmEndpoint.ch/notes/' + noteId
# delete should have some tokens to validate permission / security reasons
# to be included here once known!
response = requests.delete(url)
if response.status_code == 200:
return print('DELETE successful', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
def deleteDocument(request, documentId):
url = 'https://hzmEndpoint.ch/documents/' + documentId
# delete should have some tokens to validate permission / security reasons
# to be included here once known!
response = requests.delete(url)
if response.status_code == 200:
return print('DELETE successful', response.text)
else:
return print('something went wrong, check out the response code: %s and text %s'
% (response.status_code, response.text))
```
|
{
"source": "jfulling/bobber",
"score": 3
}
|
#### File: jfulling/bobber/bobber.py
```python
import sys
from datetime import datetime
import time
from flask import Flask
from flask import request,Response
import requests
import urllib.request
from urllib.parse import urljoin
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from bs4 import BeautifulSoup, SoupStrainer
import threading
import base64
app = Flask(__name__)
tokens = []
token = 1
TIMESTAMP = datetime.now().strftime("%d.%m.%y-%H-%M-%S")
############################
# CHANGE THESE VARIABLES
############################
# Path to file containing target user tokens/IDs (one per line)
TOKENS = "/path/to/tokens/file"
############################
# This should be the URL for this server - make sure they line up with
# any settings defined in the app.run() function at the bottom of this
# file
REDIRECT_DOMAIN = "https://my.flask.app:8443"
############################
# This should be the URL where your phishing app is hosted
PHISHAPP_DOMAIN = "https://my.app.url"
############################
# This site will be used when an invalid request is made,
# or if the user is locked out by accessing the server.
#
# This should be a valid URL to a site with legitimate content.
SPOOFED_DOMAIN = "https://some.other.content.com"
############################
# This should be what the app uses to identify a user token on landing
# eg if the url is https://myapp.com?userguid=1234 then use "userguid"
TOKEN_DELIMITER = "CHANGEME"
############################
# This is the value of time (in seconds) the user should be able to
# access the phishing app before getting redirected
TIMEOUT_LENGTH = 900
############################
# Update this file if you want to reuse a generated bobber DB
# otherwise, a new one will be generated at restart
#
# To stop using auto-generated files, do the below
#
# Comment the top line to stop automatically generating a DB
# Fill out the BOBBER_LOCATION variable and uncomment the last 2 lines
BOBBER_DB = ("sqlite:///bobber.%s.db" % (TIMESTAMP))
#BOBBER_LOCATION = "/path/to/file/bobber.db"
#BOBBER_DB = ("sqlite:///%s" % (BOBBER_LOCATION))
############################
# END CHANGES AFTER HERE
############################
# List of users who have accessed the app
# but shouldn't be locked out yet
INTERMEDIATE_ACCESS_LIST = []
engine = create_engine(BOBBER_DB,echo=False)
def dbinit():
#Gather tokens
f_token = open(TOKENS,"r")
line = f_token.readline()
while line:
tokens.append(line.rstrip())
line = f_token.readline()
f_token.close()
#Create db file
Base = declarative_base()
class Tokens(Base):
__tablename__ = 'tracker'
id = Column(Integer, primary_key=True)
userToken = Column(String)
hasAccessed = Column(Integer)
timeAccessed = Column(String)
sourceIP = Column(String)
        def __init__(self, userToken, hasAccessed, timeAccessed, sourceIP):
            self.userToken = userToken
            self.hasAccessed = hasAccessed
            self.timeAccessed = timeAccessed
            self.sourceIP = sourceIP
Base.metadata.create_all(engine)
#Populate the database with user tokens
c = engine.connect()
t = c.begin()
for token in range(0,len(tokens)):
ins = 'INSERT INTO "tracker" (userToken,hasAccessed,timeAccessed,sourceIP) VALUES ("%s",0,"Not Accessed","0.0.0.0")' % (tokens[token])
c.execute(ins)
t.commit()
c.close()
def remove_access(userID):
if(userID in INTERMEDIATE_ACCESS_LIST):
sys.exit(0)
INTERMEDIATE_ACCESS_LIST.append(userID)
time.sleep(TIMEOUT_LENGTH)
INTERMEDIATE_ACCESS_LIST.remove(userID)
c = engine.connect()
t = c.begin()
lockout = c.execute('UPDATE tracker set hasAccessed=1 WHERE userToken="%s"' % (userID))
t.commit()
c.close()
def accessed(userID, sourceIP):
if(userID == False):
return 0
if userID in tokens:
c = engine.connect()
t = c.begin()
result = c.execute('SELECT "hasAccessed" FROM tracker WHERE "userToken" = "%s"' % (userID))
result = result.fetchone()
accessTimestamp = c.execute('UPDATE tracker SET timeAccessed="%s" where userToken="%s"' % (datetime.now().strftime("%d.%m.%y-%H-%M-%S"), userID))
source = c.execute('UPDATE tracker SET sourceIP="%s" where userToken="%s"' % (sourceIP, userID))
t.commit()
c.close()
if(result["hasAccessed"] == 0):
block = threading.Thread(target=remove_access, args=(userID,))
block.start()
return result["hasAccessed"]
return 1
def process_content(request, DOMAIN, **kwargs):
#Assign default values if not specified
try:
gargs = kwargs["gargs"]
except:
gargs = ""
try:
pargs = kwargs["pargs"]
except:
pargs = {}
try:
path = kwargs["path"]
except:
path = ""
if(request.method=="GET"):
#Go fetch the content of the specified domain
resp = requests.get(("%s/%s%s" % (DOMAIN,path,gargs)))
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
elif(request.method=="POST"):
resp = requests.post(("%s/%s%s" % (DOMAIN,path,gargs)), data=pargs)
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
#Replace all links to route through the flask app
soup = BeautifulSoup(response.data, "html.parser")
for url in soup.find_all('a'):
try:
if(url.get('href')[0] == "/"):
if(DOMAIN == PHISHAPP_DOMAIN):
url["href"] = ("%s%s" % (urljoin(REDIRECT_DOMAIN,url.get('href')),gargs))
else:
url["href"] = urljoin(REDIRECT_DOMAIN,url.get('href'))
except:
pass
for img in soup.find_all('img'):
try:
if(img.get('src')[0] == "/"):
imgex = str(img.get("src")[-3:])
ib64 = base64.b64encode(urllib.request.urlopen(urljoin(DOMAIN,img.get('src'))).read())
img["src"] = ("data:img/%s; base64,%s" % (imgex,ib64.decode("utf-8")))
except:
pass
for l in soup.find_all('link'):
try:
if(l.get('href')[0] == "/"):
l["href"] = urljoin(REDIRECT_DOMAIN,l.get('href'))
except:
pass
for s in soup.find_all('script'):
try:
if(s.get('src')[0] == "/"):
s["src"] = urljoin(REDIRECT_DOMAIN,s.get("src"))
continue
s = str(s).replace('src=\"/',('src=\"%s/\"' % (REDIRECT_DOMAIN)))
except Exception as e:
pass
for f in soup.find_all('form'):
try:
if(f.get('action')[0] == "/"):
f["action"] = urljoin(REDIRECT_DOMAIN,"%s%s" % (f.get("action"),gargs))
except:
pass
response.data = soup.prettify()
return response
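# Illustrative sketch (not part of the original tool) of the rewriting process_content()
# performs on fetched HTML when DOMAIN is PHISHAPP_DOMAIN, using the placeholder settings
# above (REDIRECT_DOMAIN = "https://my.flask.app:8443") and a hypothetical token query
# string carried in gargs:
#
#     <a href="/login">       ->  <a href="https://my.flask.app:8443/login<gargs>">
#     <img src="/logo.png">   ->  <img src="data:img/png; base64,...">   (image inlined)
#     <form action="/submit"> ->  <form action="https://my.flask.app:8443/submit<gargs>">
#
# Stylesheets (<link>) and <script src=...> are pointed at REDIRECT_DOMAIN without gargs.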
#If the base url is requested
@app.route('/')
def index():
#Default fail
token = False
PHISHAPP_DELIM = False
#Grab the user ID from the end of the URL if it's there
try:
token = request.args[TOKEN_DELIMITER]
#If it's not there, move on
except Exception as e:
pass
if(TOKEN_DELIMITER in request.args):
PHISHAPP_DELIM = True
#If this is their first time accessing the site
if((not accessed(token,request.environ.get('HTTP_X_REAL_IP', request.remote_addr))) and PHISHAPP_DELIM):
gargs=""
if request.method=='GET':
#Gather GET arguments
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
#If not passing GET parameters
return process_content(request,PHISHAPP_DOMAIN,gargs=gargs)
#If requested via POST
elif request.method=='POST':
#Gather the POST arguments
pargs = {}
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
else:
gargs=("?%s=%s" % (TOKEN_DELIMITER, token))
for i in request.values:
pargs.update({ i : request.values[i]})
return process_content(request, PHISHAPP_DOMAIN,pargs=pargs,gargs=gargs)
else:
gargs=""
if request.method=='GET':
#Gather GET arguments
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
return process_content(request,SPOOFED_DOMAIN,gargs=gargs)
elif request.method=='POST':
#Gather the POST arguments
pargs = {}
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
else:
gargs=("?%s=%s" % (TOKEN_DELIMITER, token))
for i in request.values:
pargs.update({ i : request.values[i]})
return process_content(request, SPOOFED_DOMAIN,pargs=pargs,gargs=gargs)
#If specific urls are requested
@app.route('/<path:path>',methods=['GET','POST'])
def proxy(path):
#Default fail
token = False
PHISHAPP_DELIM = False
#Grab the user ID from the end of the URL if it's there
try:
token = request.args[TOKEN_DELIMITER]
#If it's not there, move on
except Exception as e:
pass
if(TOKEN_DELIMITER in request.args):
PHISHAPP_DELIM = True
#If there's no get args, it's likely not for the phishing app anymore
if(len(request.args) == 0) and (request.method == "GET"):
return process_content(request,SPOOFED_DOMAIN,path=path)
#If this is their first time visiting
if((not accessed(token,request.environ.get('HTTP_X_REAL_IP', request.remote_addr))) and PHISHAPP_DELIM):
#If requested via GET
gargs=""
if request.method=='GET':
#Gather GET arguments
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
return process_content(request, PHISHAPP_DOMAIN, path=path, gargs=gargs)
#If requested via POST
elif request.method=='POST':
#Gather the POST arguments
pargs = {}
if(len(request.args) >= 1):
gargs = "?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
else:
gargs=("?%s=%s" % (TOKEN_DELIMITER, token))
for i in request.values:
pargs.update({ i : request.values[i]})
return process_content(request, PHISHAPP_DOMAIN, path=path,pargs=pargs,gargs=gargs)
else:
#If this is not their first time visiting, or if the token is invalid
gargs = ""
#If requested via GET
if request.method=='GET':
if(len(request.args) >= 1):
gargs="?"
for key in request.args:
gargs += ("&%s=%s" % (key, request.args[key]))
#Go fetch the content of the spoofed domain
return process_content(request, SPOOFED_DOMAIN, path=path, gargs=gargs)
elif request.method=='POST':
args = {}
for i in request.values:
args.update({ i : request.values[i]})
#Go fetch the content of the spoofed domain
return process_content(request, SPOOFED_DOMAIN, path=path, gargs=gargs, pargs=args)
if __name__ == '__main__':
dbinit()
    app.run(host="0.0.0.0", port=5000)
```
|
{
"source": "jfultz/sublime-GitConflictResolver",
"score": 2
}
|
#### File: sublime-GitConflictResolver/modules/icons.py
```python
import sublime
_plugin_name = "Git Conflict Resolver"
_icon_folder = "/".join([_plugin_name, "gutter"])
_icons = {
"ours": "ours",
"ancestor": "ancestor",
"theirs": "theirs"
}
def get(group):
base = ""
extension = ""
if int(sublime.version()) < 3000:
base = "/".join(["..", _icon_folder])
else:
base = "/".join(["Packages", _icon_folder])
extension = ".png"
return "/".join([base, _icons[group] + extension])
```
|
{
"source": "jfunez/apiada",
"score": 3
}
|
#### File: jfunez/apiada/server_hello.py
```python
from http.server import SimpleHTTPRequestHandler
import socketserver
PORT = 8001
class HelloHandler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(b'Hello, world!')
with socketserver.TCPServer(("", PORT), HelloHandler) as httpd:
print("serving at port", PORT)
httpd.serve_forever()
```
#### File: jfunez/apiada/server.py
```python
import json
from random import choices
from http.server import SimpleHTTPRequestHandler
jokes_raw = open('potasio.json').read()
# jokes_raw = open('jokes.json').read()
jokes_dict = json.loads(jokes_raw)
class ApiHandler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
# random joke:
joke = choices(jokes_dict)
# content = bytes(str(joke, "UTF-8"))
content = json.dumps(joke).encode('utf-8')
# send response
self.wfile.write(content)
```
|
{
"source": "jfunez/articles_meta",
"score": 2
}
|
#### File: articles_meta/articlemeta/articlemeta.py
```python
import os
import urlparse
import json
from datetime import datetime
from wsgiref.simple_server import make_server
import pyramid.httpexceptions as exc
from pyramid.config import Configurator
from pyramid.view import view_config, notfound_view_config
from pyramid.response import Response
from pyramid.settings import asbool
import pymongo
import utils
import controller
from export import Export
from decorators import authenticate
def _get_request_limit_param(request, default_limit=1000,
only_positive_limit=True, force_max_limit_to_default=True):
"""
Extract from request's querystring, the limit param,
and apply some restrictions if necessary.
@param request: the request object!
@param default_limit: if not limit was found in querystring
@param only_positive_limit: if true, then NOT accept limits <= 0
@param force_max_limit_to_default: if true, then NOT accept limits > default_limit
"""
limit = request.GET.get('limit', default_limit)
try:
limit = int(limit)
except ValueError:
raise exc.HTTPBadRequest('limit must be integer')
else:
if limit <= 0 and only_positive_limit:
raise exc.HTTPBadRequest('limit must be a positive (non zero) integer')
elif limit >= default_limit and force_max_limit_to_default:
limit = default_limit
return limit
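# Illustrative behaviour of _get_request_limit_param() with its defaults (values hypothetical):
#   ?limit=50    -> 50
#   ?limit=5000  -> clamped to 1000 (force_max_limit_to_default)
#   ?limit=0     -> 400 Bad Request (only_positive_limit)
#   ?limit=abc   -> 400 Bad Request ('limit must be integer')
#   no limit     -> 1000 (default_limit)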
@notfound_view_config(append_slash=True)
def notfound(request):
# http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/urldispatch.html#redirecting-to-slash-appended-routes
return exc.HTTPNotFound()
@view_config(route_name='index', request_method='GET')
def index(request):
return Response('Articles Metadata API')
@view_config(route_name='collection',
request_method='GET')
def get_collection(request):
code = request.GET.get('code', None)
collection = request.databroker.get_collection(code)
return Response(json.dumps(collection), content_type="application/json")
@view_config(route_name='identifiers_collection',
request_method='GET')
def identifier_collection(request):
collections = request.databroker.identifiers_collection()
return Response(json.dumps(collections), content_type="application/json")
@view_config(route_name='journal',
request_method='GET')
def get_journal(request):
collection = request.GET.get('collection', None)
issn = request.GET.get('issn', None)
journal = request.databroker.get_journal(collection=collection, issn=issn)
return Response(json.dumps(journal), content_type="application/json")
@view_config(route_name='identifiers_journal',
request_method='GET')
def identifiers_journal(request):
collection = request.GET.get('collection', None)
limit = _get_request_limit_param(request)
offset = request.GET.get('offset', 0)
try:
offset = int(offset)
except ValueError:
raise exc.HTTPBadRequest('offset must be integer >= 0')
if offset < 0:
raise exc.HTTPBadRequest('offset must be integer >= 0')
ids = request.databroker.identifiers_journal(collection=collection,
limit=limit,
offset=offset)
return Response(json.dumps(ids), content_type="application/json")
@view_config(route_name='add_journal', request_method='POST')
@view_config(route_name='add_journal_slash', request_method='POST')
@authenticate
def add_journal(request):
try:
journal = request.databroker.add_journal(request.json_body)
except ValueError:
raise exc.HTTPBadRequest('The posted JSON data is not valid')
return Response()
@view_config(route_name='delete_journal', request_method='DELETE')
@view_config(route_name='delete_journal_slash', request_method='DELETE')
@authenticate
def delete_journal(request):
issn = request.GET.get('issn', None)
collection = request.GET.get('collection', None)
admintoken = request.GET.get('admintoken', None)
if not admintoken or not issn:
raise exc.HTTPBadRequest(
'The attribute code and admintoken must be given'
)
request.databroker.delete_journal(issn, collection=collection)
return Response()
@view_config(route_name='identifiers_issue',
request_method='GET')
def identifiers_issue(request):
collection = request.GET.get('collection', None)
issn = request.GET.get('issn', None)
from_date = request.GET.get('from', '1500-01-01')
until_date = request.GET.get('until', datetime.now().date().isoformat())
limit = _get_request_limit_param(request)
offset = request.GET.get('offset', 0)
try:
offset = int(offset)
except ValueError:
raise exc.HTTPBadRequest('offset must be integer')
if offset < 0:
raise exc.HTTPBadRequest('offset must be integer >= 0')
ids = request.databroker.identifiers_issue(
collection=collection,
issn=issn,
limit=limit,
offset=offset,
from_date=from_date,
until_date=until_date
)
return Response(json.dumps(ids), content_type="application/json")
@view_config(route_name='exists_issue',
request_method='GET',
request_param=['code'])
def exists_issue(request):
code = request.GET.get('code', None)
collection = request.GET.get('collection', None)
issue = request.databroker.exists_issue(code, collection=collection)
return Response(json.dumps(issue), content_type="application/json")
@view_config(route_name='get_issue',
request_method='GET',
request_param=['code'])
def get_issue(request):
code = request.GET.get('code', None)
collection = request.GET.get('collection', None)
fmt = request.GET.get('format', 'json')
issue = request.databroker.get_issue(code, collection=collection,
replace_journal_metadata=True)
return Response(json.dumps(issue), content_type="application/json")
@view_config(route_name='add_issue', request_method='POST')
@view_config(route_name='add_issue_slash', request_method='POST')
@authenticate
def add_issue(request):
try:
issue = request.databroker.add_issue(request.json_body)
except ValueError:
raise exc.HTTPBadRequest('The posted JSON data is not valid')
return Response()
@view_config(route_name='update_issue', request_method='POST')
@view_config(route_name='update_issue_slash', request_method='POST')
@authenticate
def update_issue(request):
try:
issue = request.databroker.update_issue(request.json_body)
except ValueError:
raise exc.HTTPBadRequest('The posted JSON data is not valid')
return Response()
@view_config(route_name='delete_issue', request_method='DELETE')
@view_config(route_name='delete_issue_slash', request_method='DELETE')
@authenticate
def delete_issue(request):
code = request.GET.get('code', None)
collection = request.GET.get('collection', None)
admintoken = request.GET.get('admintoken', None)
if not admintoken or not code:
raise exc.HTTPBadRequest(
'The attribute code and admintoken must be given'
)
request.databroker.delete_issue(code, collection=collection)
return Response()
@view_config(route_name='identifiers_article',
request_method='GET')
def identifiers_article(request):
collection = request.GET.get('collection', None)
issn = request.GET.get('issn', None)
from_date = request.GET.get('from', '1500-01-01')
until_date = request.GET.get('until', datetime.now().date().isoformat())
limit = _get_request_limit_param(request)
offset = request.GET.get('offset', 0)
try:
offset = int(offset)
except ValueError:
raise exc.HTTPBadRequest('offset must be integer')
if offset < 0:
raise exc.HTTPBadRequest('offset must be integer >= 0')
ids = request.databroker.identifiers_article(collection=collection,
issn=issn,
limit=limit,
offset=offset,
from_date=from_date,
until_date=until_date)
return Response(json.dumps(ids), content_type="application/json")
@view_config(route_name='identifiers_press_release',
request_method='GET')
def identifiers_press_release(request):
collection = request.GET.get('collection', None)
issn = request.GET.get('issn', None)
from_date = request.GET.get('from', '1500-01-01')
until_date = request.GET.get('until', datetime.now().date().isoformat())
limit = _get_request_limit_param(request)
offset = request.GET.get('offset', 0)
try:
offset = int(offset)
except ValueError:
raise exc.HTTPBadRequest('offset must be integer')
ids = request.databroker.identifiers_press_release(collection=collection,
issn=issn,
limit=limit,
offset=offset,
from_date=from_date,
until_date=until_date)
return Response(json.dumps(ids), content_type="application/json")
@view_config(route_name='exists_article',
request_method='GET',
request_param=['code'])
def exists_article(request):
code = request.GET.get('code', None)
collection = request.GET.get('collection', None)
article = request.databroker.exists_article(code, collection=collection)
return Response(json.dumps(article), content_type="application/json")
@view_config(route_name='get_article',
request_method='GET',
request_param=['code'])
def get_article(request):
code = request.GET.get('code', None)
collection = request.GET.get('collection', None)
fmt = request.GET.get('format', 'json')
body = request.GET.get('body', 'false')
    if body not in ['true', 'false']:
        raise exc.HTTPBadRequest("parameter 'body' must be 'true' or 'false'")
body = asbool(body)
article = request.databroker.get_article(
code, collection=collection, replace_journal_metadata=True, body=body
)
if article:
if fmt == 'xmlwos':
return Response(
Export(article).pipeline_sci(), content_type="application/xml")
if fmt == 'xmldoaj':
return Response(
Export(article).pipeline_doaj(), content_type="application/xml")
if fmt == 'xmlrsps':
return Response(
Export(article).pipeline_rsps(), content_type="application/xml")
if fmt == 'xmlpubmed':
return Response(
Export(article).pipeline_pubmed(), content_type="application/xml")
return Response(json.dumps(article), content_type="application/json")
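# Illustrative query parameters accepted by get_article (the route path itself is
# configured elsewhere and is not shown here):
#   code=<article PID>                              mandatory
#   collection=<collection acronym>                 optional
#   body=true|false                                 include the article body (default 'false')
#   format=json|xmlwos|xmldoaj|xmlrsps|xmlpubmed    response serialization (default 'json')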
@view_config(route_name='add_article', request_method='POST')
@view_config(route_name='add_article_slash', request_method='POST')
@authenticate
def add_article(request):
try:
article = request.databroker.add_article(request.json_body)
except ValueError:
raise exc.HTTPBadRequest('The posted JSON data is not valid')
return Response()
@view_config(route_name='update_article', request_method='POST')
@view_config(route_name='update_article_slash', request_method='POST')
@authenticate
def update_article(request):
try:
article = request.databroker.update_article(request.json_body)
except ValueError:
raise exc.HTTPBadRequest('The posted JSON data is not valid')
return Response()
@view_config(route_name='set_doaj_status_true', request_method='POST')
@view_config(route_name='set_doaj_status_true_slash', request_method='POST')
@authenticate
def set_doaj_status_true(request):
code = request.GET.get('code', None)
try:
article = request.databroker.set_doaj_status(code, True)
except ValueError:
raise exc.HTTPBadRequest('The posted JSON data is not valid')
return Response()
@view_config(route_name='set_doaj_status_false', request_method='POST')
@view_config(route_name='set_doaj_status_false_slash', request_method='POST')
@authenticate
def set_doaj_status_false(request):
code = request.GET.get('code', None)
try:
article = request.databroker.set_doaj_status(code, False)
except ValueError:
raise exc.HTTPBadRequest('The posted JSON data is not valid')
return Response()
@view_config(route_name='delete_article', request_method='DELETE')
@view_config(route_name='delete_article_slash', request_method='DELETE')
@authenticate
def delete_article(request):
code = request.GET.get('code', None)
collection = request.GET.get('collection', None)
if not code:
raise exc.HTTPBadRequest(
'The attribute code must be given'
)
token = request.registry.settings.get('app', {}).get('admintoken', None)
request.databroker.delete_article(code, collection=collection)
return Response()
@view_config(route_name='list_historychanges_article', request_method='GET')
@view_config(route_name='list_historychanges_journal', request_method='GET')
@view_config(route_name='list_historychanges_issue', request_method='GET')
def list_historychanges(request):
"""
This view will attend the request from differents urls:
- '/api/v1/article/history'
- '/api/v1/article/history/'
- '/api/v1/journal/history'
- '/api/v1/journal/history/'
- '/api/v1/issue/history'
- '/api/v1/issue/history/'
serving with the same logic, only difference is the type of document
requested: 'article' or 'journal'
"""
doc_type_by_route = {
'/api/v1/article/history': 'article',
'/api/v1/article/history/': 'article',
'/api/v1/journal/history': 'journal',
'/api/v1/journal/history/': 'journal',
'/api/v1/issue/history': 'issue',
'/api/v1/issue/history/': 'issue',
}
document_type = doc_type_by_route[request.matched_route.path]
collection = request.GET.get('collection', None)
event = request.GET.get('event', None)
code = request.GET.get('code', None)
from_date = request.GET.get('from', '1500-01-01T00:00:00')
until_date = request.GET.get('until', datetime.now().isoformat())
offset = request.GET.get('offset', 0)
limit = _get_request_limit_param(request, force_max_limit_to_default=True)
try:
offset = int(offset)
except ValueError:
raise exc.HTTPBadRequest('offset must be integer')
objs = request.databroker.historychanges(
document_type=document_type,
collection=collection,
event=event,
code=code,
limit=limit,
offset=offset,
from_date=from_date,
until_date=until_date
)
return Response(json.dumps(objs), content_type="application/json")
```
#### File: articles_meta/processing/load_mixedcitations.py
```python
import sys
import argparse
import logging
import os
import codecs
import json
import unicodedata
try: # Keep compatibility with python 2.7
from html import unescape
except ImportError:
from HTMLParser import HTMLParser
from pymongo import MongoClient
from articlemeta import utils
from xylose.scielodocument import Article
# --------------
# Py2 compat
# --------------
PY2 = sys.version_info[0] == 2
if PY2:
html_parser = HTMLParser().unescape
else:
html_parser = unescape
# --------------
logger = logging.getLogger(__name__)
config = utils.Configuration.from_env()
settings = dict(config.items())
try:
articlemeta_db = MongoClient(settings['app:main']['mongo_uri'])['articlemeta']
except:
logging.error('Fail to connect to (%s)' % settings['app:main']['mongo_uri'])
def remove_control_characters(data):
return "".join(ch for ch in data if unicodedata.category(ch)[0] != "C")
def html_decode(string):
string = html_parser(string)
string = remove_control_characters(string)
return string
def _config_logging(logging_level='INFO', logging_file=None):
allowed_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(allowed_levels.get(logging_level, 'INFO'))
if logging_file:
hl = logging.FileHandler(logging_file, mode='a')
else:
hl = logging.StreamHandler()
hl.setFormatter(formatter)
hl.setLevel(allowed_levels.get(logging_level, 'INFO'))
logger.addHandler(hl)
return logger
def audity(mixed, document):
logger.debug('Auditing mixed citation')
if int(mixed['order']) > len(document.citations or []):
return False
check = mixed['mixed'].lower()
citation_index = int(mixed['order'])-1
citation_titles = [i.lower() for i in [
document.citations[citation_index].title() or '',
document.citations[citation_index].source or '',
document.citations[citation_index].chapter_title or '',
document.citations[citation_index].article_title or '',
document.citations[citation_index].thesis_title or '',
document.citations[citation_index].link_title or '',
document.citations[citation_index].issue_title or '',
document.citations[citation_index].conference_title or ''
] if i]
citation_authors = document.citations[citation_index].authors or []
for title in citation_titles:
if title in check:
return True
for author in citation_authors:
if author.get('surname', '').lower() in check:
return True
if author.get('given_names', '').lower() in check:
return True
return False
def get_document(collection, code):
logger.debug('Loading document from database')
document = articlemeta_db['articles'].find_one({'collection': collection, 'code': code})
if not document:
return
return Article(document)
def update_document(mixed, document):
logger.debug('Updating citation in database')
citation_field = 'citations.%s.mixed' % str(int(mixed['order'])-1)
articlemeta_db['articles'].update(
{
'collection': document.collection_acronym,
'code': document.publisher_id
},
{
'$set': {
citation_field: mixed['mixed']
}
}
)
def run(mixed_citations_file, import_data):
with codecs.open(mixed_citations_file, encoding='utf-8') as mixed_citations:
for line in mixed_citations:
mixed = json.loads(line)
mixed['mixed'] = html_decode(mixed['mixed'])
document = get_document(mixed['collection'], mixed['pid'])
logger.info('Trying to import %s %s %s' % (mixed['collection'], mixed['pid'], mixed['order']))
if not document:
logger.error('Document not found in Article Meta %s %s %s' % (mixed['collection'], mixed['pid'], mixed['order']))
continue
if not audity(mixed, document):
logger.error('Document did not pass in auditory %s %s %s' % (mixed['collection'], mixed['pid'], mixed['order']))
continue
logger.debug('Document pass in auditory %s %s %s' % (mixed['collection'], mixed['pid'], mixed['order']))
if import_data:
logger.debug('Importing data for %s %s %s' % (mixed['collection'], mixed['pid'], mixed['order']))
update_document(mixed, document)
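# Illustrative input line for run() (not part of the original script); the key names come
# from the code above, the values are made up:
#
#   {"collection": "scl", "pid": "S0001-37652000000100001", "order": "1", "mixed": "<i>Mixed</i> citation text"}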
def main():
parser = argparse.ArgumentParser(
description="Load mixed citations according to a given json file"
)
parser.add_argument(
'--csv_file',
'-f',
help='A json file with the mixed citations of each article'
)
parser.add_argument(
'--import_data',
'-i',
action='store_true',
help='Import data'
)
parser.add_argument(
'--logging_file',
'-o',
help='Full path to the log file'
)
parser.add_argument(
'--logging_level',
'-l',
default='DEBUG',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Logging level'
)
args = parser.parse_args()
_config_logging(args.logging_level, args.logging_file)
run(args.csv_file, args.import_data)
```
|
{
"source": "jfunez/fastapi",
"score": 2
}
|
#### File: fastapi/tests/test_additional_responses_router.py
```python
from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient
app = FastAPI()
router = APIRouter()
@router.get("/a", responses={501: {"description": "Error 1"}})
async def a():
return "a"
@router.get(
"/b",
responses={
502: {"description": "Error 2"},
"4XX": {"description": "Error with range, upper"},
},
)
async def b():
return "b"
@router.get(
"/c",
responses={
"400": {"description": "Error with str"},
"5xx": {"description": "Error with range, lower"},
"default": {"description": "A default response"},
},
)
async def c():
return "c"
app.include_router(router)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/a": {
"get": {
"responses": {
"501": {"description": "Error 1"},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
},
"summary": "A",
"operationId": "a_a_get",
}
},
"/b": {
"get": {
"responses": {
"502": {"description": "Error 2"},
"4XX": {"description": "Error with range, upper"},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
},
"summary": "B",
"operationId": "b_b_get",
}
},
"/c": {
"get": {
"responses": {
"400": {"description": "Error with str"},
"5XX": {"description": "Error with range, lower"},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"default": {"description": "A default response"},
},
"summary": "C",
"operationId": "c_c_get",
}
},
},
}
client = TestClient(app)
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == openapi_schema
def test_a():
response = client.get("/a")
assert response.status_code == 200
assert response.json() == "a"
def test_b():
response = client.get("/b")
assert response.status_code == 200
assert response.json() == "b"
def test_c():
response = client.get("/c")
assert response.status_code == 200
assert response.json() == "c"
```
#### File: test_tutorial/test_advanced_middleware/test_tutorial003.py
```python
from fastapi.responses import PlainTextResponse
from fastapi.testclient import TestClient
from advanced_middleware.tutorial003 import app
@app.get("/large")
async def large():
return PlainTextResponse("x" * 4000, status_code=200)
client = TestClient(app)
def test_middleware():
response = client.get("/large", headers={"accept-encoding": "gzip"})
assert response.status_code == 200
assert response.text == "x" * 4000
assert response.headers["Content-Encoding"] == "gzip"
assert int(response.headers["Content-Length"]) < 4000
response = client.get("/")
assert response.status_code == 200
```
#### File: test_tutorial/test_openapi_callbacks/test_tutorial001.py
```python
from fastapi.testclient import TestClient
from openapi_callbacks.tutorial001 import app, invoice_notification
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/invoices/": {
"post": {
"summary": "Create Invoice",
"description": 'Create an invoice.\n\nThis will (let\'s imagine) let the API user (some external developer) create an\ninvoice.\n\nAnd this path operation will:\n\n* Send the invoice to the client.\n* Collect the money from the client.\n* Send a notification back to the API user (the external developer), as a callback.\n * At this point is that the API will somehow send a POST request to the\n external API with the notification of the invoice event\n (e.g. "payment successful").',
"operationId": "create_invoice_invoices__post",
"parameters": [
{
"required": False,
"schema": {
"title": "Callback Url",
"maxLength": 2083,
"minLength": 1,
"type": "string",
"format": "uri",
},
"name": "callback_url",
"in": "query",
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Invoice"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"callbacks": {
"invoice_notification": {
"{$callback_url}/invoices/{$request.body.id}": {
"post": {
"summary": "Invoice Notification",
"operationId": "invoice_notification__callback_url__invoices___request_body_id__post",
"requestBody": {
"required": True,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/InvoiceEvent"
}
}
},
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/InvoiceEventReceived"
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
}
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Invoice": {
"title": "Invoice",
"required": ["id", "customer", "total"],
"type": "object",
"properties": {
"id": {"title": "Id", "type": "string"},
"title": {"title": "Title", "type": "string"},
"customer": {"title": "Customer", "type": "string"},
"total": {"title": "Total", "type": "number"},
},
},
"InvoiceEvent": {
"title": "InvoiceEvent",
"required": ["description", "paid"],
"type": "object",
"properties": {
"description": {"title": "Description", "type": "string"},
"paid": {"title": "Paid", "type": "boolean"},
},
},
"InvoiceEventReceived": {
"title": "InvoiceEventReceived",
"required": ["ok"],
"type": "object",
"properties": {"ok": {"title": "Ok", "type": "boolean"}},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_openapi():
with client:
response = client.get("/openapi.json")
assert response.json() == openapi_schema
def test_get():
response = client.post(
"/invoices/", json={"id": "fooinvoice", "customer": "John", "total": 5.3}
)
assert response.status_code == 200
assert response.json() == {"msg": "Invoice received"}
def test_dummy_callback():
# Just for coverage
invoice_notification({})
```
#### File: test_tutorial/test_request_files/test_tutorial002.py
```python
import os
from fastapi.testclient import TestClient
from request_files.tutorial002 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Files",
"operationId": "create_files_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_files_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfiles/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload Files",
"operationId": "create_upload_files_uploadfiles__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_files_uploadfiles__post"
}
}
},
"required": True,
},
}
},
"/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Main",
"operationId": "main__get",
}
},
},
"components": {
"schemas": {
"Body_create_upload_files_uploadfiles__post": {
"title": "Body_create_upload_files_uploadfiles__post",
"required": ["files"],
"type": "object",
"properties": {
"files": {
"title": "Files",
"type": "array",
"items": {"type": "string", "format": "binary"},
}
},
},
"Body_create_files_files__post": {
"title": "Body_create_files_files__post",
"required": ["files"],
"type": "object",
"properties": {
"files": {
"title": "Files",
"type": "array",
"items": {"type": "string", "format": "binary"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "files"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422
assert response.json() == file_required
def test_post_files(tmpdir):
path = os.path.join(tmpdir, "test.txt")
with open(path, "wb") as file:
file.write(b"<file content>")
path2 = os.path.join(tmpdir, "test2.txt")
with open(path2, "wb") as file:
file.write(b"<file content2>")
client = TestClient(app)
response = client.post(
"/files/",
files=(
("files", ("test.txt", open(path, "rb"))),
("files", ("test2.txt", open(path2, "rb"))),
),
)
assert response.status_code == 200
assert response.json() == {"file_sizes": [14, 15]}
def test_post_upload_file(tmpdir):
path = os.path.join(tmpdir, "test.txt")
with open(path, "wb") as file:
file.write(b"<file content>")
path2 = os.path.join(tmpdir, "test2.txt")
with open(path2, "wb") as file:
file.write(b"<file content2>")
client = TestClient(app)
response = client.post(
"/uploadfiles/",
files=(
("files", ("test.txt", open(path, "rb"))),
("files", ("test2.txt", open(path2, "rb"))),
),
)
assert response.status_code == 200
assert response.json() == {"filenames": ["test.txt", "test2.txt"]}
def test_get_root():
client = TestClient(app)
response = client.get("/")
assert response.status_code == 200
assert b"<form" in response.content
```
|
{
"source": "jfunez/packtools-gui",
"score": 2
}
|
#### File: packtools-gui/packtools-gui/app.py
```python
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.Qsci import QsciScintilla, QsciLexerXML
import packtools_wrapper
class SimpleXMLEditor(QsciScintilla):
ARROW_MARKER_NUM = 8
def __init__(self, parent=None):
super(SimpleXMLEditor, self).__init__(parent)
# Set the default font
font = QtGui.QFont()
font.setFamily('Courier')
font.setFixedPitch(True)
font.setPointSize(10)
self.setFont(font)
self.setMarginsFont(font)
# Margin 0 is used for line numbers
fontmetrics = QtGui.QFontMetrics(font)
self.setMarginsFont(font)
self.setMarginWidth(0, fontmetrics.width("00000") + 6)
self.setMarginLineNumbers(0, True)
self.setMarginsBackgroundColor(QtGui.QColor("#cccccc"))
# Clickable margin 1 for showing markers
self.setMarginSensitivity(1, True)
self.connect(self,
QtCore.SIGNAL('marginClicked(int, int, Qt::KeyboardModifiers)'),
self.on_margin_clicked)
self.markerDefine(QsciScintilla.RightArrow,
self.ARROW_MARKER_NUM)
self.setMarkerBackgroundColor(QtGui.QColor("#ee1111"),
self.ARROW_MARKER_NUM)
# Brace matching: enable for a brace immediately before or after
# the current position
#
self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
# Current line visible with special background color
self.setCaretLineVisible(True)
self.setCaretLineBackgroundColor(QtGui.QColor("#ffe4e4"))
# Set XML lexer
# Set style for Python comments (style number 1) to a fixed-width
# courier.
lexer = QsciLexerXML()
lexer.setDefaultFont(font)
self.setLexer(lexer)
self.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Courier')
# Don't want to see the horizontal scrollbar at all
# Use raw message to Scintilla here (all messages are documented
# here: http://www.scintilla.org/ScintillaDoc.html)
self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0)
# not too small
self.setMinimumSize(600, 450)
def on_margin_clicked(self, nmargin, nline, modifiers):
# Toggle marker for the line the margin was clicked on
if self.markersAtLine(nline) != 0:
self.markerDelete(nline, self.ARROW_MARKER_NUM)
else:
self.markerAdd(nline, self.ARROW_MARKER_NUM)
class MainWindow(QtGui.QMainWindow):
new_xml_input_signal = QtCore.pyqtSignal(dict, name="new_xml_input_signal")
def __init__(self):
super(MainWindow, self).__init__()
self.initUI()
@QtCore.pyqtSlot(dict)
def analyze_xml_callback(self, params):
        if 'xml_source' in params:
results, exc = packtools_wrapper.analyze_xml(params['xml_source'])
if results:
self.populateEditor(results['annotations'])
if exc:
self.populateEditor(str(exc))
def initUI(self):
self.editor = SimpleXMLEditor(parent=self)
self.setCentralWidget(self.editor)
# Action: Exit Application
exitAction = QtGui.QAction(QtGui.QIcon('resources/exit.png'), 'Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
# Action: Open Local XML file
openFile = QtGui.QAction(QtGui.QIcon('resources/open.png'), 'Open local XML File', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open local XML File')
openFile.triggered.connect(self.showOpenXMLDialog)
# Action: Open URL (remote XML)
openURL = QtGui.QAction(QtGui.QIcon('resources/web.png'), 'Open URL XML File', self)
openURL.setShortcut('Ctrl+U')
openURL.setStatusTip('Open URL XML File')
openURL.triggered.connect(self.showOpenURLDialog)
self.statusbar = self.statusBar()
self.statusbar.showMessage('Packtools version: %s' % packtools_wrapper.PACKTOOLS_VERSION)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openFile)
fileMenu.addAction(openURL)
fileMenu.addAction(exitAction)
toolbar = self.addToolBar('Exit')
toolbar.addAction(openFile)
toolbar.addAction(openURL)
toolbar.addAction(exitAction)
self.new_xml_input_signal.connect(self.analyze_xml_callback)
self.resize(800, 600)
self.center()
self.setWindowTitle('Packtools GUI v0.1')
self.show()
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(
            self, 'Message', "Are you sure you want to quit?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
def showOpenXMLDialog(self):
        fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open XML file', '.', "XML Files (*.xml)"))
        # An empty name means the user cancelled the dialog
        if fname:
            with open(fname, 'r') as f:
                self.new_xml_input_signal.emit({'xml_source': f})
def showOpenURLDialog(self):
url, ok = QtGui.QInputDialog.getText(self, 'Input URL Dialog', 'Enter valid URL:')
if ok:
self.new_xml_input_signal.emit({'xml_source': str(url)})
def populateEditor(self, text_content, decode_as='utf-8'):
self.editor.setText(text_content.decode(decode_as))
def main():
app = QtGui.QApplication(sys.argv)
w = MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
|
{
"source": "jfunez/packtools",
"score": 2
}
|
#### File: packtools/packtools/stylechecker.py
```python
import re
import os
import logging
import itertools
from lxml import etree, isoschematron
from packtools.utils import setdefault
from packtools.checks import StyleCheckingPipeline, StyleError
HERE = os.path.dirname(os.path.abspath(__file__))
SCHEMAS = {
'SciELO-journalpublishing1.xsd': os.path.join(HERE, 'sps', 'xsd', 'sps.xsd'),
'sps.sch': os.path.join(HERE, 'sps', 'sps.sch'),
}
EXPOSE_ELEMENTNAME_PATTERN = re.compile(r"(?<=Element )'.*?'")
logger = logging.getLogger(__name__)
def XMLSchema(schema_name):
with open(SCHEMAS[schema_name]) as fp:
xmlschema_doc = etree.parse(fp)
xmlschema = etree.XMLSchema(xmlschema_doc)
return xmlschema
def XMLSchematron(schema_name):
with open(SCHEMAS[schema_name]) as fp:
xmlschema_doc = etree.parse(fp)
schematron = isoschematron.Schematron(xmlschema_doc)
return schematron
def search_element_name(message):
"""Try to locate in `message` the element name pointed as error.
:param message: is a lxml error log message.
"""
match = EXPOSE_ELEMENTNAME_PATTERN.search(message)
if match is None:
raise ValueError('Could not locate the element name in %s.' % message)
else:
return match.group(0).strip("'")
class XML(object):
def __init__(self, file):
"""Represents an XML under validation.
The XML can be retrieved given its filesystem path,
        a URL, a file-object or an etree instance.
The XML is validated against the JATS Publishing tag set
and the SPS Style.
:param file: Path to the XML file, URL or etree.
"""
if isinstance(file, etree._ElementTree):
self.lxml = file
else:
self.lxml = etree.parse(file)
self.xmlschema = XMLSchema('SciELO-journalpublishing1.xsd')
self.schematron = XMLSchematron('sps.sch')
self.ppl = StyleCheckingPipeline()
def find_element(self, tagname, lineno=None, fallback=True):
"""Find an element given a tagname and a line number.
        If no element is found, the method falls back to the root element (or
        raises ValueError when `fallback` is False and the root does not match).
:param tagname: string of the tag name.
:param lineno: int if the line it appears on the original source file.
:param fallback: fallback to root element when `element` is not found.
"""
for elem in self.lxml.findall('//' + tagname):
if lineno is None:
return elem
elif elem.sourceline == lineno:
logger.debug('method *find*: hit a regular element: %s.' % tagname)
return elem
else:
continue
else:
root = self.lxml.getroot()
if fallback:
return root
elif root.tag == tagname:
logger.debug('method *find*: hit a root element.')
return root
else:
raise ValueError("Could not find element '%s'." % tagname)
def validate(self):
"""Validate the source XML against the JATS Publishing Schema.
Returns a tuple comprising the validation status and the errors list.
"""
result = setdefault(self, '__validation_result', lambda: self.xmlschema.validate(self.lxml))
errors = setdefault(self, '__validation_errors', lambda: self.xmlschema.error_log)
return result, errors
def _validate_sch(self):
"""Validate the source XML against the SPS Schematron.
Returns a tuple comprising the validation status and the errors list.
"""
def make_error_log():
err_log = self.schematron.error_log
return [StyleError.from_schematron_errlog(err) for err in err_log]
result = setdefault(self, '__sch_validation_result', lambda: self.schematron.validate(self.lxml))
errors = setdefault(self, '__sch_validation_errors', make_error_log)
return result, errors
def validate_style(self):
"""Validate the source XML against the SPS Tagging guidelines.
Returns a tuple comprising the validation status and the errors list.
"""
def make_error_log():
errors = next(self.ppl.run(self.lxml, rewrap=True))
errors += self._validate_sch()[1]
return errors
        errors = setdefault(self, '__style_validation_errors', make_error_log)
        result = setdefault(self, '__style_validation_result', lambda: not bool(errors))
return result, errors
def _annotate_error(self, element, error):
"""Add an annotation prior to `element`, with `error` as the content.
The annotation is a <SPS-ERROR> element added prior to `element`.
        If `element` is the root element, then the error is annotated as a comment.
:param element: etree instance to be annotated.
:param error: string of the error.
"""
notice_element = etree.Element('SPS-ERROR')
notice_element.text = error
try:
element.addprevious(notice_element)
except TypeError:
            # In case of a root element, a comment is added.
element.addprevious(etree.Comment('SPS-ERROR: %s' % error))
def annotate_errors(self):
"""Add notes on all elements that have errors.
The errors list is generated as a result of calling both :meth:`validate` and
:meth:`validate_style` methods.
"""
v_result, v_errors = self.validate()
s_result, s_errors = self.validate_style()
if v_result and s_result:
return None
for error in itertools.chain(v_errors, s_errors):
try:
element_name = search_element_name(error.message)
except ValueError:
# could not find the element name
logger.info('Could not locate the element name in: %s' % error.message)
continue
if error.line is None:
err_element = self.find_element(element_name)
else:
err_element = self.find_element(element_name, lineno=error.line)
self._annotate_error(err_element, error.message)
def __str__(self):
return etree.tostring(self.lxml, pretty_print=True,
encoding='utf-8', xml_declaration=True)
def __unicode__(self):
return str(self).decode('utf-8')
def __repr__(self):
return '<packtools.stylechecker.XML xml=%s valid=%s>' % (self.lxml, self.validate()[0])
def read(self):
"""
Read the XML contents as text.
"""
return unicode(self)
def main():
import argparse
import sys
parser = argparse.ArgumentParser(description='stylechecker cli utility.')
parser.add_argument('--annotated', action='store_true')
parser.add_argument('xmlpath', help='Filesystem path or URL to the XML file.')
args = parser.parse_args()
xml = XML(args.xmlpath)
is_valid, errors = xml.validate()
style_is_valid, style_errors = xml.validate_style()
if args.annotated:
xml.annotate_errors()
sys.stdout.write(str(xml))
else:
if not is_valid:
print 'Invalid XML! Found %s errors:' % len(errors)
for err in errors:
print '%s,%s\t%s' % (err.line, err.column, err.message)
else:
print 'Valid XML! ;)'
if not style_is_valid:
print 'Invalid SPS Style! Found %s errors:' % len(style_errors)
for err in style_errors:
print err.message
else:
print 'Valid SPS Style! ;)'
if __name__ == '__main__':
main()
```
#### File: packtools/tests/test_schematron.py
```python
import unittest
from StringIO import StringIO
from lxml import isoschematron, etree
from packtools import stylechecker
SCH = etree.parse(stylechecker.SCHEMAS['sps.sch'])
class JournalIdTests(unittest.TestCase):
"""Tests for article/front/journal-meta/journal-id elements.
"""
def _run_validation(self, sample):
schematron = isoschematron.Schematron(SCH, phase='phase.journal-id')
return schematron.validate(etree.parse(sample))
def test_case1(self):
"""
presence(@nlm-ta) is True
presence(@publisher-id) is True
presence(@nlm-ta) v presence(@publisher-id) is True
"""
sample = """<article>
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">
Rev Saude Publica
</journal-id>
<journal-id journal-id-type="publisher-id">
RSP
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertTrue(self._run_validation(sample))
def test_case2(self):
"""
presence(@nlm-ta) is True
presence(@publisher-id) is False
presence(@nlm-ta) v presence(@publisher-id) is True
"""
sample = """<article>
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">
Rev Saude Publica
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertTrue(self._run_validation(sample))
def test_case3(self):
"""
presence(@nlm-ta) is False
presence(@publisher-id) is True
presence(@nlm-ta) v presence(@publisher-id) is True
"""
sample = """<article>
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">
RSP
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertTrue(self._run_validation(sample))
def test_case4(self):
"""
presence(@nlm-ta) is False
presence(@publisher-id) is False
presence(@nlm-ta) v presence(@publisher-id) is False
"""
sample = """<article>
<front>
<journal-meta>
<journal-id journal-id-type='doi'>
123.plin
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertFalse(self._run_validation(sample))
class JournalTitleGroupTests(unittest.TestCase):
"""Tests for article/front/journal-meta/journal-title-group elements.
"""
def _run_validation(self, sample):
schematron = isoschematron.Schematron(SCH, phase='phase.journal-title-group')
return schematron.validate(etree.parse(sample))
def test_journal_title_group_is_absent(self):
sample = """<article>
<front>
<journal-meta>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertFalse(self._run_validation(sample))
def test_case1(self):
"""
A: presence(journal-title) is True
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is True
A ^ B is True
"""
sample = """<article>
<front>
<journal-meta>
<journal-title-group>
<journal-title>
Revista de Saude Publica
</journal-title>
<abbrev-journal-title abbrev-type='publisher'>
Rev. Saude Publica
</abbrev-journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertTrue(self._run_validation(sample))
def test_case2(self):
"""
A: presence(journal-title) is True
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is False
A ^ B is False
"""
sample = """<article>
<front>
<journal-meta>
<journal-title-group>
<journal-title>
Revista de Saude Publica
</journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertFalse(self._run_validation(sample))
def test_case3(self):
"""
A: presence(journal-title) is False
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is True
A ^ B is False
"""
sample = """<article>
<front>
<journal-meta>
<journal-title-group>
<abbrev-journal-title abbrev-type='publisher'>
Rev. Saude Publica
</abbrev-journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertFalse(self._run_validation(sample))
def test_case4(self):
"""
A: presence(journal-title) is False
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is False
A ^ B is False
"""
sample = """<article>
<front>
<journal-meta>
<journal-title-group>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertFalse(self._run_validation(sample))
class ISSNTests(unittest.TestCase):
"""Tests for article/front/journal-meta/issn elements.
"""
def _run_validation(self, sample):
schematron = isoschematron.Schematron(SCH, phase='phase.issn')
return schematron.validate(etree.parse(sample))
def test_epub_is_absent(self):
sample = """<article>
<front>
<journal-meta>
<issn>0959-8138</issn>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertFalse(self._run_validation(sample))
def test_epub_is_present(self):
sample = """<article>
<front>
<journal-meta>
<issn pub-type='epub'>0959-8138</issn>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertTrue(self._run_validation(sample))
class PublisherTests(unittest.TestCase):
"""Tests for article/front/journal-meta/publisher elements.
"""
def _run_validation(self, sample):
schematron = isoschematron.Schematron(SCH, phase='phase.publisher')
return schematron.validate(etree.parse(sample))
def test_publisher_is_present(self):
sample = """<article>
<front>
<journal-meta>
<publisher>
<publisher-name>British Medical Journal</publisher-name>
</publisher>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertTrue(self._run_validation(sample))
def test_publisher_is_absent(self):
sample = """<article>
<front>
<journal-meta>
</journal-meta>
</front>
</article>
"""
sample = StringIO(sample)
self.assertFalse(self._run_validation(sample))
```
#### File: packtools/tests/test_stylechecker.py
```python
import unittest
from StringIO import StringIO
from tempfile import NamedTemporaryFile
from lxml import etree, isoschematron
from packtools import stylechecker
# valid: <a><b></b></a>
# invalid: anything else
sample_xsd = StringIO('''\
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
sample_sch = StringIO('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron">
<pattern id="sum_equals_100_percent">
<title>Sum equals 100%.</title>
<rule context="Total">
<assert test="sum(//Percent)=100">Element 'Total': Sum is not 100%.</assert>
</rule>
</pattern>
</schema>
''')
def setup_tmpfile(method):
def wrapper(self):
valid_tmpfile = NamedTemporaryFile()
valid_tmpfile.write(b'<a><b>bar</b></a>')
valid_tmpfile.seek(0)
self.valid_tmpfile = valid_tmpfile
method(self)
self.valid_tmpfile.close()
return wrapper
class XMLTests(unittest.TestCase):
@setup_tmpfile
def test_initializes_with_filepath(self):
self.assertTrue(stylechecker.XML(self.valid_tmpfile.name))
def test_initializes_with_etree(self):
fp = StringIO(b'<a><b>bar</b></a>')
et = etree.parse(fp)
self.assertTrue(stylechecker.XML(et))
def test_validation(self):
fp = etree.parse(StringIO(b'<a><b>bar</b></a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
result, errors = xml.validate()
self.assertTrue(result)
self.assertFalse(errors)
def test_invalid(self):
fp = etree.parse(StringIO(b'<a><c>bar</c></a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
result, _ = xml.validate()
self.assertFalse(result)
def test_invalid_errors(self):
# Default lxml error log.
fp = etree.parse(StringIO(b'<a><c>bar</c></a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
_, errors = xml.validate()
self.assertIsInstance(errors, etree._ListErrorLog)
def test_find(self):
fp = etree.parse(StringIO(b'<a>\n<b>bar</b>\n</a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
elem = xml.find_element('b', 2)
self.assertEqual(elem.tag, 'b')
self.assertEqual(elem.sourceline, 2)
def test_find_root_element(self):
fp = etree.parse(StringIO(b'<a>\n<b>bar</b>\n</a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
elem = xml.find_element('a', 1)
self.assertEqual(elem.tag, 'a')
self.assertEqual(elem.sourceline, 1)
def test_find_missing(self):
fp = etree.parse(StringIO(b'<a>\n<b>bar</b>\n</a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
# missing elements fallback to the root element
self.assertEquals(xml.find_element('c', 2), fp.getroot())
def test_find_missing_without_fallback(self):
fp = etree.parse(StringIO(b'<a>\n<b>bar</b>\n</a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
        # without fallback, a missing element raises ValueError
self.assertRaises(ValueError, lambda: xml.find_element('c', 2, fallback=False))
def test_annotate_errors(self):
fp = etree.parse(StringIO(b'<a><c>bar</c></a>'))
xml = stylechecker.XML(fp)
xml.xmlschema = etree.XMLSchema(etree.parse(sample_xsd))
xml.annotate_errors()
xml_text = xml.read()
self.assertIn("<SPS-ERROR>Element 'c': This element is not expected. Expected is ( b ).</SPS-ERROR>", xml_text)
self.assertTrue(isinstance(xml_text, unicode))
def test_validation_schematron(self):
fp = etree.parse(StringIO(b'<Total><Percent>70</Percent><Percent>30</Percent></Total>'))
xml = stylechecker.XML(fp)
xml.schematron = isoschematron.Schematron(etree.parse(sample_sch))
result, errors = xml._validate_sch()
self.assertTrue(result)
self.assertFalse(errors)
def test_invalid_schematron(self):
fp = etree.parse(StringIO(b'<Total><Percent>60</Percent><Percent>30</Percent></Total>'))
xml = stylechecker.XML(fp)
xml.schematron = isoschematron.Schematron(etree.parse(sample_sch))
result, errors = xml._validate_sch()
self.assertFalse(result)
self.assertTrue(errors)
def test_annotate_errors_schematron(self):
fp = etree.parse(StringIO(b'<Total><Percent>60</Percent><Percent>30</Percent></Total>'))
xml = stylechecker.XML(fp)
xml.schematron = isoschematron.Schematron(etree.parse(sample_sch))
xml.annotate_errors()
xml_text = xml.read()
self.assertIn("<!--SPS-ERROR: Element 'Total': Sum is not 100%.-->", xml_text)
self.assertTrue(isinstance(xml_text, unicode))
class ElementNamePatternTests(unittest.TestCase):
pattern = stylechecker.EXPOSE_ELEMENTNAME_PATTERN
def test_case1(self):
message = "Element 'article', attribute 'dtd-version': [facet 'enumeration'] The value '3.0' is not an element of the set {'1.0'}."
self.assertEqual(self.pattern.search(message).group(0), "'article'")
def test_case2(self):
message = "Element 'article', attribute 'dtd-version': '3.0' is not a valid value of the local atomic type."
self.assertEqual(self.pattern.search(message).group(0), "'article'")
def test_case3(self):
message = "Element 'author-notes': This element is not expected. Expected is one of ( label, title, ack, app-group, bio, fn-group, glossary, ref-list, notes, sec )."
self.assertEqual(self.pattern.search(message).group(0), "'author-notes'")
def test_case4(self):
message = "Element 'journal-title-group': This element is not expected. Expected is ( journal-id )."
self.assertEqual(self.pattern.search(message).group(0), "'journal-title-group'")
def test_case5(self):
message = "Element 'contrib-group': This element is not expected. Expected is one of ( article-id, article-categories, title-group )."
self.assertEqual(self.pattern.search(message).group(0), "'contrib-group'")
```
|
{
"source": "jfunez/pyqt-calc",
"score": 3
}
|
#### File: jfunez/pyqt-calc/signals_slots.py
```python
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QWidget
def greeting():
"""Slot function."""
if msg.text():
msg.setText("")
else:
msg.setText("Hello World!")
app = QApplication(sys.argv)
window = QWidget()
window.setWindowTitle('Signals and slots')
layout = QVBoxLayout()
btn = QPushButton('Greet')
btn.clicked.connect(greeting) # Connect clicked to greeting()
layout.addWidget(btn)
msg = QLabel('')
layout.addWidget(msg)
window.setLayout(layout)
window.show()
sys.exit(app.exec_())
```
|
{
"source": "jfunez/sauron-rule-engine",
"score": 4
}
|
#### File: sauron-rule-engine/examples/simple.py
```python
from sauron_rule_engine.rule_engine import RuleEngine
engine = RuleEngine()
@engine.condition("First Condition")
def first_condition(lower_number: int = 10, greater_number: int = 20) -> bool:
"""
    Checks if the first number is lower than the second
    - lower_number: Number expected to be low
    - greater_number: Number expected to be high
"""
return lower_number < greater_number
@engine.condition()
def second_condition():
"""
Takes no argument and always returns True
"""
return True
@engine.action("The Action")
def print_the_equation(
lower_number: int = 10, greater_number: int = 20
) -> None:
"""
    Prints a statement asserting that the first number is lower than the second number
    - lower_number: Number expected to be low
    - greater_number: Number expected to be high
"""
print(f"{lower_number} < {greater_number}")
rule = {
"conditions": [
{
"name": "first_condition",
"args": {"lower_number": 3, "greater_number": 10},
}
],
"actions": [
{
"name": "print_the_equation",
"args": {"lower_number": 3, "greater_number": 10},
}
],
}
engine.run(rule)
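# A sketch of the expected behavior (reasoned from the code above, not captured
# from a real run): the rule only references first_condition, 3 < 10 holds, so
# engine.run(rule) should fire print_the_equation and print "3 < 10".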
```
|
{
"source": "j-funk/pedantic",
"score": 3
}
|
#### File: j-funk/pedantic/pedantic.py
```python
from docopt import docopt
import pedantic.validator_service as val
def runserver():
with open(arguments['SCHEMA_PATH'], 'r') as f:
schema = f.read()
whitelist_path = arguments['--whitelist']
if not whitelist_path:
whitelist = None
else:
with open(whitelist_path, 'r') as f:
whitelist = f.read()
port = arguments['--port']
if not port:
port = 5000
else:
port = int(port)
val.set_proxy_settings(schema, whitelist)
val.app.run(port=port)
if __name__ == '__main__':
arguments = docopt(__doc__, version='Pedantic 0.1')
print(arguments)
runserver()
```
|
{
"source": "j-funk/rpi-thermostat",
"score": 2
}
|
#### File: j-funk/rpi-thermostat/temp_logger.py
```python
try:
import Adafruit_DHT
# Move mocks into test / dev layer
except ImportError:
    class Adafruit_DHTMOCK():
        DHT22 = None  # placeholder sensor type so Adafruit_DHT.DHT22 resolves on the mock
        def read_retry(self, sensor, pin):
            return 25, 50
    Adafruit_DHT = Adafruit_DHTMOCK()
import requests
import logging
from apscheduler.schedulers.background import BlockingScheduler
# Extract hardcoded url to config / build layer
THERMOSTAT_URI = 'http://192.168.1.214:5000/api/v1/temperature/'
def main():
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, '17') # 17 extract to config layer
if humidity is not None and temperature is not None:
# We should be checking here if a connection is able to succeed / fail and logging the
# responses
requests.post(THERMOSTAT_URI, data=dict(temperature=temperature, humidity=humidity))
logger.warn('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
else:
logger.error('Failed to get reading. Try again!')
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN, format='%(levelname)s - %(asctime)s %(message)s')
logger = logging.getLogger('main')
scheduler = BlockingScheduler()
scheduler.add_job(main, 'interval', seconds=60) # this interval should be in config layer
logger.warn('starting scheduler')
scheduler.start()
```
|
{
"source": "jfunston/chogori-platform",
"score": 2
}
|
#### File: k2/common/gen_macros.py
```python
def mname_with_vars(mn, i, vars):
return ''.join([mn, str(i), "(", ','.join(vars), ")"])
def genlst(N):
print('// _K2_MKLIST(...) generates a list for formatting variables, e.g. _K2_MKLIST(a, b, c) -> "a" = a, "b" = b, "c" = c')
vrs=["a1"]
mn="_K2_MKLIST"
print("#define", mn + "(...) _K2_OVERLOADED_MACRO(" + mn + ", __VA_ARGS__)")
print("#define", mn + "0()")
print("#define", mn + '1(a1) #a1 "={}"')
for i in range(2,N):
vrs.append("a" + str(i))
v = " ".join(["#define",
mname_with_vars(mn, i, vrs),
mname_with_vars(mn, i-1, vrs[:-1]),
'", "',
mn + "1(a" +str(i) + ")"])
print(v)
def genvars(N):
print('// _K2_MKVARS(...) generates a list for formatting variables, e.g. _K2_MKVARS(a, b, c) -> o.a, o.b, o.c')
vrs=["a1"]
mn = "_K2_MKVARS"
print("#define", mn + "(...) _K2_OVERLOADED_MACRO(" + mn + ", __VA_ARGS__)")
print("#define", mn + "0()")
print("#define", mn + "1(a1) , o.a1")
for i in range(2,N):
vrs.append("a" + str(i))
v = " ".join(["#define",
mname_with_vars(mn, i, vrs),
mname_with_vars(mn, i-1, vrs[:-1]),
mn + "1(a" +str(i) + ")"])
print(v)
def genfromjson(N):
print('// _K2_FROM_JSON(...) generates from-json entries, e.g. _K2_FROM_JSON(a) -> j.at("a").get_to(o.a);')
vrs = ["a1"]
mn = "_K2_FROM_JSON"
print("#define", mn + "(...) _K2_OVERLOADED_MACRO(" + mn + ", __VA_ARGS__)")
print("#define", mn + "0() (void)o; (void)j;")
print("#define", mn + "1(a1) j.at(#a1).get_to(o.a1);")
for i in range(2, N):
vrs.append("a" + str(i))
v = " ".join(["#define",
mname_with_vars(mn, i, vrs),
mname_with_vars(mn, i-1, vrs[:-1]),
mn + "1(a" + str(i) + ")"])
print(v)
def gentojson(N):
print(
'// _K2_TO_JSON(...) generates from-json entries, e.g. _K2_TO_JSON(a1, a2) -> {"a1", o.a1}, {"a2", o.a2}')
vrs = ["a1"]
mn = "_K2_TO_JSON"
print("#define", mn + "(...) _K2_OVERLOADED_MACRO(" + mn + ", __VA_ARGS__)")
print("#define", mn + "0()")
print("#define", mn + "1(a1) {#a1, o.a1}")
for i in range(2, N):
vrs.append("a" + str(i))
v = " ".join(["#define",
mname_with_vars(mn, i, vrs),
mname_with_vars(mn, i-1, vrs[:-1]),
", ",
mn + "1(a" + str(i) + ")"])
print(v)
def gentostring(N):
print(
'// _K2_TO_STRING_LIST(...) converts list of args to list of stringed args, e.g. _K2_TO_STRING_LIST(a1, a2) -> "a1", "a2"')
vrs = ["a1"]
mn = "_K2_TO_STRING_LIST"
print("#define", mn + "(...) _K2_OVERLOADED_MACRO(" + mn + ", __VA_ARGS__)")
print("#define", mn + "0()")
print("#define", mn + "1(a1) #a1")
for i in range(2, N):
vrs.append("a" + str(i))
v = " ".join(["#define",
mname_with_vars(mn, i, vrs),
mname_with_vars(mn, i-1, vrs[:-1]),
",",
mn + "1(a" + str(i) + ")"])
print(v)
def gen_enum_if_stmt(N):
print(
'''// _K2_ENUM_IF_STMT(Name, ...) generates an if statement for each arg to fit in the enum-generator
// macro, e.g. _K2_ENUM_IF_STMT(NAME, a1, a2) -> if(str=="a1") return NAME::a1; if (str=="a2")...
''')
vrs = ["NAME", "a1"]
mn = "_K2_ENUM_IF_STMT"
print("#define", mn + "(...) _K2_OVERLOADED_MACRO(" + mn + ", __VA_ARGS__)")
print("#define", mn + "0()") # 0-arg not supported
print("#define", mn + "1(NAME)") # 1-arg not supported
print("#define", mn + "2(NAME,a1) if(str==#a1) return NAME::a1;")
for i in range(3, N):
vrs.append("a" + str(i-1))
v = " ".join(["#define",
mname_with_vars(mn, i, vrs),
mname_with_vars(mn, i-1, vrs[:-1]),
mn + "2(NAME, a" + str(i-1) + ")"])
print(v)
if __name__ == "__main__":
print('''/*
MIT License
Copyright(c) 2021 Future<NAME>
Permission is hereby granted,
free of charge, to any person obtaining a copy of this software and associated documentation files(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions :
The above copyright notice and this permission notice shall be included in all copies
or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS",
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER
LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/* This entire file has been generated by common/gen_macros.py */
#pragma once
#include "MacroUtils.h"
''')
N = 60
genlst(N)
print()
genvars(N)
print()
genfromjson(N)
print()
gentojson(N)
print()
gentostring(N)
print()
gen_enum_if_stmt(N)
```
|
{
"source": "jfurcean/Adafruit_CircuitPython_FeatherWing",
"score": 3
}
|
#### File: Adafruit_CircuitPython_FeatherWing/adafruit_featherwing/pixelmatrix.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FeatherWing.git"
# pylint: disable-msg=unsubscriptable-object, unsupported-assignment-operation
from adafruit_featherwing.auto_writeable import AutoWriteable
class PixelMatrix(AutoWriteable):
"""Base Class for DotStar and NeoPixel FeatherWings
The feather uses pins D13 and D11"""
def __init__(self):
self.rows = 0
self.columns = 0
self._matrix = None
super().__init__()
def __setitem__(self, indices, value):
"""
indices can be one of three things:
x and y ints that are calculated to the DotStar index
a slice of DotStar indexes with a set of values that match the slice
a single int that specifies the DotStar index
value can be one of three things:
a (r,g,b) list/tuple
a (r,g,b, brightness) list/tuple
a single, longer int that contains RGB values, like 0xFFFFFF
brightness, if specified should be a float 0-1
"""
self._matrix[self._get_index(indices)] = value
self._update()
def __getitem__(self, indices):
"""
indices can be one of three things:
x and y ints that are calculated to the DotStar index
a slice of DotStar indexes to retrieve
a single int that specifies the DotStar index
"""
return self._matrix[self._get_index(indices)]
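    # Illustrative (hypothetical) addressing examples, assuming `wing` is an
    # instance of a concrete subclass such as a NeoPixel/DotStar FeatherWing:
    #   wing[0, 0] = (255, 0, 0)        # x, y pair -> one pixel
    #   wing[3] = 0x00FF00              # flat pixel index with a packed RGB int
    #   wing[0:4] = [(0, 0, 255)] * 4   # slice of pixel indexes
    #   color = wing[0, 0]              # reads accept the same index forms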
def _get_index(self, indices):
"""
Figure out which DotStar to address based on what was passed in
"""
if isinstance(indices, int):
if not 0 <= indices < self.rows * self.columns:
raise ValueError("The index of {} is out of range".format(indices))
return indices
if isinstance(indices, slice):
return indices
if len(indices) == 2:
x, y = indices
if not 0 <= x < self.columns:
raise ValueError("The X value of {} is out of range".format(x))
if not 0 <= y < self.rows:
raise ValueError("The Y value of {} is out of range".format(y))
return y * self.columns + x
raise ValueError("Index must be 1 or 2 number")
def _update(self):
"""
Update the Display automatically if auto_write is set to True
"""
if self._auto_write:
self._matrix.show()
def fill(self, color=0):
"""
        Fills all of the pixels with a color, or turns them all off if no color is given.
        :param color: (Optional) The color to fill the pixels with (default=0, i.e. off)
:type color: list/tuple or int
"""
self._matrix.fill(color)
self._update()
def show(self):
"""
Update the Pixels. This is only needed if auto_write is set to False
This can be very useful for more advanced graphics effects.
"""
self._matrix.show()
def shift_right(self, rotate=False):
"""
Shift all pixels right
:param rotate: (Optional) Rotate the shifted pixels to the left side (default=False)
"""
for y in range(0, self.rows):
last_pixel = self._matrix[(y + 1) * self.columns - 1] if rotate else 0
for x in range(self.columns - 1, 0, -1):
self._matrix[y * self.columns + x] = self._matrix[
y * self.columns + x - 1
]
self._matrix[y * self.columns] = last_pixel
self._update()
def shift_left(self, rotate=False):
"""
Shift all pixels left
:param rotate: (Optional) Rotate the shifted pixels to the right side (default=False)
"""
for y in range(0, self.rows):
last_pixel = self._matrix[y * self.columns] if rotate else 0
for x in range(0, self.columns - 1):
self._matrix[y * self.columns + x] = self._matrix[
y * self.columns + x + 1
]
self._matrix[(y + 1) * self.columns - 1] = last_pixel
self._update()
def shift_up(self, rotate=False):
"""
Shift all pixels up
:param rotate: (Optional) Rotate the shifted pixels to bottom (default=False)
"""
for x in range(0, self.columns):
last_pixel = (
self._matrix[(self.rows - 1) * self.columns + x] if rotate else 0
)
for y in range(self.rows - 1, 0, -1):
self._matrix[y * self.columns + x] = self._matrix[
(y - 1) * self.columns + x
]
self._matrix[x] = last_pixel
self._update()
def shift_down(self, rotate=False):
"""
Shift all pixels down
:param rotate: (Optional) Rotate the shifted pixels to top (default=False)
"""
for x in range(0, self.columns):
last_pixel = self._matrix[x] if rotate else 0
for y in range(0, self.rows - 1):
self._matrix[y * self.columns + x] = self._matrix[
(y + 1) * self.columns + x
]
self._matrix[(self.rows - 1) * self.columns + x] = last_pixel
self._update()
@property
def brightness(self):
"""
Overall brightness of the display
"""
return self._matrix.brightness
@brightness.setter
def brightness(self, brightness):
self._matrix.brightness = min(max(brightness, 0.0), 1.0)
self._update()
```
#### File: Adafruit_CircuitPython_FeatherWing/adafruit_featherwing/tft_featherwing_35.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FeatherWing.git"
from adafruit_hx8357 import HX8357
# pylint: disable-msg=too-few-public-methods
from adafruit_featherwing.tft_featherwing import TFTFeatherWing
class TFTFeatherWing35(TFTFeatherWing):
"""Class representing an `TFT FeatherWing 3.5
<https://www.adafruit.com/product/3651>`_.
"""
def __init__(self, spi=None, cs=None, dc=None):
super().__init__(spi, cs, dc)
self.display = HX8357(self._display_bus, width=480, height=320)
```
|
{
"source": "jfurcean/u2if",
"score": 2
}
|
#### File: source/machine/i2c.py
```python
from .u2if import Device
from . import u2if_const as report_const
# TODO: deinit
class I2C(object):
def __init__(self, *, i2c_index=0, frequency=100000, pullup=False):
self.i2c_index = i2c_index
self._initialized = False
self._device = Device()
self._i2c_configure(frequency, pullup)
def __del__(self):
self.deinit()
def deinit(self):
if not self._initialized:
return
res = self._device.send_report(bytes([report_const.I2C0_DEINIT if self.i2c_index == 0 else report_const.I2C1_DEINIT]))
if res[1] != report_const.OK:
raise RuntimeError("I2c deinit error.")
# MicroPython I2C methods
def scan(self):
return self._i2c_scan()
def readfrom(self, addr, nbytes, stop=True):
buf = bytearray(nbytes)
self.readfrom_into(addr, buf, stop)
return buf
def readfrom_into(self, addr, buf, stop=True):
return self._i2c_readfrom_into(addr, buf, stop)
def writeto(self, addr, buf, stop=True):
return self._i2c_writeto(addr, buf, stop)
def writevto(self, addr, vector, stop=True):
raise RuntimeError('Not implemented')
# MicroPython I2C convenient methods
def readfrom_mem(self, addr, memaddr, nbytes):
self.writeto(addr, bytes([memaddr]), False)
return self.readfrom(addr, nbytes, True)
def readfrom_mem_into(self, addr, memaddr, buf):
self.writeto(addr, bytes([memaddr]), False)
return self.readfrom_into(addr, buf, True)
def writeto_mem(self, addr, memaddr, buf):
return self.writeto(addr, bytes([memaddr]) + bytes(buf), False)
# Internal methods
def _i2c_configure(self, baudrate=100000, pullup=False) :
res = self._device.send_report(
bytes([report_const.I2C0_INIT if self.i2c_index == 0 else report_const.I2C1_INIT,
0x00 if not pullup else 0x01])
+ baudrate.to_bytes(4, byteorder='little')
)
if res[1] != report_const.OK:
raise RuntimeError("I2C init error.")
def _i2c_scan(self, start=0, end=0x79):
found = []
for addr in range(start, end + 1):
try:
self._i2c_writeto(addr, b"\x00\x00\x00")
except RuntimeError:
continue
found.append(addr)
return found
def _i2c_readfrom_into(self, addr, buf, stop=True):
read_size = len(buf)
report_id = report_const.I2C0_READ if self.i2c_index == 0 else report_const.I2C1_READ
res = self._device.send_report(bytes([report_id, addr, 0x01 if stop else 0x00, read_size]))
if res[1] != report_const.OK:
raise RuntimeError("I2C read error.")
for i in range(read_size):
buf[i] = res[i+2]
def _i2c_writeto(self, addr, buf, stop=True):
if stop and len(buf) > 3 * report_const.HID_REPORT_SIZE:
self._i2c_writeto_stream(addr, buf, stop)
else:
self._i2c_writeto_direct(addr, buf, stop)
def _i2c_writeto_stream(self, addr, buf, stop=True):
if not stop:
raise RuntimeError('_i2c_writeto_stream with not stop Not implemented')
self._device.reset_output_serial()
report_id = report_const.I2C0_WRITE_FROM_UART if self.i2c_index == 0 else report_const.I2C1_WRITE_FROM_UART
remain_bytes = len(buf)
res = self._device.send_report(bytes([report_id, addr]) + remain_bytes.to_bytes(4, byteorder='little'))
if res[1] != report_const.OK:
raise RuntimeError("I2C write error.")
self._device.write_serial(buf)
res = self._device.read_hid(report_id)
if res[1] != report_const.OK:
raise RuntimeError("I2C write error.")
def _i2c_writeto_direct(self, addr, buf, stop=True):
report_id = report_const.I2C0_WRITE if self.i2c_index == 0 else report_const.I2C1_WRITE
stop_flag = 0x01 if stop else 0x00
start = 0
end = len(buf)
while (end - start) > 0:
remain_bytes = end - start
chunk = min(remain_bytes, report_const.HID_REPORT_SIZE - 7)
res = self._device.send_report(
bytes([report_id, addr, stop_flag]) + remain_bytes.to_bytes(4, byteorder='little') + buf[start : (start + chunk)])
if res[1] != report_const.OK:
raise RuntimeError("I2C write error.")
start += chunk
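# Minimal usage sketch (hypothetical bus/address/register values; assumes a
# u2if board is attached and some device answers at address 0x68 on bus 0):
#   i2c = I2C(i2c_index=0, frequency=400000, pullup=True)
#   print(i2c.scan())                        # addresses that acknowledged
#   i2c.writeto_mem(0x68, 0x6B, b"\x00")     # write one register
#   data = i2c.readfrom_mem(0x68, 0x3B, 6)   # read 6 bytes starting at 0x3B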
```
|
{
"source": "JFurness1/DrunkenBishop",
"score": 3
}
|
#### File: JFurness1/DrunkenBishop/DrunkenBishop.py
```python
import numpy as np
import hashlib
class Board:
START = None
END = None
XDIM = 17
YDIM = 9
symbols = [' ', '.', 'o', '+', '=', '*', 'B', '0', 'X', '@', '%', '&', '#', '/', '^']
START_SYMBOL = 'S'
END_SYMBOL = 'E'
field = None # initialised to (XDIM, YDIM) numpy array of zeros
i_string = None
i_bytes = None
def __init__(self, title:str=None, xdim:int=17, ydim:int=9):
self.XDIM = xdim
self.YDIM = ydim
self.START = (self.XDIM//2, self.YDIM//2)
self.title = title
self.clear_board()
def clear_board(self):
        self.field = np.zeros((self.XDIM, self.YDIM), dtype=int)
def resize(self, xdim: int, ydim: int):
self.XDIM = xdim
self.YDIM = ydim
self.clear_board()
def __str__(self):
if self.title is None:
ostr = "+" + "-"*self.XDIM + "+\n"
else:
if len(self.title) > self.XDIM - 2:
tstr = "["+self.title[:self.XDIM - 5]+"...]"
else:
tstr = "["+self.title+"]"
ostr = "+" + tstr.center(self.XDIM, '-') + "+\n"
for y in range(self.YDIM):
ostr += "|"
for x in range(self.XDIM):
if x == self.START[0] and y == self.START[1]:
ostr += self.START_SYMBOL
elif x == self.END[0] and y == self.END[1]:
ostr += self.END_SYMBOL
else:
# Note modulo to wrap symbols around for very long walks
ostr += self.symbols[self.field[x, y] % len(self.symbols)]
ostr += "|\n"
ostr += "+" + "-"*(self.XDIM) + "+"
return ostr
def make_art(self, istr: str, do_md5:bool=True, is_hex:bool=False) -> str:
self.i_string = istr
if is_hex:
self.i_bytes = bytes.fromhex(self.i_string)
elif do_md5:
md5 = hashlib.md5()
md5.update(self.i_string.encode('utf-8'))
self.i_bytes = md5.digest()
else:
self.i_bytes = self.i_string.encode('utf-8')
bishop = list(self.START) # copy the start position
# Don't need to increment start as it will always be "S"
for byte in self.i_bytes:
# Extract the pairs of bits from the byte into an array.
# Least significant first
w = byte
pairs = []
for i in range(4):
b1 = int(w&1 != 0)
w = w >> 1
b2 = int(w&1 != 0)
w = w >> 1
pairs.append((b2, b1))
for p in pairs:
dy = p[0]*2 - 1
dx = p[1]*2 - 1
# Move the bishop, sliding along walls as necessary
bishop[0] = max(min(bishop[0] + dx, self.XDIM - 1), 0)
bishop[1] = max(min(bishop[1] + dy, self.YDIM - 1), 0)
# Drop a coin on the current square
self.field[bishop[0], bishop[1]] += 1
# We are done. Mark the end point
self.END = bishop
return str(self)
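# Worked example of the bit-pair walk above (derived by hand, not program
# output): the byte 0b10011100 yields, least-significant pair first,
# (b2, b1) = (0, 0), (1, 1), (0, 1), (1, 0), which translate to (dx, dy) moves
# of (-1, -1), (+1, +1), (+1, -1), (-1, +1), each clamped to the board edges.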
def test():
test_hash = "fc94b0c1e5b0987c5843997697ee9fb7"
print("test input:")
print(test_hash)
print("\nAs hex input:")
expected_hex = (
"+---[Test Hash]---+\n"
"| .=o. . |\n"
"| . *+*. o |\n"
"| =.*..o |\n"
"| o + .. |\n"
"| S o. |\n"
"| o . |\n"
"| . . . |\n"
"| o .|\n"
"| E.|\n"
"+-----------------+")
b = Board(title="Test Hash")
out_hex = b.make_art(test_hash, is_hex=True)
print_test_comparison(expected_hex, out_hex)
passed_hex = out_hex == expected_hex
if passed_hex:
print("TEST PASSED")
else:
print("TEST FAILED")
b.clear_board()
print("\nWith MD5 hash:")
expected_md5 = (
"+---[Test Hash]---+\n"
"| . . |\n"
"| . = . . |\n"
"| o o . o |\n"
"| . . . . |\n"
"| S + . |\n"
"| E. + |\n"
"| . .o |\n"
"| ..oo.o |\n"
"| +Bo=*.. |\n"
"+-----------------+")
out_md5 = b.make_art(test_hash, do_md5=True)
print_test_comparison(expected_md5, out_md5)
passed_md5 = out_md5 == expected_md5
if passed_md5:
print("TEST PASSED")
else:
print("TEST FAILED")
b.clear_board()
print("\nAs raw string:")
expected_raw = (
"+---[Test Hash]---+\n"
"| o#XBoo.o.o. |\n"
"| o.#%Boooo.E |\n"
"| + Xo=oo = o |\n"
"| . o o |\n"
"| S |\n"
"| + o |\n"
"| |\n"
"| |\n"
"| |\n"
"+-----------------+")
out_raw = b.make_art(test_hash, do_md5=False)
print_test_comparison(expected_raw, out_raw)
passed_raw = out_raw == expected_raw
if passed_raw:
print("TEST PASSED")
else:
print("TEST FAILED")
if passed_hex and passed_md5 and passed_raw:
print("\nALL TESTS PASSED! :)")
else:
print("\nSOME TESTS FAILED! :(")
def print_test_comparison(expected, got):
expected_parts = expected.split("\n")
got_parts = got.split("\n")
fstr = "{{:^{0}}} {{:^{0}}}".format(len(expected_parts[0]))
print(fstr.format("EXPECTED", "GOT"))
for i in range(len(expected_parts)):
print(fstr.format(expected_parts[i], got_parts[i]))
if __name__ == "__main__":
test()
```
|
{
"source": "jfuruness/lib_bgp_data",
"score": 3
}
|
#### File: as_rank_v2/tests/test_asrank_parser_v2.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pytest
from ..as_rank_v2_parser import AS_Rank_Parser_V2
from ..tables import AS_Rank_V2
@pytest.mark.asrank_parser_v2
class Test_AS_Rank_Parser_V2:
def test_quick_run(self):
"""Checks the first 10 ranked ASNs and makes sure everything
is formatted properly and inserted"""
parser = AS_Rank_Parser_V2()
parser._run(0, 10)
with AS_Rank_V2() as db:
result = db.execute('SELECT count(*) FROM as_rank_v2')
assert result[0]['count'] == 10
first_row = db.execute('SELECT * FROM as_rank_v2 LIMIT 1')
first_row = first_row[0]
links = first_row['links']
org = first_row['organization']
rank = first_row['rank']
assert links is not None
for link in links:
assert type(link) == int
assert type(org) == str
assert rank == 1
@pytest.mark.slow
def test_full(self):
"""This does not verify formatting, quick_run does that. This
instead makes sure that the parser pulls all ASNs from Caida.
Also this is INCREDIBLY SLOW, takes 6-ish hours to complete"""
parser = AS_Rank_Parser_V2()
count = parser._run() - 1
with AS_Rank_V2() as db:
result = db.execute('SELECT count(*) FROM as_rank_v2')
assert result[0]['count'] == count
```
#### File: collectors/as_rank_website/tables.py
```python
__author__ = "<NAME>, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
from ...utils.database import Generic_Table
class AS_Rank_Table(Generic_Table):
"""ASRankTable class, inherits from Generic_Table.
For a more in depth explanation see the top of the file.
"""
__slots__ = []
name = 'as_rank'
columns = ['as_rank', 'asn', 'organization', 'country', 'cone_size']
def _create_tables(self):
"""Creates new table if it doesn't already exist. The contents will
be cleared everytime asrank_website_parser is run because information
in the datebase may be out of date.
"""
sql = f"""CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (
as_rank bigint,
asn bigint,
organization varchar (250),
country varchar (2),
cone_size integer
);"""
self.cursor.execute(sql)
def get_top_100_ases(self):
"""Returns top 100 ases by as rank"""
sql = f"""SELECT * FROM {self.name} ORDER BY as_rank LIMIT 100;"""
return [x["asn"] for x in self.execute(sql)]
```
#### File: mrt/mrt_base/mrt_file.py
```python
__authors__ = ["<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import os
import logging
from .tables import MRT_Announcements_Table
from ....utils import utils
from ....utils.base_classes import File
class MRT_File(File):
"""Converts MRT files to CSVs and then inserts them into a database.
In depth explanation in README.
"""
__slots__ = []
def parse_file(self, bgpscanner=True):
"""Parses a downloaded file and inserts it into the database
if bgpscanner is set to True, bgpscanner is used to parser files
which is faster, but ignores malformed announcements. While
these malformed announcements are few and far between, bgpdump
does not ignore them and should be used for full data runs. For
testing however, bgpscanner is much faster and has almost all
data required. More in depth explanation at the top of the file
        Note that when tested for speed, logging did not appear to slow down
        parse_file, though that test may not have been set up correctly.
"""
# Sets CSV path
self.csv_name = f"{self.csv_dir}/{os.path.basename(self.path)}.csv"
# Parses the MRT file into a csv file
self._convert_dump_to_csv(bgpscanner)
# Inserts the csv file into the MRT_Announcements Table
utils.csv_to_db(MRT_Announcements_Table, self.csv_name)
# Deletes all old files
utils.delete_paths([self.path, self.csv_name])
utils.incriment_bar(logging.root.level)
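    # Sketch of the trade-off described in the docstring above (call is
    # hypothetical): a full, lossless run would use parse_file(bgpscanner=False)
    # so bgpdump keeps the malformed announcements that bgpscanner drops.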
########################
### Helper Functions ###
########################
def _convert_dump_to_csv(self, bgpscanner=True):
"""Parses MRT file into a CSV
This function uses bgpscanner to first be able to read
the MRT file. This is because BGPScanner is the fastest tool to
use for this task. The drawback of bgpscanner is that it ignores
malformed announcements. There aren't a lot of these, and it's
much faster, but for a full data set the slower tool bgpdump
should be used. Then the sed commands parse the file and
format the data for a CSV. Then this is stored as a tab
delimited CSV file, and the original is deleted. For a more in
depth explanation see top of file. For parsing spefics, see each
function listed below.
"""
args = self._bgpscanner_args() if bgpscanner else self._bgpdump_args()
# writes to a csv
args += '> ' + self.csv_name
utils.run_cmds(args)
logging.debug(f"Wrote {self.csv_name}\n\tFrom {self.url}")
utils.delete_paths(self.path)
def _bgpscanner_args(self):
"""Parses MRT file into a CSV using bgpscanner
For a more in depth explanation see _convert_dump_to_csv. For
explanation on specifics of the parsing, see below.
"""
        # I know this may seem unmaintainable, that's because this is a
        # fast way to do this. Please, calm down.
# Turns out not fast - idk if other regexes are faster
# bgpscanner outputs this format:
# TYPE|SUBNETS|AS_PATH|NEXT_HOP|ORIGIN|ATOMIC_AGGREGATE|
# AGGREGATOR|COMMUNITIES|SOURCE|TIMESTAMP|ASN 32 BIT
# Example: =|172.16.58.3/24|14061 6453 9498 45528 45528|
# 192.168.3.11|i|||
# 6453:50 6453:1000 6453:1100 6453:1113 14061:402 14061:2000
# 14061:2002 14061:4000 14061:4002|192.168.3.11 14061|
# 1545345848|1
# Also please note: sed needs escape characters, so if something
# is escaped once it is for sed. If it is escaped twice, it is
# to escape something in sed, and a second escape for the python
# Below are the things that need to be escaped:
# Parenthesis are escaped because they are sed capture groups
# + is escaped to get sed's special plus (at least one)
# . is escaped for sed to recognize it as a period to match
# / is escaped for sed to match the actual forward slash
# performs bgpdump on the file
bash_args = 'bgpscanner '
bash_args += self.path
# Cuts out columns we don't need
bash_args += ' | cut -d "|" -f1,2,3,10'
# Now we have TYPE|SUBNETS|AS_PATH|TIMESTAMP
# Ex: =|172.16.58.3/24|14061 6453 9498 45528 45528|1545345848
# Makes sure gets announcement, withdrawl, or rib
# -n for no output if nothing there
bash_args += ' | sed -n "s/[=|+|-]|'
# Now we focus on SUBNETS|AS_PATH|TIMESTAMP
# Ex: 172.16.58.3/24|14061 6453 9498 45528 45528|1545345848
        # Gets four capture groups.
# The first capture group is the prefix
# Captures chars normally in IPV4 or IPV6 prefixes
bash_args += '\([0|1|2|3|4|5|6|7|8|9|%|\.|\:|a|b|c|d|e|f|/]\+\)|'
# I left this old code here in case someone can figure it out
# https://unix.stackexchange.com/questions/145402/
# It appears sed doesn't support this kind of alternation
# It appears you cannot perform alternation with char classes
# So while it is slower to use ^, that is the way it will run
# until someone can figure out a crazier sed command. And even
# if you could, it would appear that it wouldn't be cross
        # platform compatible, so it probably shouldn't be done anyways
# The regex for prefix is done in this way instead of non
# greedy matching because sed doesn't have non greedy matching
# so instead the | must be excluded which is slower than this
# bash_args += '\([[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
# bash_args += '\.[[:digit:]]\+\/[[:digit:]]\+|'
# Now we match for ipv6 prefixes
# bash_args += '[0|1|2|3|4|5|6|7|8|9|%|\:|\.|a|b|c|d|e|f]*]\)|'
# Now we focus on AS_PATH|TIMESTAMP
# Ex: 14061 6453 9498 45528 45528|1545345848
# Second capture group is as path except for the last number
bash_args += '\([^{]*[[:space:]]\)*'
# Now we have all but the last number
# Ex: 45528|1545345848
# Third capture group is the origin
bash_args += '\([^{]*\)'
# Now we have just the time
# Example: |1545345848
# Fourth capture group is the time
bash_args += '|\(.*\)'
# Replacement with the capture groups
# Must double escape here or python freaks out
bash_args += '/\\1\\t{\\2\\3}\\t\\3\\t\\4/p" | '
# Replaces spaces in array to commas
# Need to pipe to new sed because you need the -n -p args
# to make sed not output the full string if it doesn't match
# And you cannot add -e args after that
bash_args += 'sed -e "s/ /, /g" '
return bash_args
def _bgpdump_args(self):
"""Parses MRT file into a CSV using bgpdump
For a more in depth explanation see _convert_dump_to_csv. For
explanation on specifics of the parsing, see below. Also note,
you must use the updated bgpdump tool, not the apt repo.
"""
# performs bgpdump on the file
bash_args = 'bgpdump -q -m -t change '
bash_args += self.path
# Cuts out columns we don't need
bash_args += ' | cut -d "|" -f2,6,7 '
# Deletes any announcements with as sets
bash_args += '|sed -e "/{.*}/d" '
# Performs regex matching with sed and adds brackets to as_path
bash_args += '-e "s/\(.*|.*|\)\(.*$\)/\\1{\\2}/g" '
# Replaces pipes and spaces with commas for csv insertion
# leaves out first one: -e "s/, / /"
bash_args += '-e "s/ /, /g" -e "s/|/\t/g" '
# Adds a column for the origin
bash_args += '-e "s/\([[:digit:]]\+\)}/\\1}\t\\1/g"'
# Rearrange columns to match for csv_to_db
bash_args += '| awk \'BEGIN {FS="\t"};{OFS="\t"};{ print '
bash_args += '$2, $3, $4, $1}\''
return bash_args
```
#### File: mrt/mrt_base/mrt_installer.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
from ....utils import utils
from ....utils.logger import config_logging
class MRT_Installer:
"""This class installs all dependencies needed for MRT_Parser.
In depth explanation at the top of module.
"""
__slots__ = []
def __init__(self, **kwargs):
"""Initializes logger and path variables."""
config_logging(kwargs.get("stream_level"), kwargs.get("section"))
def install_dependencies(self):
"""Downloads all dependencies required
That includes, bgpscanner, and bgpdump,
and of course their dependencies.
"""
self._install_bgpscanner()
self._install_bgpdump()
@utils.delete_files(["bgpscanner/", "delete_me/"])
def _install_bgpscanner(self):
"""Installs bgpscanner to /usr/bin/bgpscanner.
Also installs lzma-dev, which apparently is necessary.
"""
# Installs all deps we can do with apt
self._install_bgpscanner_deps()
# Downloads bgpscanner and modifies it for malformed announcements
        # which we need because they exist in the real world
self._download_and_modify_bgpscanner()
# Builds bgpscanner and moves it to desired location
self._build_bgpscanner()
# Installs lib_isocore which bgpscanner needs to run
self._install_lib_isocore()
@utils.delete_files("bgpdump/")
def _install_bgpdump(self):
"""Installs bgpdump and all dependencies"""
cmds = ["git clone https://github.com/RIPE-NCC/bgpdump.git",
"cd bgpdump/",
"sh ./bootstrap.sh",
"make",
"./bgpdump -T",
"sudo cp bgpdump /usr/local/bin/bgpdump"]
utils.run_cmds(cmds)
###################################
### bgpscanner Helper Functions ###
###################################
def _install_bgpscanner_deps(self):
"""Installs all bgpscanner dependencies"""
self._install_lzma_dev()
cmds = ["sudo apt -y install meson",
"sudo apt -y install zlib1g",
"sudo apt -y install zlib1g-dev",
"sudo apt-get -y install libbz2-dev",
"sudo apt-get -y install liblzma-dev",
"sudo apt-get -y install liblz4-dev",
"pip3 install meson",
"sudo apt-get -y install cmake"]
utils.run_cmds(cmds)
# Ninja has to be installed 'manually', as doing it using apt
# installs an older version that prevents this file from
# working.
# Instructions found at:
# https://www.claudiokuenzler.com/blog/756/install-newer-ninja-build-tools-ubuntu-14.04-trusty
        # We begin by wget-ing 1.8.2, sufficient for our needs
cmds = ["wget -q https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip",
# Unzip into /usr/local/bin
"sudo unzip ninja-linux.zip -d /usr/local/bin",
# We use update-alternatives to make a link from the
# ninja executable in /usr/local/bin/ninja to
# /etc/alternatives/ninja to /usr/bin/ninja, and apt
# will now 'recognize' ninja in /usr/bin
"sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force",
# Remove the zip file.
"rm -r ninja-linux.zip"]
@utils.delete_files("lzma-dev/")
def _install_lzma_dev(self):
"""Installs lzma-dev, needed for bgpscanner"""
cmds = ["mkdir lzma-dev",
"cd lzma-dev/",
"wget https://tukaani.org/xz/xz-5.2.4.tar.gz",
"tar -xvf xz-5.2.4.tar.gz",
"cd xz-5.2.4/",
"./configure ",
"make",
"sudo make install"]
utils.run_cmds(cmds)
def _download_and_modify_bgpscanner(self):
"""Downloads bgpscanner and modifies it.
The reason it modifies it is because it used to reject
announcements with malformed attributes. However, bgpdump
does not, and because we know that they are in actual RIBs
of ASes, it makes sense to include them in our simulation.
"""
utils.run_cmds("git clone https://gitlab.com/Isolario/bgpscanner.git")
        # If this line is not changed it removes improper configurations.
# We want to keep these because they are included in the monitors
        # Announcements, so they clearly are propagated throughout the
# internet.
path = "bgpscanner/src/mrtdataread.c"
prepend = ' if ('
replace = 'rib->peer->as_size == sizeof(uint32_t))'
replace_with = 'true)'
utils.replace_line(path, prepend, replace, replace_with)
def _build_bgpscanner(self):
"""Builds bgpscanner.
For some reason, meson refuses to be installed in a good location
so we need to pip install it in our python env and run from there.
"""
cmds = ["python3 -m venv delete_me",
"delete_me/bin/pip3 install wheel",
"delete_me/bin/pip3 install meson",
"cd bgpscanner",
"mkdir build && cd build",
"../../delete_me/bin/meson ..",
"cd ../../",
"cd bgpscanner/build",
"sudo ninja install",
"sudo ldconfig",
"cd ../../",
"sudo cp bgpscanner/build/bgpscanner /usr/bin/bgpscanner"]
utils.run_cmds(cmds)
# Our second server runs from here, so we need to:
utils.run_cmds(("cp bgpscanner/build/bgpscanner "
"/usr/local/bin/bgpscanner"))
def _install_lib_isocore(self):
"""This installs lib_isocore.
bgpscanner needs this to run, and apparently it does not install
properly by default so we need to build it and move it.
"""
cmds = ["cd bgpscanner/subprojects/",
"git clone https://gitlab.com/Isolario/isocore.git",
"cd isocore",
"mkdir build && cd build",
"../../../../delete_me/bin/meson ..",
"cd ../../",
"cd isocore/build",
"sudo ninja install",
"sudo ldconfig",
"cd ../../",
"cp isocore/build/libisocore.so /usr/lib/libisocore.so"]
utils.run_cmds(cmds)
```
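The installer above patches `mrtdataread.c` through `utils.replace_line(path, prepend, replace, replace_with)`. That helper lives elsewhere in the repo and is not shown here; the sketch below is only a hypothetical illustration of the prepend/replace behavior the call relies on, not the library's actual implementation.
```python
# Hypothetical sketch of a replace_line-style helper; the real utils.replace_line
# in this repo is not shown above and may differ.
def replace_line(path, prepend, replace, replace_with):
    """Rewrites path, swapping `replace` for `replace_with` on lines
    that start with `prepend` (ignoring leading whitespace)."""
    with open(path) as f:
        lines = f.readlines()
    with open(path, "w") as f:
        for line in lines:
            if line.lstrip().startswith(prepend.strip()):
                line = line.replace(replace, replace_with)
            f.write(line)
```
With the arguments used in `_download_and_modify_bgpscanner`, the net effect is turning the AS-size check in `mrtdataread.c` into `if (true)`, so malformed announcements are kept.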
#### File: mrt/mrt_base/tables.py
```python
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
from ....utils.database import Generic_Table
class MRT_Announcements_Table(Generic_Table):
"""Class with database functionality.
In depth explanation at the top of the file."""
__slots__ = []
name = "mrt_announcements"
columns = ["prefix", "as_path", "origin", "time"]
def _create_tables(self):
"""Creates tables if they do not exist.
Called during initialization of the database class.
"""
sql = f"""CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (
prefix INET,
as_path bigint ARRAY,
origin BIGINT,
time BIGINT
);"""
self.execute(sql)
```
#### File: mrt/mrt_detailed/detailed_tables.py
```python
__author__ = "<NAME>"
__credits__ = ["<NAME>, <NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
from ..database import Generic_Table
class MRT_Detailed_Table(Generic_Table):
"""Class with database functionality.
"""
__slots__ = []
name = "mrt_detailed"
#columns = ['update_type', 'prefix', 'as_path', 'origin',
# 'time', 'interval_start', 'interval_end']
columns = ['interval_end', 'interval_start', 'time', 'origin', 'prefix', 'as_path', 'update_type']
def _create_tables(self):
"""Creates tables if they do not exist.
Called during initialization of the database class.
"""
#sql = """CREATE UNLOGGED TABLE IF NOT EXISTS mrt_detailed (update_type text,
# prefix cidr,
# as_path bigint ARRAY,
# origin bigint,
# time bigint,
# interval_start bigint,
# interval_end bigint);"""
sql = """CREATE UNLOGGED TABLE IF NOT EXISTS mrt_detailed (interval_end bigint,
interval_start bigint,
time bigint,
origin bigint,
prefix cidr,
as_path bigint ARRAY,
update_type text);"""
self.execute(sql)
```
#### File: mrt/mrt_metadata/mrt_metadata_parser.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import bisect
import datetime
import logging
import os
import warnings
import binpacking  # needed by the legacy _create_block_table_w_prefix_groups below
import psycopg2
import requests
from ....utils.base_classes import Parser
from ...roas.tables import ROAs_Table
from ..mrt_base.mrt_file import MRT_File
from ..mrt_base.mrt_installer import MRT_Installer
from ..mrt_base.mrt_sources import MRT_Sources
from ..mrt_base.tables import MRT_Announcements_Table
from .tables import Distinct_Prefix_Origins_Table
from .tables import Prefix_IDs_Table
from .tables import Origin_IDs_Table
from .tables import Prefix_Origin_IDs_Table
from .tables import Distinct_Prefix_Origins_W_IDs_Table
from .tables import Blocks_Table
from .tables import ROA_Known_Validity_Table
from .tables import ROA_Validity_Table
from .tables import Prefix_Origin_Blocks_Metadata_Table
from .tables import Prefix_Origin_Metadata_Table
from .tables import MRT_W_Metadata_Table
from ....utils import utils
class MRT_Metadata_Parser(Parser):
"""This class downloads, parses, and deletes files from Caida.
In depth explanation at the top of module.
"""
__slots__ = []
def _run(self, *args, max_block_size=400):
"""Adds metadata to MRT files and prepares for EXR insertion
1. Adds ROA state
2. Adds prefix_id (id unique to each prefix)
3. Adds monitor_asn (last ASN in the path)
4. Adds block_id (block_id for insertion into exr
needed or else it won't fit in RAM)
5. Adds block_prefix_id (prefix ids within a block. Used to compare
prefixes within a block in exr when determining the best path,
since this can be used as a key in a list
instead of a hashmap (inside the extrapolator))
6. Adds as many indexes as you can think of. Used in Forecast,
verification, Full path, etc., so just add them all.
"""
self._validate()
self._add_prefix_origin_index()
logging.info(f"Creating {Distinct_Prefix_Origins_Table.name}")
self._get_p_o_table_w_indexes(Distinct_Prefix_Origins_Table)
# If you were a real cool cat, you would have done a compressed
# trie, finding common ancestors, to get prefix groupings
# def way faster than all this. Also more difficult.
for Table in [Origin_IDs_Table,
Prefix_IDs_Table,
Prefix_Origin_IDs_Table,
Distinct_Prefix_Origins_W_IDs_Table]:
logging.info(f"Creating {Table.__name__}")
self._get_p_o_table_w_indexes(Table)
self._create_block_table(max_block_size)
self._add_roas_index()
for Table in [ROA_Known_Validity_Table,
ROA_Validity_Table,
Prefix_Origin_Blocks_Metadata_Table,
Prefix_Origin_Metadata_Table]:
self._get_p_o_table_w_indexes(Table)
self._add_metadata()
def _validate(self):
"""Asserts that tables are filled"""
for Table in [MRT_Announcements_Table, ROAs_Table]:
with Table() as db:
err = f"{db.name} not filled"
sql = f"SELECT * FROM {db.name} LIMIT 2"
assert len(db.execute(sql)) > 0, err
def _add_prefix_origin_index(self):
"""Adds index to prefix and origin for combining with ROAs table"""
with MRT_Announcements_Table() as db:
sql = f"""CREATE INDEX IF NOT EXISTS {db.name}_po_index ON
{db.name} USING GIST(prefix inet_ops, origin)"""
self._create_index(sql, db)
sql = f"""CREATE INDEX IF NOT EXISTS {db.name}_po_btree_i ON
{db.name}(prefix inet_ops, origin);"""
self._create_index(sql, db)
def _get_p_o_table_w_indexes(self, Table):
"""Prefix origin table with indexes"""
with Table(clear=True) as db:
db.fill_table()
index_sqls = [
f"""CREATE INDEX IF NOT EXISTS {db.name}_dpo_index
ON {db.name} USING GIST(prefix inet_ops, origin)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_dist_p_index
ON {db.name} USING GIST(prefix inet_ops)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_dist_o_index
ON {db.name}(origin)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_g_index
ON {db.name}(prefix_group_id);""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_pbtree_index
ON {db.name}(prefix)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_po_btree_index
ON {db.name}(prefix, origin);"""
]
for sql in index_sqls:
try:
self._create_index(sql, db)
except psycopg2.errors.UndefinedColumn:
pass
def _create_block_table(self, max_block_size):
"""Creates iteration blocks as balanced as possible
Based on prefix, total # ann for that prefix
Needed to write a custom algo for this
but it's fine, since binpacking is already np hard
Figures out first correct number of bins, since prefixes
are most important
Then figures out which bin to place in
Largest ann_count first into smallest bin_weight
"""
class Bin:
def __init__(self, bin_id):
self.bin_id = bin_id
self.prefixes = []
self.total_weight = 0
def add_prefix(self, prefix, ann_count):
if len(self.prefixes) + 1 <= max_block_size:
self.prefixes.append(prefix)
self.total_weight += ann_count
return True
else:
return False
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.total_weight < other.total_weight
@property
def rows(self):
return [[self.bin_id, x] for x in self.prefixes]
logging.info("Getting prefix blocks")
with Prefix_IDs_Table() as db:
group_counts = [[x["prefix"], x["ann_count"]]
for x in db.get_all()]
group_counts = sorted(group_counts, key=lambda x: x[1], reverse=True)
bin_count = (len(group_counts) // max_block_size) + 1
bins = list(sorted([Bin(i) for i in range(bin_count)]))
# tbh, is this the same as just doing it in order?
# Should check this...
for i, (prefix, ann_count) in enumerate(group_counts):
for b_index, b in enumerate(bins):
if b.add_prefix(prefix, ann_count):
current_index = b_index
break
# Inserts item in sorted list correctly
# MUCH faster than sort
# https://stackoverflow.com/a/38346428/8903959
bisect.insort_left(bins, bins.pop(b_index))
block_table_rows = []
for current_bin in bins:
block_table_rows.extend(current_bin.rows)
csv_path = os.path.join(self.csv_dir, "block_table.csv")
utils.rows_to_db(block_table_rows, csv_path, Blocks_Table)
for _id in ["block_id", "prefix"]:
sql = f"""CREATE INDEX IF NOT EXISTS
{Blocks_Table.name}_{_id}
ON {Blocks_Table.name}({_id})
;"""
self._create_index(sql, db)
def _add_roas_index(self):
"""Creates an index on the roas table"""
with ROAs_Table() as db:
sql = f"""CREATE INDEX IF NOT EXISTS roas_index
ON {db.name} USING GIST(prefix inet_ops, asn);"""
self._create_index(sql, db)
def _add_metadata(self):
"""Joins prefix origin metadata with MRT Anns"""
logging.info("Adding metadata to the MRT announcements")
with MRT_W_Metadata_Table(clear=True) as db:
db.fill_table()
sql = f"""CREATE INDEX {db.name}_block_index
ON {db.name}(block_id);"""
self._create_index(sql, db)
# NOTE: you probably need other indexes on this table
# Depending on what application is being run
def _create_index(self, sql, db):
logging.info(f"Creating index on {db.name}")
db.execute(sql)
logging.info("Index complete")
################
### Old Code ###
################
def _create_block_table_w_prefix_groups(self, max_block_size):
"""Legacy code now
This can be used for creating blocks with groups
We didn't need to tackle this problem for our phd
We leave it for the next runner up.
Creates blocks for the extrapolator
1. Counts number of prefixes per group
2. Packs them into blocks with a fixed max size
3. Creates the block id table
-contains group_id, block_id
"""
logging.info("Getting prefix blocks")
with Distinct_Prefix_Origins_W_IDs_Table() as db:
sql = f"""SELECT prefix_group_id, COUNT(prefix_group_id) AS total
FROM {db.name}
GROUP BY prefix_group_id;"""
group_counts = db.execute(sql)
group_counts_dict = {x["prefix_group_id"]: x["total"]
for x in group_counts}
# Returns a list of dicts, that contains group_id: count
bins = binpacking.to_constant_volume(group_counts_dict, max_block_size)
block_table_rows = []
for block_id, current_bin in enumerate(bins):
for group_id in current_bin:
block_table_rows.append([block_id, group_id])
csv_path = os.path.join(self.csv_dir, "block_table.csv")
utils.rows_to_db(block_table_rows, csv_path, Blocks_Table)
for _id in ["block_id", "prefix_group_id"]:
sql = f"""CREATE INDEX IF NOT EXISTS
{Blocks_Table.name}_{_id} ON {Blocks_Table.name}({_id})
;"""
self._create_index(sql, db)
```
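The `_create_block_table` method above packs prefixes into blocks with a greedy largest-first heuristic. Here is a minimal, self-contained sketch of just that heuristic with made-up prefix/count data; it mirrors the logic above but is not tied to the database tables.
```python
import bisect

def pack_blocks(prefix_counts, max_block_size):
    """Greedy largest-first packing: each prefix goes into the currently
    lightest bin that still has room (mirrors _create_block_table)."""
    class Bin:
        def __init__(self, bin_id):
            self.bin_id, self.prefixes, self.total_weight = bin_id, [], 0
        def add(self, prefix, count):
            if len(self.prefixes) < max_block_size:
                self.prefixes.append(prefix)
                self.total_weight += count
                return True
            return False
        def __lt__(self, other):
            return self.total_weight < other.total_weight

    bin_count = len(prefix_counts) // max_block_size + 1
    bins = sorted(Bin(i) for i in range(bin_count))
    # Heaviest prefixes first, each into the lightest bin with room
    for prefix, count in sorted(prefix_counts, key=lambda x: x[1], reverse=True):
        for i, b in enumerate(bins):
            if b.add(prefix, count):
                # Re-insert the modified bin to keep the list sorted by weight
                bisect.insort_left(bins, bins.pop(i))
                break
    return {b.bin_id: b.prefixes for b in bins}

# Example with hypothetical data: 5 prefixes, at most 2 per block
print(pack_blocks([("1.0.0.0/24", 10), ("2.0.0.0/24", 7),
                   ("3.0.0.0/24", 5), ("4.0.0.0/24", 2),
                   ("5.0.0.0/24", 1)], max_block_size=2))
```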
#### File: relationships/tests/test_relationships_file.py
```python
import os
import pytest
from unittest.mock import Mock, patch
from ..relationships_file import Rel_File, Rel_Types
from ..relationships_parser import Relationships_Parser
from ..tables import Provider_Customers_Table, Peers_Table
from ....utils import utils
__authors__ = ["<NAME>", "<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
@pytest.mark.relationships_parser
class Test_Relationships_File:
"""Tests all local functions within the Relationships File class."""
def setup(self):
"""Set up a Relationships File object with the Relationships Parser.
This is to get the necessary arguments to intialize the File object.
"""
# Initialize a Relationships Parser object
self.rel_par = Relationships_Parser()
# Initialize Relationships File object
self.rel_file = Rel_File(self.rel_par.path,
self.rel_par.csv_dir,
self.rel_par._get_urls()[0]) # Gets URL
def test__db_insert(self):
"""Tests the _db_insert function"""
# Download a file to use as a test
utils.download_file(self.rel_file.url,
self.rel_file.path)
# Unzip this file and assign its new path
self.rel_file.path = utils.unzip_bz2(self.rel_file.path)
_peer_count, _cust_prov_count = self._get_lines(self.rel_file.path)
# Clean up with utils so as not to contaminate test
utils.delete_paths([self.rel_file.csv_dir, self.rel_file.path])
# Make sure the counts are accurate
with Peers_Table(clear=True) as _peers:
with Provider_Customers_Table(clear=True) as _cust_provs:
Relationships_Parser().run()
assert _peer_count == _peers.get_count()
assert _cust_prov_count == _cust_provs.get_count()
def test__get_rel_attributes(self):
"""Tests the _get_rel_attributes function"""
# Grep call for finding peer relationships:
# All lines not containing '-1' or '#', delimited by tabs
_peers_grep = (r'grep -v "\-1" | grep -F -v "#" | cut -d "|" -f1,2'
' | sed -e "s/|/\t/g"')
# Grep call for finding customer-provider relationships:
# All lines containing '-1' but not '#', delimited by tabs
_cust_prov_grep = (r'grep "\-1" | grep -F -v "#" | cut -d "|"'
' -f1,2 | sed -e "s/|/\t/g"')
# Expected return value for 'grep' from this method
_exp_grep = {Rel_Types.PROVIDER_CUSTOMERS: _cust_prov_grep,
Rel_Types.PEERS: _peers_grep}
# Assume rovpp == False for table attributes call, since it makes
# no difference for testing this method
_exp_table_attr = {Rel_Types.PROVIDER_CUSTOMERS:
Provider_Customers_Table,
Rel_Types.PEERS: Peers_Table}
_grep, _csvs, _table_attr = self.rel_file._get_rel_attributes()
# Finally, make sure all output matches what is expected
assert (_grep, _table_attr) == (_exp_grep, _exp_table_attr)
def test__get_table_attributes(self):
"""Tests the _get_table_attributes function"""
# Expected output
output = {Rel_Types.PROVIDER_CUSTOMERS: Provider_Customers_Table,
Rel_Types.PEERS: Peers_Table}
# Make sure calls give expected output.
assert self.rel_file._get_table_attributes() == output
def test_parse_file(self):
"""This uses an example relationship file to test the grep commands
We use a small example relationship file, for which we know the
expected output. We check that the data in the db is equivalent to
what we expect."""
# Patch the utils.download and utils.unzip_bz2 methods and then run
# the parse_file method
dl = ("lib_bgp_data.collectors.relationships.relationships_file"
".utils.download_file")
uz = ("lib_bgp_data.collectors.relationships.relationships_file"
".utils.unzip_bz2")
with patch(dl) as dl_mock, patch(uz) as uz_mock:
dl_mock.side_effect = self._custom_download_file
uz_mock.side_effect = self._custom_unzip_bz2
self.rel_file.parse_file()
# Check the database and assure we have expected outputs for both
# the peers table and the providers_customers table
with Peers_Table() as db:
expected = [{"peer_as_1": 1, "peer_as_2": 11537},
{"peer_as_1": 1, "peer_as_2": 44222}]
result = [dict(row) for row in db.get_all()]
assert expected == result
with Provider_Customers_Table() as db:
expected = [{"provider_as": 1, "customer_as": 21616},
{"provider_as": 1, "customer_as": 34732},
{"provider_as": 1, "customer_as": 41387}]
result = [dict(row) for row in db.get_all()]
assert expected == result
########################
### Helper Functions ###
########################
def _get_lines(self, path):
"""Returns total number of lines in the file"""
with open(path) as sample:
peer_count = 0
cust_prov_count = 0
for line in sample:
if "|0|" in line:
peer_count += 1
elif "|-1|" in line:
cust_prov_count += 1
return peer_count, cust_prov_count
def _custom_download_file(self, url, path):
"""Writes a test file to where the file would normally be
downloaded
"""
test_folder = "/tmp/test_Relationships_Parser/"
if not os.path.exists(test_folder):
os.makedirs(test_folder)
test_path = test_folder + "1.decompressed"
test_file = ["1|11537|0|bgp\n",
"1|21616|-1|bgp\n",
"1|34732|-1|bgp\n",
"1|41387|-1|bgp\n",
"1|44222|0|bgp"]
with open(test_path, "w") as test:
test.writelines(test_file)
def _custom_unzip_bz2(self, path):
"""Returns the path of where the unzipped file would be"""
return "/tmp/test_Relationships_Parser/1.decompressed"
```
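The grep commands and `_get_lines` helper above both lean on CAIDA's serial-2 relationship format, where the third `|`-separated field is `0` for a peer-to-peer link and `-1` for a provider-to-customer link. Below is a minimal pure-Python sketch of that classification, assuming the standard `as1|as2|relationship|source` layout.
```python
def parse_caida_rel_line(line: str):
    """Classify one CAIDA AS-relationship line as a peer or
    provider-customer pair; returns None for comment lines."""
    if line.startswith("#"):
        return None
    as1, as2, rel = line.strip().split("|")[:3]
    if rel == "0":
        return ("peer", int(as1), int(as2))
    elif rel == "-1":
        return ("provider_customer", int(as1), int(as2))
    return None

# Matches the sample rows used in _custom_download_file above
assert parse_caida_rel_line("1|11537|0|bgp") == ("peer", 1, 11537)
assert parse_caida_rel_line("1|21616|-1|bgp") == ("provider_customer", 1, 21616)
```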
#### File: rpki_validator/tests/test_rpki_validator_parser.py
```python
from time import sleep
import pytest
from ..rpki_validator_parser import RPKI_Validator_Parser
from ..rpki_validator_wrapper import RPKI_Validator_Wrapper
from ..tables import ROV_Validity_Table
from ...mrt.mrt_base import MRT_Parser, MRT_Sources
from ...mrt.mrt_base.tables import MRT_Announcements_Table
from ....utils import utils
from ....utils.base_classes import ROA_Validity as Val
__authors__ = ["<NAME>, <NAME>"]
__credits__ = ["<NAME>, <NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
@pytest.mark.rpki_validator
class Test_RPKI_Validator_Parser:
"""Tests all local functions within the RPKI_Validator_Parser class."""
@pytest.fixture
def parser(self):
return RPKI_Validator_Parser()
def test_parser_test_data(self, test_table):
"""This is more of an overall system test,
since almost all of the functionality exists in the wrapper.
This test will input a few prefix origins we know are invalid
(which we can tell using the roas collector) and a few we know
are valid into a test table. Then we can confirm that the output
is what we expect.
"""
with ROV_Validity_Table() as db:
def _get_val_for_origin(origin):
sql = f"""SELECT validity FROM {db.name}
WHERE origin = {origin};"""
return db.execute(sql)[0]['validity']
# see conftest.py in this dir for test_table details
RPKI_Validator_Parser()._run(table=test_table)
# sometimes unknown validity status is returned by the API
# and it doesn't get the correct one unless waited on
valid = _get_val_for_origin(0)
assert valid == Val.VALID.value or valid == Val.UNKNOWN.value
invalid = _get_val_for_origin(1)
assert invalid == Val.INVALID_BY_ORIGIN.value or invalid == Val.UNKNOWN.value
def test__format_asn_dict(self, parser):
"""Tests the format asn_dict function
Confirms that the output is what we expect for a typical entry"""
for key, value in RPKI_Validator_Wrapper.get_validity_dict().items():
d = {'asn': 'AS198051', 'prefix': '1.2.0.0/16', 'validity': key}
assert parser._format_asn_dict(d) == [198051, '1.2.0.0/16', value]
@pytest.mark.xfail(strict=True)
@pytest.mark.slow
def test_comprehensive_system(self):
"""Tests the entire system on the MRT announcements.
The test is expected to fail. RPKI Validator does not
have data on all prefix-origin pairs.
RPKI Validator also changes validity values if you wait before querying.
"""
with ROV_Validity_Table() as db:
# Run MRT_Parser to fill mrt_announcements table which will
# be used as the input table for RPKI_Validator.
input_table = MRT_Announcements_Table.name
MRT_Parser().run()
RPKI_Validator_Parser().run(table=input_table)
initial_count = db.get_count()
initial_rows = db.get_all()
# all prefix-origin pairs from input should be in val table
sql = f"""SELECT * FROM {input_table} a
LEFT JOIN {db.name} b
USING (prefix, origin)
WHERE b.prefix IS NULL;"""
assert len(db.execute(sql)) == 0
# clear validity table and run with a wait before getting data
# should be the same with and without waiting
db.clear_table()
RPKI_Validator_Parser().run(table=input_table, wait=True)
second_count = db.get_count()
second_rows = db.get_all()
assert initial_count == second_count
assert initial_rows == second_rows
```
#### File: collectors/traceroutes/tables.py
```python
import logging
from ...utils.database import Generic_Table
class ROAs_Table(Generic_Table):
"""Announcements table class"""
__slots__ = []
name = "roas"
columns = ["asn", "prefix", "max_length", "created_at"]
def _create_tables(self):
"""Creates tables if they do not exist"""
sql = """CREATE UNLOGGED TABLE IF NOT EXISTS roas (
asn bigint,
prefix cidr,
max_length integer,
created_at bigint
) ;"""
self.execute(sql)
def create_index(self):
"""Creates a bunch of indexes to be used on the table"""
logging.debug("Creating index on roas")
sql = """CREATE INDEX IF NOT EXISTS roas_index
ON roas USING GIST(prefix inet_ops)"""
self.execute(sql)
```
#### File: extrapolator/verification_parser/extrapolator_analyzer.py
```python
__author__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from .tables import Monitors_Table, Control_Monitors_Table
from ..wrappers import Extrapolator_Wrapper
from ...collectors.mrt.mrt_metadata.tables import MRT_W_Metadata_Table
from ...collectors.relationships.tables import Peers_Table
from ...collectors.relationships.tables import Provider_Customers_Table
from ...utils.base_classes import Parser
from ...utils.database import Database
class Extrapolator_Analyzer(Parser):
"""This class generates input to the extrapolator verification
In depth explanation at the top of module. Jk needs docs
"""
__slots__ = []
def _run(self, test):
with Control_Monitors_Table() as db:
table = "mrt_w_metadata"
if test:
db.execute("DROP TABLE IF EXISTS mrt_verif_test")
db.execute(f"""CREATE TABLE mrt_verif_test AS (
SELECT * FROM mrt_w_metadata WHERE block_id <= 100);""")
table = "mrt_verif_test"
rows = db.get_all()
final_results = {}
for row in rows:
final_results[row['asn']] = {}
exr_rows = []
output_tables = []
for origin_only_mode, mh_prop in [[0, 0],
[1, 0],
[0, 1]]:
output_table = f"verification_origin_only{origin_only_mode}_mh_{mh_prop}"
output_tables.append(output_table)
cmd = (f"time /usr/bin/master_extrapolator ")
cmd += f"-a {table} --store-results=0 "
cmd += (f"--full-path-asns {row['asn']} "
f"--exclude-monitor={row['asn']} "
f"--mh-propagation-mode={mh_prop} "
f"--origin-only={origin_only_mode} "
f"--log-folder=/tmp/exr-log --log-std-out=1 "
f"--select-block-id=1 "
f"--full-path-results-table {output_table}")
Extrapolator_Wrapper(**self.kwargs)._run(bash_args=cmd)
with Database() as db:
db.execute("DROP TABLE IF EXISTS control_data")
sql = f"""CREATE UNLOGGED TABLE control_data AS (
SELECT * FROM {table}
WHERE monitor_asn = %s);"""
db.execute(sql, [row['asn']])
print("Created control tbl")
for output_table in output_tables:
# I know this isn't the fastest way, but whatevs
# It's fast enough compared to the runtime of the exr
sql = f"""SELECT ctrl.as_path AS ground_truth,
out.as_path AS estimate
FROM control_data ctrl
LEFT JOIN {output_table} out
ON out.prefix_id = ctrl.prefix_id"""
results = (db.execute(sql))
distances = []
from tqdm import tqdm
# NOTE: if this is too slow, use the python-levenshtein for a c version
# And just convert ints to strs
for result in tqdm(results, total=len(results), desc="calculating levenshtein"):
if result["estimate"] is None:
distances.append(len(result["ground_truth"]))
else:
distances.append(self.levenshtein(result["ground_truth"], result["estimate"]))
from statistics import mean
final_results[row['asn']][output_table] = mean(distances)
from pprint import pprint
pprint(final_results)
agg_dict = {}
for _, outcomes in final_results.items():
for outcome_table, distance in outcomes.items():
agg_dict[outcome_table] = agg_dict.get(outcome_table, []) + [distance]
for outcome_table, outcome_list in agg_dict.items():
agg_dict[outcome_table] = mean(outcome_list)
pprint(agg_dict)
# https://stackoverflow.com/a/6709779/8903959
def levenshtein(self, a,b):
"Calculates the Levenshtein distance between a and b."
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a,b = b,a
n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous, current = current, [i]+[0]*n
for j in range(1,n+1):
add, delete = previous[j]+1, current[j-1]+1
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
```
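The `levenshtein` method above works on any sequences, so AS paths can be compared directly as lists of ASNs. A quick usage sketch with hypothetical paths (the method never touches `self`, so it is called unbound here):
```python
# Hypothetical AS paths; ASN 174 is missing from the extrapolator's estimate
ground_truth = [3356, 174, 13335]
estimate = [3356, 13335]
dist = Extrapolator_Analyzer.levenshtein(None, ground_truth, estimate)
assert dist == 1  # one edit (the dropped ASN) separates the two paths
```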
#### File: verification_parser/tests/test_mrt_parser.py
```python
__authors__ = ["<NAME>", "<NAME>, <NAME>"]
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pytest
import validators
import os
import filecmp
from subprocess import check_call
from .collectors import Collectors
from ..mrt_file import MRT_File
from ..mrt_parser import MRT_Parser
from ..mrt_sources import MRT_Sources
from ..tables import MRT_Announcements_Table
from ...utils import utils
from ...database import Database
@pytest.mark.mrt_parser
class Test_MRT_Parser:
"""Tests all functions within the mrt parser class."""
def setup(self):
"""NOTE: For all of your tests, run off a single time.
Do NOT get the default start and end after every test.
The reason being that the day could change, then the times will
differ.
"""
# Set times for testing purposes
self._start = 1592351995
# 6/16/2020, 23:59:55
self._end = 1592438399
# 6/17/2020, 23:59:59
# This test passes (as of 4 Apr 2020)
# However, it is recommended to test on a machine where dependencies
# are not installed.
def test___init__(self):
"""Tests initialization of the MRT parser
When dependencies are not installed, the install function
should be called. (Mock this, don't duplicate install test)
In addition, the mrt_announcement table should be cleared.
"""
# Connect to database
with Database() as db:
# Check if we warned that the dependencies are not installed
# First check if we need to install dependencies
if not os.path.exists("/usr/bin/bgpscanner"):
with pytest.warns(None) as record:
# MRT_Parser should emit a warning here
MRT_Parser()
# If no warning was given even though it should have
if not record:
pytest.fail("Warning not issued when deps not installed")
# No need to install anything
else:
# Run init
MRT_Parser()
# Check that the table exists and is empty
assert db.execute("SELECT * FROM mrt_announcements") == []
@pytest.mark.parametrize("sources, collectors", [(MRT_Sources, 5),
([], 5)])
def test_get_iso_mrt_urls(self, sources, collectors):
"""Tests getting isolario data.
Should assert that when ISOLARIO is not in sources it should
return [].
Should assert that there are 5 collectors by default.
Probably should parametrize this function.
"""
# Create a parser
test_parser = MRT_Parser()
# Get our URLs
urls = test_parser._get_iso_mrt_urls(self._start, sources)
# Assert that files is empty if ISOLARIO is not in sources
if MRT_Sources.ISOLARIO not in sources:
assert urls == []
# Verify that we have valid URLs
for url in urls:
assert validators.url(url)
# Assert that we have 5 (by default) collectors
assert len(urls) == collectors
@pytest.mark.parametrize("sources, collectors, api_param",
[(MRT_Sources, 1, Collectors.collectors_1.value),
(MRT_Sources, 2, Collectors.collectors_2.value),
(MRT_Sources, 3, Collectors.collectors_3.value),
([], 0, Collectors.collectors_0.value),
([MRT_Sources.ROUTE_VIEWS], 27, {}),
([MRT_Sources.RIPE], 21, {}),
(MRT_Sources, 48, {})])
def test_get_caida_mrt_urls(self, sources, collectors, api_param):
"""Tests getting caida data.
Should assert that when sources is just routeview that there
are X number of collectors.
Should assert that when sources is just ripe that there
are X number of collectors.
Should assert that when sources is all of them there are X
collectors
Should test API parameters and make sure they work.
Probably should parametrize this function.
"""
# Create a parser
test_parser = MRT_Parser()
# Get our URLS
urls = test_parser._get_caida_mrt_urls(self._start,
self._end,
sources,
api_param)
# If we have no sources, then urls should be empty.
if MRT_Sources.RIPE not in sources and MRT_Sources.ROUTE_VIEWS not in sources:
assert urls == []
# Verify we have valid URLs
for url in urls:
assert validators.url(url)
# Verify expected collectors == #urls
assert len(urls) == collectors
@pytest.mark.parametrize("sources, collectors, api_param",
[(MRT_Sources, 53, {})])
def test_get_mrt_urls(self, sources, collectors, api_param):
"""Tests getting url data.
Assert that there are 52 total collectors. Also test param mods.
Probably should parametrize this function.
"""
# Create the parser
test_parser = MRT_Parser()
# Call get mrt urls
urls = test_parser._get_mrt_urls(self._start,
self._end,
api_param,
sources)
# Ensure we have proper URLs
for url in urls:
assert validators.url(url)
assert len(urls) == collectors
return urls
def test_multiprocess_download(self, url_arg=None):
"""Test multiprocess downloading of files
NOTE: Run this with just a few quick URLs
-in other words not from isolario
Test that changing number of threads doesn't break it.
Test that all files are downloaded correctly.
Test that end result is same as no multiprocessing
"""
# Create the parser
parser = MRT_Parser()
# Get URLs
urls = url_arg if url_arg is not None else(
self.test_get_mrt_urls([MRT_Sources.ROUTE_VIEWS],
3,
Collectors.collectors_3.value))
# Get MRT files
mrt_files = parser._multiprocess_download(3, urls)
# Test all files were downloaded correctly
assert len(mrt_files) == len(urls)
# Test using more threads doesn't break things
parser._multiprocess_download(5, urls)
# Test no multiprocessing, check end result
no_multi = parser._multiprocess_download(1, urls)
# Sanity check
assert len(no_multi) == len(mrt_files)
return mrt_files
def test_multiprocess_parse_dls(self, scanner=True):
"""Test multiprocess parsing of files
NOTE: Run this with just a few quick URLs
-in other words not from isolario
Test that changing number of threads doesn't break it.
Test that all files are parsed correctly. Do this by determining
the total output of all files, and make sure that the database has
that number of announcements in it.
Test that the end result would be the same without multiprocessing.
"""
# Create the parser
parser = MRT_Parser()
# Get URLs
urls = self.test_get_mrt_urls([MRT_Sources.ROUTE_VIEWS],
3,
Collectors.collectors_3.value)
# Get a few MRT files
mrt_files = self.test_multiprocess_download(urls)
# Get expected amount of lines from the files
expected_lines = self._get_total_number_of_lines(mrt_files)
print(str(expected_lines))
with Database() as db:
# Parse files
parser._multiprocess_parse_dls(3, mrt_files, scanner)
# Make sure all files were inserted
db_lines = db.execute("SELECT COUNT(*) FROM mrt_announcements")
lines = db_lines[0]['count']
assert lines == expected_lines
# Ok, return result
return lines
@pytest.mark.slow
def test_bgpscanner_vs_bgpdump_parse_dls(self):
"""Tests bgpscanner vs bgpdump
A while back we changed our tool to bgpscanner. This tool had
to be modified so that it did not ignore malformed announcements.
We want to ensure that the output of these tools are the same. To
do this we must run them over all the input files, since only some
files have these malformed announcements. Essentially, just run the
parser twice. Once with bgpscanner and once with bgpdump. Store
them into two separate database tables, and check that they are
exactly the same.
Also, don't wait while the test is running. Be working on other
tasks, as this will take hours and hours.
"""
scanner = self.test_multiprocess_parse_dls(True)
dump = self.test_multiprocess_parse_dls(False)
assert scanner == dump
def test_filter_and_clean_up_db(self):
"""Tests that this function runs without error.
No need to duplicate tests in tables.py. Make it fast.
"""
# Make our parser
parser = MRT_Parser()
# Do what is necessary to create a table to filter and clean.
urls = self.test_get_mrt_urls([MRT_Sources.ROUTE_VIEWS],
3,
Collectors.collectors_3.value)
files = parser._multiprocess_download(5, urls)
parser._multiprocess_parse_dls(5, files, True)
# Hope that we don't run into an error here.
parser._filter_and_clean_up_db(True, True)
def test_parse_files(self):
"""Test that the parse files function
Should raise a warning and parse correctly. Use API Params
and sources to ensure a fast runtime.
"""
# Make a parser
parser = MRT_Parser()
# Call and see if we get a deprecated warn.
kwargs = dict({'start': self._start, 'end': self._end,
'api_param_mods': {}, 'download_threads': 1,
'parse_threads': 1, 'IPV4': True, 'IPV6': False,
'bgpscanner': True, 'sources': []})
with pytest.deprecated_call():
parser.parse_files(**kwargs)
########################
### Helper Functions ###
########################
# From old test
def _get_total_number_of_lines(self, mrt_files, bgpscanner=True):
"""Gets total number of entries with no as sets.
A test file is created. Bgpscanner or bgpdump is used with a
simple grep to remove AS sets. The total number of lines in
this file is counted for the total number of entries in the
original MRT files.
"""
test_path = "/tmp/testfile.txt"
utils.delete_paths(test_path)
# This could be multithreaded to count into different files
# But this should only be ever run once
# And there are only two files. Idc.
for mrt_file in mrt_files:
# Remove as sets
# Must do it this way or else complains about the "{"
tool = "bgpscanner" if bgpscanner else "bgpdump"
bash_args = '{} {} | grep -v '.format(tool, mrt_file.path)
bash_args += '"{"'
bash_args += ">> {}".format(test_path)
print(bash_args)
check_call(bash_args, shell=True)
num_lines = utils.get_lines_in_file(test_path)
# Deletes the files that we no longer need
utils.delete_paths(test_path)
return num_lines
```
#### File: extrapolator/wrappers/extrapolator_wrapper.py
```python
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
from multiprocessing import cpu_count
import os
from ...collectors.relationships.relationships_parser import Relationships_Parser
from ...collectors.relationships.tables import Peers_Table
from ...collectors.relationships.tables import Provider_Customers_Table
from ...utils.base_classes import Parser
from ...utils.database import Database
from ...utils.database import config
from ...utils import utils
class Extrapolator_Wrapper(Parser):
"""This class runs the extrapolator.
In depth explanation at the top of module.
"""
__slots__ = []
default_results_table = "exr_results"
default_depref_table = "exr_results_depref"
branch = "master"
def _run(self, input_table="mrt_w_metadata", bash_args=None):
"""Runs the bgp-extrapolator and verifies input.
Installs if necessary. See README for in depth instructions.
"""
self._input_validation([input_table])
logging.info("About to run the forecast extrapolator")
# People who are in charge of extrapolator need to change this
default_bash_args = (f"{self.install_location}"
f" -a {input_table}"
f" -r {self.default_results_table}"
f" -d {self.default_depref_table}"
f" --config-section={config.global_section_header}")
utils.run_cmds(bash_args if bash_args else default_bash_args)
def _input_validation(self, input_tables: list):
"""Validates proper tables exist and exr is installed"""
logging.debug("Validating install")
self.install()
logging.debug("Validating table exist that extrapolator needs")
with Database() as db:
sql = "SELECT * FROM {} LIMIT 1;"
for input_table in input_tables:
assert len(db.execute(sql.format(input_table))) > 0,\
f"{input_table} is empty and is necessary for extrapolator"
for table in [Peers_Table.name, Provider_Customers_Table.name]:
if len(db.execute(sql.format(table))) == 0:
Relationships_Parser.run(**self.kwargs)
break
#########################
### Install Functions ###
#########################
@utils.delete_files("BGPExtrapolator/")
def install(self, force=False):
"""Installs extrapolator and dependencies"""
if os.path.exists(self.install_location) and force is False:
return
logging.warning("Due to unstable branches, installing extrapolator now")
self._install_dependencies()
self._install_extrapolator()
logging.info("Installation complete")
def _install_dependencies(self):
"""Installs dependencies that the extrapolator needs"""
bash_args = ("sudo apt install -y "
"build-essential "
"make "
"libboost-dev "
"libboost-test-dev "
"libboost-program-options-dev "
"libpqxx-dev "
"libboost-filesystem-dev "
"libboost-log-dev "
"libboost-thread-dev "
"libpq-dev")
utils.run_cmds(bash_args)
def _install_extrapolator(self):
"""Installs extrapolator and moves it to /usr/bin"""
cmds = [f"cd {self.path} ",
"git clone https://github.com/c-morris/BGPExtrapolator.git",
"cd BGPExtrapolator"]
# Sometimes dev team moves stuff to other branches
if self.branch:
cmds += [f"git checkout {self.branch}"]
cmds += [f"make -j{cpu_count()}",
"sudo make",
f"cp bgp-extrapolator {self.install_location}"]
utils.run_cmds(cmds)
@property
def install_location(self):
"""Returns install location for the extrapolator"""
return f"/usr/bin/{self.branch}_extrapolator"
```
#### File: wrappers/tests/test_rovpp_extrapolator.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pytest
from ..rovpp_extrapolator_parser import ROVPP_Extrapolator_Parser
@pytest.mark.extrapolator
@pytest.mark.rovpp
class Test_ROVPP_Extrapolator:
"""Tests all local functions within the Extrapolator class."""
@pytest.mark.skip(reason="New hires, and not finished writing")
def test_run(self):
"""Tests the run method of the extrapolator.
Run a system test
Should also run input validation tests.
Make sure that if the extrapolator fails to populate it errors
Make sure that if the table exists beforehand that it is dropped
Make sure that the ribs out table is populated
"""
pass
@pytest.mark.skip(reason="New hires, and not finished writing")
def test_install(self):
"""Tests that the extrapolator is installed.
Note: maybe just inherit from the other test class for this?
Same test but different branch that gets installed
Test if not installed that it installs
Test that can be installed twice.
Test that once installed, can run.
"""
pass
```
#### File: wrappers/tests/test_tables.py
```python
import pytest
from ..tables import ROVPP_Extrapolator_Rib_Out_Table
from ...database import Generic_Table_Test
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
@pytest.mark.skip(reason="New hires")
@pytest.mark.extrapolator
@pytest.mark.rovpp
class Test_ROVPP_Extrapolator_Ribs_Out_Table(Generic_Table_Test):
"""Tests all functions within the mrt announcements class.
Inherits from the test_generic_table class, which will test
for table creation and dropping the table.
"""
# Needed for inheritance
table_class = ROVPP_Extrapolator_Rib_Out_Table
@pytest.mark.skip(reason="new hires")
def test_fill_table(self):
"""Tests the fill table function
Run with the following inputs:
atk and vic prefixes no overlap(multiple of each)
atk and vic prefixes some overlap (multiple of each)
atk and vic prefixes complete overlap (multiple)
atk prefixes (multiple)
vic prefixes (multiple)
Make sure output is as expected. Should be ribs out.
Also make sure that tables are destroyed afterwards
Run any other checks you can think of to find bugs
"""
```
#### File: forecast/api/__init__.py
```python
from flask import Flask
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.routing import BaseConverter
from flasgger import Swagger
from ..utils import Thread_Safe_Logger as Logger
from ..database import Database
__author__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
# http://exploreflask.com/en/latest/views.html#custom-converters
class ListConverter(BaseConverter):
"""Converts a comma separated url into a list."""
def to_python(self, value):
return value.split(',')
def to_url(self, values):
return ','.join([BaseConverter.to_url(self, value)
for value in values])
def create_app(args={}):
"""Creates the application and runs it."""
application = Flask(__name__)
# For flasgger docs
swagger = Swagger(application)
# Fixes the proxy problems we've been having
application.wsgi_app = ProxyFix(application.wsgi_app)
# For the list converter
application.url_map.converters['list'] = ListConverter
# Creates the database
application.db = Database(Logger(args))
# Imports all the blueprints that we have
# From the flask tutorial I watched they did it all here,
# so I assume that is correct
from .averages import averages_app
from .extrapolator_engine_results import extrapolator_engine_results_app\
as exr_app
from .hijacks import hijacks_app
from .policies import policies_app
from .relationships import relationships_app
from .roas import roas_app
from .rpki_validity_results import RPKI_app
for sub_app in [averages_app, exr_app, hijacks_app, policies_app,
relationships_app, roas_app, RPKI_app]:
# Sets the database
sub_app.db = application.db
# Registers the blueprint
application.register_blueprint(sub_app)
# Runs the application. Do NOT use in prod!!!
application.run(host='0.0.0.0', debug=True)
```
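The custom `ListConverter` above lets routes accept comma-separated values directly in the URL. Below is a minimal sketch of how a blueprint route would consume it; the endpoint name is hypothetical, and it assumes the converter has been registered under `'list'` exactly as `create_app` does.
```python
from flask import Blueprint, jsonify

example_app = Blueprint("example_app", __name__)

# With application.url_map.converters['list'] = ListConverter registered,
# GET /asn_lookup/1,2,3/ arrives here with asns == ['1', '2', '3']
@example_app.route("/asn_lookup/<list:asns>/")
def asn_lookup(asns):
    return jsonify({"asns": asns})
```
Note that the converter only splits the string, so each element arrives as a string and must be cast (e.g. to `int`) by the route itself.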
#### File: forecast/api/policies.py
```python
from flask import Blueprint
from flasgger import swag_from
from .api_utils import validate_asns, validate_policies, format_json, dictify
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
policies_app = Blueprint("policies_app", __name__)
def get_policy_descriptions():
"""Gets the descriptions for each policy."""
return {"rov": ("Block route announcements using standard"
" Resource Certification (RPKI) Route Origin"
" Validation (ROV) as defined in RFC 6811"),
"invalid_asn": ("Block route announcements only when"
" the originating AS is in conflict"
" with a Route Origin Authorization"
" (ROA). Announcements containing"
" prefixes exceeding the maximum length"
" authorized by a ROA are allowed."),
"invalid_length": ("Block route announcements only"
" when the length of the announced"
" prefix exceeds the maximum length"
" authorized by a ROA."
" Announcements originating from an"
" AS that is not authorized to do"
" so are allowed.")}
def get_policy_metadata():
"""Gets the metadata for the policies route."""
metadata = {"disclaimers": ("All announcements and hijacks in this"
" dataset are covered by a ROA. In other"
" words, the prefixes are a subset of a"
" ROA.")}
metadata.update(get_policy_descriptions())
return metadata
@policies_app.route("/policy_stats/<list:asns>/<list:policies>/")
@swag_from("flasgger_docs/policy_stats.yml")
@format_json(get_policy_metadata)
def policy_stats(asns, policies):
"""Returns the policy statistics and hijack data for asns."""
# Validate asns and convert to parents
asns = validate_asns(asns, policies_app.db)
# Validate policies
policies = validate_policies(policies)
results = {policy: {} for policy in policies}
for policy in policies:
for asn in asns:
# Gets overall statistics for the policy
results[policy][str(asn)] = get_stats(asn, policy)
# Gets the hijack data for the policy
results[policy][str(asn)].update(get_hijack_data(asn, policy))
# Gets average data over all asns for the policy
results[policy].update(get_avg_stats(dictify(results[policy]), asn))
return results
def get_stats(asn, policy):
"""Gets general statistics for each policy/asn."""
sql = "SELECT * FROM {} WHERE parent_asn={}".format(policy, asn)
results = policies_app.db.execute(sql)
if len(results) == 0:
results = [{}]
return results[0]
def get_avg_stats(policy_dict, asn):
"""Gets the average statistics among all asns for each policy."""
num_asns = len(policy_dict)
average = {}
# For each column/data point:
for key in policy_dict[str(asn)]:
# Ignore parent_asn and hijack data, since avg is meaningless
if key == "parent_asn" or "hijack_data" in key:
continue
# Sums up the data point for all asns and divides by num_asns
average[key] = sum([policy_dict[x][key] for x in policy_dict])/num_asns
return {"aggregate_average_data": average}
def get_hijack_data(asn, pol):
"""Gets the hijack data for an asn and policy pol."""
info = {}
og_sql = """SELECT iabh.prefix, iabh.origin, iabh.url
FROM input iabh
LEFT OUTER JOIN
(SELECT * FROM extrapolation_inverse_results
WHERE asn={}) exr
ON exr.prefix = iabh.prefix AND exr.origin = iabh.origin
WHERE exr.prefix IS NULL AND exr.origin IS NULL;""".format(asn)
# ROV's hijack data is just the sum of invalid_asn and invalid_length
policies = ["invalid_asn", "invalid_length"] if pol == "rov" else [pol]
for policy in policies:
# What to call the hijacks
p_descr = "{}_hijack_data".format(policy)
info[p_descr] = {}
# The two kinds of hijack data
for y in ["blocked_hijacked", "not_blocked_hijacked"]:
sql = og_sql.replace("input", "{}_{}".format(policy, y))
info[p_descr]["{}_info".format(y)] = [x for x in
policies_app.db.execute(sql)]
return info
```
#### File: forecast/api/roas.py
```python
from flask import Blueprint
from flasgger import swag_from
from .api_utils import format_json
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
roas_app = Blueprint("roas_app", __name__)
@roas_app.route("/roas_data/")
@swag_from("flasgger_docs/roas.yml")
@format_json(lambda: {"description": "All ROAs used"})
def roas():
"""Returns all roas data."""
return roas_app.db.execute("SELECT * FROM roas;")
```
#### File: forecast/what_if_analysis/pre_exr_sql.py
```python
from .sql_utils import Policies, Validity
from .sql_utils import create_gist, create_index, create_btree
from .sql_utils import create_table_w_gist
from .sql_utils import create_table_w_btree
from .sql_utils import create_table_w_btree_asn
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def get_pre_exr_sql(valid_before_time):
# MUST BE IN EPOCH!!!!!!
all_sql = []
create_index_sql = []
for table in ["mrt_announcements",
"rov_validity",
"unique_prefix_origins",
"hijack_temp"]:
create_index_sql.append(create_gist(table))
create_index_sql.append(create_index("rov_validity", "validity"))
all_sql.extend(create_index_sql)
all_sql.append("VACUUM ANALYZE;")
all_policies = {Policies.ASN.value: {Validity.INVALID.value: "= -2",
Validity.VALID.value: "!= -2"},
Policies.LENGTH.value: {Validity.INVALID.value: "= -1",
Validity.VALID.value: "!= -1"},
Policies.ROV.value: {Validity.INVALID.value: "< 0",
Validity.VALID.value: ">= 0"}}
# For each policy (invalid asn, invalid length, rov) we want three tables:
# invalid-and-hijacked, invalid-not-hijacked, and invalid-extra prefix origins
prefix_origin_sql = []
for policy, v_dict in all_policies.items():
# Gets invalid and hijacked prefix origins
prefix_origin_sql.extend(
create_table_w_gist(
"invalid_{}_hijacked_prefix_origins".format(policy),
"""SELECT DISTINCT u.prefix, u.origin, h.url FROM unique_prefix_origins u
INNER JOIN rov_validity r ON r.prefix = u.prefix AND r.origin = u.origin
INNER JOIN hijack_temp h ON h.prefix = r.prefix AND h.origin = r.origin
WHERE r.validity {}""".format(v_dict[Validity.INVALID.value])))
prefix_origin_sql.extend(
create_table_w_gist(
"invalid_{}_not_hijacked_prefix_origins".format(policy),
"""SELECT DISTINCT u.prefix, u.origin FROM unique_prefix_origins u
INNER JOIN rov_validity r ON r.prefix = u.prefix AND r.origin = u.origin
LEFT JOIN hijack_temp h ON h.prefix = r.prefix AND h.origin = r.origin
WHERE r.validity {} AND h.prefix IS NULL
""".format(v_dict[Validity.INVALID.value])))
prefix_origin_sql.extend(
create_table_w_gist(
"invalid_{}_extra_prefix_origins".format(policy),
"""SELECT DISTINCT r.prefix,
r.origin,
u.prefix AS extra_prefix,
u.origin AS extra_origin
FROM unique_prefix_origins u
INNER JOIN rov_validity r
ON r.prefix << u.prefix
OR (r.prefix = u.prefix AND r.origin != u.origin)
LEFT JOIN (
SELECT prefix, origin
FROM rov_validity
WHERE validity {0}) r2
ON r2.prefix = u.prefix AND r2.origin = u.origin
WHERE r2.prefix IS NULL AND r.validity {0}
""".format(v_dict[Validity.INVALID.value]),
extra=True))
all_sql.extend(prefix_origin_sql)
interesting_sql = []
interesting_sql.extend(
create_table_w_gist(
"interesting_prefix_origins",
"""SELECT prefix, origin FROM invalid_rov_hijacked_prefix_origins
UNION
SELECT prefix, origin FROM invalid_rov_not_hijacked_prefix_origins
UNION
SELECT extra_prefix, extra_origin FROM invalid_rov_extra_prefix_origins"""))
interesting_sql.append("VACUUM ANALYZE;")
interesting_sql.extend(
create_table_w_gist(
"interesting_ann",
"""SELECT DISTINCT ON (m.prefix, m.origin, m.time)
m.prefix,
m.as_path,
m.origin,
m.time
FROM mrt_announcements m
INNER JOIN interesting_prefix_origins i
ON i.prefix = m.prefix AND i.origin = m.origin"""))
interesting_sql.append("ALTER TABLE interesting_ann ADD COLUMN mrt_index SERIAL PRIMARY KEY;")
for col in ["time", "mrt_index"]:
interesting_sql.append(create_index("interesting_ann", col))
interesting_sql.append("VACUUM ANALYZE;")
all_sql.extend(interesting_sql)
for policy in [x.value for x in Policies.__members__.values()]:
ann_indexes_sql = [
create_table_w_btree(
"invalid_{0}_hijacked_ann_indexes".format(policy),
"""SELECT m.mrt_index FROM interesting_ann m
INNER JOIN invalid_{0}_hijacked_prefix_origins i
ON i.prefix = m.prefix AND i.origin = m.origin""".format(policy)),
create_table_w_btree(
"invalid_{0}_not_hijacked_ann_indexes".format(policy),
"""SELECT m.mrt_index FROM interesting_ann m
INNER JOIN invalid_{0}_not_hijacked_prefix_origins i
ON i.prefix = m.prefix AND i.origin = m.origin""".format(policy)),
create_table_w_btree(
"invalid_{0}_extra_ann_indexes".format(policy),
"""SELECT m.mrt_index, m2.mrt_index AS extra_mrt_index
FROM interesting_ann m
INNER JOIN invalid_{0}_extra_prefix_origins i
ON i.prefix = m.prefix AND i.origin = m.origin
INNER JOIN interesting_ann m2
ON i.extra_prefix = m2.prefix AND i.extra_origin = m2.origin""".format(policy),
extra=True)]
for table_sql in ann_indexes_sql:
all_sql.extend(table_sql)
for policy in [x.value for x in Policies.__members__.values()]:
ann_indexes_w_time_sql = [
create_table_w_btree(
"invalid_{0}_time_extra_ann_indexes".format(policy, valid_before_time),
"""SELECT m.mrt_index, m2.mrt_index AS extra_mrt_index FROM interesting_ann m
INNER JOIN invalid_{0}_extra_prefix_origins i
ON i.prefix = m.prefix AND i.origin = m.origin
INNER JOIN interesting_ann m2
ON i.extra_prefix = m2.prefix AND i.extra_origin = m2.origin
WHERE m.time > {1} AND m2.time < {1}""".format(policy, valid_before_time),
extra=True),
create_table_w_btree(
"invalid_{0}_time_hijacked_ann_indexes".format(policy, valid_before_time),
"""SELECT m.mrt_index FROM interesting_ann m
INNER JOIN invalid_{0}_hijacked_ann_indexes i
ON i.mrt_index = m.mrt_index
WHERE m.time > {1}""".format(policy, valid_before_time)),
create_table_w_btree(
"invalid_{0}_time_not_hijacked_ann_indexes".format(policy, valid_before_time),
"""SELECT m.mrt_index FROM interesting_ann m
INNER JOIN invalid_{0}_not_hijacked_ann_indexes i
ON i.mrt_index = m.mrt_index
WHERE m.time > {1}""".format(policy, valid_before_time)),
create_table_w_btree(
"invalid_{0}_time_ann_indexes".format(policy, valid_before_time),
"""SELECT mrt_index
FROM invalid_{0}_time_not_hijacked_ann_indexes
UNION
SELECT mrt_index
FROM invalid_{0}_time_hijacked_ann_indexes""".format(policy, valid_before_time)),
create_table_w_btree(
"invalid_{0}_time_extra_ann_indexes".format(policy, valid_before_time),
"""SELECT DISTINCT m.mrt_index, m2.mrt_index AS extra_mrt_index
FROM interesting_ann m
INNER JOIN invalid_{0}_time_ann_indexes i
ON i.mrt_index = m.mrt_index
INNER JOIN interesting_ann m2
ON i.mrt_index != m2.mrt_index AND
(m.prefix << m2.prefix
OR (m.prefix = m2.prefix AND m.origin != m2.origin))""".format(policy, valid_before_time),
extra=True)]
for table_sql in ann_indexes_w_time_sql:
all_sql.extend(table_sql)
return all_sql
```
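The `create_table_w_gist` / `create_table_w_btree` helpers come from `sql_utils`, which is not shown in this section. As a rough illustration only, a helper of that shape presumably returns a CREATE TABLE statement followed by its matching index statements; the sketch below is an assumption about that shape, not the repo's actual implementation.
```python
# Hypothetical sketch of what a create_table_w_gist-style helper might emit;
# the real sql_utils implementation may differ (e.g. in how extra=True is handled).
def create_table_w_gist(table_name, select_sql, extra=False):
    sqls = [f"DROP TABLE IF EXISTS {table_name};",
            f"CREATE UNLOGGED TABLE {table_name} AS ({select_sql});",
            f"""CREATE INDEX {table_name}_gist_index
                ON {table_name} USING GIST(prefix inet_ops, origin);"""]
    if extra:
        sqls.append(f"""CREATE INDEX {table_name}_extra_gist_index
                        ON {table_name}
                        USING GIST(extra_prefix inet_ops, extra_origin);""")
    return sqls
```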
#### File: lib_bgp_data/lib_bgp_data/__main__.py
```python
__authors__ = ["<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from argparse import ArgumentParser, Action
from logging import DEBUG
from sys import argv
from .utils.base_classes import Parser
from .utils import config_logging
def main():
"""Does all the command line options available
See top of file for in depth description"""
parser = ArgumentParser(description="lib_bgp_data, see github")
for cls in Parser.parsers:
# This will override the argparse action class
# Now when the arg is passed, the func __call__ will be called
# In this case, __call__ is set to the parsers run method
# Note that this is dynamically creating the class using type
# https://www.tutorialdocs.com/article/python-class-dynamically.html
# https://stackoverflow.com/a/40409974
argparse_action_cls = type(cls.__name__, # Class name
(Action, ), # Classes inherited
{'__call__': cls.argparse_call()}) # __dict__
parser.add_argument(f"--{cls.__name__.lower()}",
nargs=0,
action=argparse_action_cls)
# Is this right? Can I name the class anything?
argparse_action_cls = type('Backer-upper',
(Action, ),
{'__call__': Parser.run_backupables()})
parser.add_argument('--backup',
nargs=0,
action=argparse_action_cls)
# Configure logging to be debug if passed in
# I know this should be done differently, but to make the module extendable
# sacrifices had to be made, and destroying argparse was one of them
for i, arg in enumerate(argv):
if "--debug" == arg.lower():
config_logging(DEBUG)
argv.pop(i)
break
parser.parse_args()
if __name__ == "__main__":
main()
```
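The loop in `main()` above builds one argparse `Action` subclass per parser at runtime with `type`, wiring each parser's `argparse_call()` in as the action's `__call__`. A standalone sketch of that pattern with a toy class (not one of the repo's parsers):
```python
from argparse import ArgumentParser, Action

class Demo_Parser:
    """Toy stand-in for a lib_bgp_data Parser subclass."""
    @classmethod
    def argparse_call(cls):
        # Returns the function argparse will invoke when the flag is seen
        def __call__(self, parser, namespace, values, option_string=None):
            print(f"running {cls.__name__}")
        return __call__

parser = ArgumentParser()
# Dynamically create an Action subclass whose __call__ runs the parser
action_cls = type(Demo_Parser.__name__, (Action,),
                  {"__call__": Demo_Parser.argparse_call()})
parser.add_argument("--demo_parser", nargs=0, action=action_cls)
parser.parse_args(["--demo_parser"])  # prints "running Demo_Parser"
```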
#### File: simulations/simulation_grapher/line.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from .tables import Simulation_Results_Avg_Table
class Line:
def __init__(self, policy):
self.policy = policy
self.x = []
self.y = []
self.yerr = []
def add_data(self, attr_combo_dict, x_attrs, x_axis_col, graph_type):
"""results consists of lits of RealDictRows from db
This adds db data to the line to be graphed
"""
results = None
with Simulation_Results_Avg_Table() as db:
sql = f"""SELECT * FROM {db.name}
WHERE """
for col_name, col_val in attr_combo_dict.items():
if isinstance(col_val, int):
sql += f"{col_name} = {col_val}"
elif isinstance(col_val, str):
sql += f"{col_name} = '{col_val}'"
elif col_val is None:
sql += f"{col_name} IS NULL"
else:
print(col_name, col_val)
assert False, "improper column value"
sql += " AND "
sql += f"adopt_pol = '{self.policy}'"
# Gets the name
sql += f" ORDER BY {x_axis_col}"
# Gets the list of potential x values
results = [x for x in db.execute(sql)
if x[x_axis_col] in x_attrs]
for result in results:
self.x.append(int(result[x_axis_col]))
self.y.append(float(result[graph_type]) * 100)
self.yerr.append(float(result[graph_type + "_confidence"]) * 100)
def fmt(self, line_formatter):
return line_formatter.get_format_kwargs(self.policy)
```
#### File: simulations/simulation_grapher/tables.py
```python
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from ..simulator.tables import Simulation_Results_Table
from ...utils.database import Generic_Table
test_info = ["adopt_pol",
"attack_type",
"number_of_attackers",
"subtable_name",
"percent",
"round_num",
"extra_bash_arg_1",
"extra_bash_arg_2",
"extra_bash_arg_3",
"extra_bash_arg_4",
"extra_bash_arg_5"]
class Simulation_Results_Agg_Table(Generic_Table):
"""Table used to aggregate the results for graphing"""
name = "simulation_results_agg"
def fill_table(self):
global test_info
sql = f"""
CREATE UNLOGGED TABLE {self.name} AS (
SELECT
{", ".join(test_info)},
--adopting traceback
trace_hijacked_adopting::decimal / trace_total_adopting::decimal AS trace_hijacked_adopting,
(trace_blackholed_adopting::decimal + no_rib_adopting::decimal)::decimal / trace_total_adopting::decimal AS trace_disconnected_adopting,
trace_nothijacked_adopting::decimal / trace_total_adopting::decimal AS trace_connected_adopting,
--collateral traceback
trace_hijacked_collateral::decimal / trace_total_collateral::decimal AS trace_hijacked_collateral,
(trace_blackholed_collateral::decimal + no_rib_collateral::decimal)::decimal / trace_total_collateral::decimal AS trace_disconnected_collateral,
trace_nothijacked_collateral::decimal / trace_total_collateral::decimal AS trace_connected_collateral,
--adopting control plane
c_plane_has_attacker_prefix_origin_adopting::decimal / trace_total_adopting::decimal AS c_plane_hijacked_adopting,
(c_plane_has_bhole_adopting::decimal + no_rib_adopting::decimal)::decimal / trace_total_adopting::decimal AS c_plane_disconnected_adopting,
c_plane_has_only_victim_prefix_origin_adopting::decimal / trace_total_adopting::decimal AS c_plane_connected_adopting,
--collateral control plane
c_plane_has_attacker_prefix_origin_collateral::decimal / trace_total_collateral::decimal AS c_plane_hijacked_collateral,
(c_plane_has_bhole_collateral::decimal + no_rib_collateral::decimal)::decimal / trace_total_collateral::decimal AS c_plane_disconnected_collateral,
c_plane_has_only_victim_prefix_origin_collateral::decimal / trace_total_collateral::decimal AS c_plane_connected_collateral,
--adopting hidden hijacks
visible_hijacks_adopting::decimal / trace_total_adopting::decimal AS visible_hijacks_adopting,
(trace_hijacked_adopting::decimal - visible_hijacks_adopting::decimal)::decimal / trace_total_adopting::decimal AS hidden_hijacks_adopting,
--collateral hidden hijacks
visible_hijacks_collateral::decimal / trace_total_collateral::decimal AS visible_hijacks_collateral,
(trace_hijacked_collateral::decimal - visible_hijacks_collateral::decimal)::decimal / trace_total_collateral::decimal AS hidden_hijacks_collateral
FROM {Simulation_Results_Table.name}
WHERE trace_total_adopting > 0 AND trace_total_collateral > 0
);"""
self.execute(sql)
class Simulation_Results_Avg_Table(Generic_Table):
"""Table used to get the confidence intervals for graphing"""
name = "simulation_results_avg"
def fill_table(self):
global test_info
sql = f"""
CREATE UNLOGGED TABLE {self.name} AS (
SELECT
{", ".join(test_info)},
--adopting traceback
AVG(trace_hijacked_adopting) AS trace_hijacked_adopting,
(1.96 * STDDEV(trace_hijacked_adopting))::decimal / SQRT(COUNT(*))::decimal AS trace_hijacked_adopting_confidence,
AVG(trace_disconnected_adopting) AS trace_disconnected_adopting,
(1.96 * STDDEV(trace_disconnected_adopting))::decimal / SQRT(COUNT(*))::decimal AS trace_disconnected_adopting_confidence,
AVG(trace_connected_adopting) AS trace_connected_adopting,
(1.96 * STDDEV(trace_connected_adopting))::decimal / SQRT(COUNT(*))::decimal AS trace_connected_adopting_confidence,
--collateral traceback
AVG(trace_hijacked_collateral) AS trace_hijacked_collateral,
(1.96 * STDDEV(trace_hijacked_collateral))::decimal / SQRT(COUNT(*))::decimal AS trace_hijacked_collateral_confidence,
AVG(trace_disconnected_collateral) AS trace_disconnected_collateral,
(1.96 * STDDEV(trace_disconnected_collateral))::decimal / SQRT(COUNT(*))::decimal AS trace_disconnected_collateral_confidence,
AVG(trace_connected_collateral) AS trace_connected_collateral,
(1.96 * STDDEV(trace_connected_collateral))::decimal / SQRT(COUNT(*))::decimal AS trace_connected_collateral_confidence,
--adopting control plane
AVG(c_plane_hijacked_adopting) AS c_plane_hijacked_adopting,
(1.96 * STDDEV(c_plane_hijacked_adopting))::decimal / SQRT(COUNT(*))::decimal AS c_plane_hijacked_adopting_confidence,
AVG(c_plane_disconnected_adopting) AS c_plane_disconnected_adopting,
(1.96 * STDDEV(c_plane_disconnected_adopting))::decimal / SQRT(COUNT(*))::decimal AS c_plane_disconnected_adopting_confidence,
AVG(c_plane_connected_adopting) AS c_plane_connected_adopting,
(1.96 * STDDEV(c_plane_connected_adopting))::decimal / SQRT(COUNT(*))::decimal AS c_plane_connected_adopting_confidence,
--collateral control plane
AVG(c_plane_hijacked_collateral) AS c_plane_hijacked_collateral,
(1.96 * STDDEV(c_plane_hijacked_collateral))::decimal / SQRT(COUNT(*))::decimal AS c_plane_hijacked_collateral_confidence,
AVG(c_plane_disconnected_collateral) AS c_plane_disconnected_collateral,
(1.96 * STDDEV(c_plane_disconnected_collateral))::decimal / SQRT(COUNT(*))::decimal AS c_plane_disconnected_collateral_confidence,
AVG(c_plane_connected_collateral) AS c_plane_connected_collateral,
(1.96 * STDDEV(c_plane_connected_collateral))::decimal / SQRT(COUNT(*))::decimal AS c_plane_connected_collateral_confidence,
--adopting hidden hijacks
AVG(visible_hijacks_adopting) AS visible_hijacks_adopting,
(1.96 * STDDEV(visible_hijacks_adopting))::decimal / SQRT(COUNT(*))::decimal AS visible_hijacks_adopting_confidence,
AVG(hidden_hijacks_adopting) AS hidden_hijacks_adopting,
(1.96 * STDDEV(hidden_hijacks_adopting))::decimal / SQRT(COUNT(*))::decimal AS hidden_hijacks_adopting_confidence,
--collateral hidden hijacks
AVG(visible_hijacks_collateral) AS visible_hijacks_collateral,
(1.96 * STDDEV(visible_hijacks_collateral))::decimal / SQRT(COUNT(*))::decimal AS visible_hijacks_collateral_confidence,
AVG(hidden_hijacks_collateral) AS hidden_hijacks_collateral,
(1.96 * STDDEV(hidden_hijacks_collateral))::decimal / SQRT(COUNT(*))::decimal AS hidden_hijacks_collateral_confidence
FROM {Simulation_Results_Agg_Table.name}
GROUP BY
{", ".join(test_info)}
);"""
self.execute(sql)
```
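The `_confidence` columns computed above are the standard 95% normal-approximation interval, 1.96 * stddev / sqrt(n), applied to the per-trial rates from the aggregate table. A minimal standalone sketch of the same computation in Python; the `trial_rates` list is hypothetical data, not values from the repository:
```python
import statistics
from math import sqrt

def ci_95(values):
    """95% confidence half-width, mirroring the SQL above:
    1.96 * STDDEV(x) / SQRT(COUNT(*))"""
    return 1.96 * statistics.stdev(values) / sqrt(len(values))

# Hypothetical per-trial hijack rates for one attack/defend scenario
trial_rates = [0.42, 0.38, 0.45, 0.40]
print(statistics.mean(trial_rates), ci_95(trial_rates))
```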
#### File: simulator/attacks/attack_classes.py
```python
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from .attack import Attack
class Subprefix_Hijack(Attack):
"""Hijack where there is a ROA for default_prefix"""
attacker_prefixes = [Attack.default_subprefix]
victim_prefixes = [Attack.default_prefix]
class Prefix_Hijack(Attack):
"""Hijack where there is a ROA for default_prefix"""
attacker_prefixes = [Attack.default_prefix]
victim_prefixes = [Attack.default_prefix]
class Prefix_Superprefix_Hijack(Attack):
"""Hijack where there is a ROA for default_prefix"""
attacker_prefixes = [Attack.default_prefix, Attack.default_superprefix]
victim_prefixes = [Attack.default_prefix]
class Unannounced_Hijack(Attack):
"""Should be inherited, unnanounced attacks"""
victim_prefixes = []
class Unannounced_Prefix_Hijack(Unannounced_Hijack):
"""Hijack where there is a ROA for default_prefix"""
attacker_prefixes = [Attack.default_prefix]
class Unannounced_Subprefix_Hijack(Unannounced_Hijack):
"""Hijack where there is a ROA for default_prefix"""
attacker_prefixes = [Attack.default_prefix, Attack.default_subprefix]
class Unannounced_Prefix_Superprefix_Hijack(Unannounced_Hijack):
"""Hijack where there is a ROA for default_prefix"""
attacker_prefixes = [Attack.default_prefix, Attack.default_superprefix]
class Naive_Origin_Hijack(Attack):
victim_prefixes = [Attack.default_prefix]
attacker_prefixes = [Attack.default_prefix]
# True victim is true origin
def _get_as_path(self, true_asn, true_victim, true_attacker):
# Victim path
if true_asn == true_victim:
return [true_victim]
# Attacker path
else:
return [true_attacker, true_victim]
class Intermediate_AS_Hijack_1(Attack):
victim_prefixes = [Attack.default_prefix]
attacker_prefixes = [Attack.default_prefix]
reserved_asns = [65534, 65533, 65532, 65531, 65530]
number_intermediate_ases = 1
# True victim is true origin
def _get_as_path(self, true_asn, true_victim, true_attacker):
if true_asn == true_victim:
return [true_victim]
# Attacker path
else:
return [true_attacker] + self.reserved_asns[:self.number_intermediate_ases] + [true_victim]
class Intermediate_AS_Hijack_2(Intermediate_AS_Hijack_1):
number_intermediate_ases = 2
class Intermediate_AS_Hijack_3(Intermediate_AS_Hijack_1):
number_intermediate_ases = 3
class Intermediate_AS_Hijack_4(Intermediate_AS_Hijack_1):
number_intermediate_ases = 4
class Intermediate_AS_Hijack_5(Intermediate_AS_Hijack_1):
number_intermediate_ases = 5
```
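For context, a new hijack variant would follow the same pattern as the classes above: declare the prefixes each party announces and, if the AS path is forged, override `_get_as_path`. A minimal hypothetical sketch (the class name and forged path shape are illustrative, not part of the library):
```python
from .attack import Attack  # assumed relative import, matching the module above

class Hypothetical_Origin_Prepend_Hijack(Attack):
    """Attacker announces the victim's prefix with its own ASN prepended twice."""
    victim_prefixes = [Attack.default_prefix]
    attacker_prefixes = [Attack.default_prefix]

    def _get_as_path(self, true_asn, true_victim, true_attacker):
        # Victim originates normally; attacker forges a path ending at the victim
        if true_asn == true_victim:
            return [true_victim]
        return [true_attacker, true_attacker, true_victim]
```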
#### File: simulations/simulator/simulator.py
```python
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
from .attacks import Attack
from .data_point_info import Data_Point
from .multiline_tqdm import Multiline_TQDM
from .subtables import Subtables
from .tables import Simulation_Results_Table
from ..enums import Non_Default_Policies
from ...collectors import AS_Rank_Website_Parser
from ...collectors.as_rank_website.tables import AS_Rank_Table
from ...collectors import Relationships_Parser
from ...collectors.relationships.tables import ASes_Table
from ...extrapolator import Simulation_Extrapolator_Wrapper as Sim_Exr
from ...utils.base_classes import Parser
class Simulator(Parser):
"""This class simulates Attack defend scenarios on the internet.
In depth explanation at the top of the file
"""
def _run(self,
percents=[1],
num_trials=2,
exr_bash=None, # For development only
exr_cls=Sim_Exr, # For development only
seeded_trial=None,
deterministic=False,
attack_types=Attack.runnable_attacks[:1],
adopt_policies=list(Non_Default_Policies.__members__.values())[:1],
# These args prob don't change for most cases
number_of_attackers=[1],
rounds=1,
extra_bash_args_1=[None],
extra_bash_args_2=[None],
extra_bash_args_3=[None],
extra_bash_args_4=[None],
extra_bash_args_5=[None],
edge_hijack=True,
etc_hijack=False,
top_100_hijack=False,
redownload_base_data=False):
"""Runs Attack/Defend simulation.
In depth explanation at top of module.
"""
self._validate_input(num_trials,
rounds,
number_of_attackers,
extra_bash_args_1,
extra_bash_args_2,
extra_bash_args_3,
extra_bash_args_4,
extra_bash_args_5)
if redownload_base_data:
# Download as rank, relationships, extrapolator
# Separate function for development ease
self._redownload_base_data()
# Clear the table that stores all trial info
with Simulation_Results_Table(clear=True) as _:
pass
# Gets the subdivisions of the internet to track
tables = Subtables(percents, edge_hijack, etc_hijack, top_100_hijack)
tables.fill_tables()
# All data points that we want to graph
data_pts = [Data_Point(tables, i, percent, self.csv_dir, deterministic)
for i, percent in enumerate(percents)]
# Total number of attack/defend scenarios for tqdm
total = self._total(data_pts,
attack_types,
number_of_attackers,
adopt_policies,
num_trials,
extra_bash_args_1,
extra_bash_args_2,
extra_bash_args_3,
extra_bash_args_4,
extra_bash_args_5)
with Multiline_TQDM(total) as pbars:
for trial in range(num_trials):
for data_pt in data_pts:
data_pt.get_data(pbars,
attack_types,
number_of_attackers,
adopt_policies,
trial,
seeded_trial=seeded_trial,
exr_bash=exr_bash,
exr_kwargs=self.kwargs,
exr_cls=exr_cls,
rounds=rounds,
extra_bash_args_1=extra_bash_args_1,
extra_bash_args_2=extra_bash_args_2,
extra_bash_args_3=extra_bash_args_3,
extra_bash_args_4=extra_bash_args_4,
extra_bash_args_5=extra_bash_args_5)
tables.close()
def _validate_input(self,
trials,
rounds,
number_of_attackers,
extra_bash_args_1,
extra_bash_args_2,
extra_bash_args_3,
extra_bash_args_4,
extra_bash_args_5):
if trials <= 1:
logging.warning("Need at least 2 trials for conf intervals")
assert rounds >= 1, "Need at least 1 round"
assert number_of_attackers == [1], "No. Just no."
for extra_bash_args in [extra_bash_args_1,
extra_bash_args_2,
extra_bash_args_3,
extra_bash_args_4,
extra_bash_args_5]:
assert isinstance(extra_bash_args, list)
if extra_bash_args != [None]:
for x in extra_bash_args:
assert isinstance(x, int)
def _redownload_base_data(self, Exr_Cls=Sim_Exr):
"""Downloads/creates data, tools, and indexes for simulator
Tools: Extrapolator with speficied branch
Data: Relationships data, AS Rank data
Indexes: ASes_Table, AS_Rank_Table (for creating top_100_ases)
"""
if Exr_Cls is not None:
# forces new install of extrapolator
Exr_Cls(**self.kwargs).install(force=True)
# Gets relationships table
Relationships_Parser(**self.kwargs)._run()
# Get as rank data
        AS_Rank_Website_Parser()._run(random_delay=True)
# Index to speed up Top_100_ASes_Table.fill_table
# The following indexes were considered:
# ases(asn), as_rank(asn), as_rank(as_rank), as_rank(asn, as_rank)
# Analysis concluded any one of the above would be sufficient.
# Could change in the future if they become useful elsewhere.
with AS_Rank_Table() as db:
db.execute(f"CREATE INDEX ON {db.name}(as_rank);")
def _total(self,
data_pts,
attack_types,
number_of_attackers,
adopt_pols,
trials,
extra_bash_args_1,
extra_bash_args_2,
extra_bash_args_3,
extra_bash_args_4,
extra_bash_args_5):
"""tqdm runs off every possible attack/defend scenario
This includes: attack type, adopt policy, percent, trial
This way we can jump to any attack/defend scenario quickly
Multiline is to display as much info as possible
Get total number of scenarios (Test class objects that will be run).
This value will be used in the tqdm
"""
total = 0
for data_pt in data_pts:
for test in data_pt.get_possible_tests(attack_types,
number_of_attackers,
adopt_pols,
0, # trial
extra_bash_args_1,
extra_bash_args_2,
extra_bash_args_3,
extra_bash_args_4,
extra_bash_args_5,
set_up=False):
total += trials
return total
```
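Because `_run` exposes the whole parameter grid as keyword arguments, a sweep is configured directly at the call site. A hedged sketch, assuming the package import paths below and that the `Parser` base class allows construction with no arguments, as the parser tests elsewhere in this repository do; `_run` is called directly here only for illustration:
```python
from lib_bgp_data.simulations.simulator.simulator import Simulator  # assumed module path
from lib_bgp_data.simulations.simulator.attacks import Attack       # assumed module path
from lib_bgp_data.simulations.enums import Non_Default_Policies     # assumed module path

# Sweep two adoption percentages over the first runnable attack type
Simulator()._run(percents=[5, 10],
                 num_trials=10,
                 attack_types=Attack.runnable_attacks[:1],
                 adopt_policies=[list(Non_Default_Policies.__members__.values())[0]])
```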
#### File: tests/system_tests/test_special_cases.py
```python
import pytest
from .graph_tester import Graph_Tester
#from ..tables import Hijack
from ....enums import Non_Default_Policies, Policies, Data_Plane_Conditions as Conds
from ...attacks.attack_classes import Subprefix_Hijack
from ...attacks.attack import Attack
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class Test_Special_Cases(Graph_Tester):
"""Tests all example graphs within our paper."""
def test_v2_customer_blackhole(self):
r"""
55
/ \
44 3
/ \
666 77
        Here we're testing that v2 ASes should not create blackhole announcements for attack
        announcements received from a customer, but rather just drop and blackhole the announcement.
        That can be captured here with 55 and 44 as the adopting ASes. AS 44 should not have a
        blackhole, but AS 55 should have a blackhole.
"""
attack_types = [Subprefix_Hijack]
adopt_policies = [Non_Default_Policies.ROVPP_V2]
peer_rows = []
provider_customer_rows = [[55, 44],
[55, 3],
[3, 666],
[3, 77]]
# Set adopting rows
bgp_ases = [3, 666, 77]
rov_adopting_ases = []
rovpp_adopting_ases = [55, 44]
adopting_rows = []
for bgp_as in bgp_ases:
adopting_rows.append([bgp_as, Policies.DEFAULT.value, False])
# for adopting_as in rov_adopting_ases:
# adopting_rows.append([adopting_as, Policies.ROV.value, True])
for adopting_as in rovpp_adopting_ases:
adopting_rows.append([adopting_as, Policies.ROVPP_V2.value, True])
attacker = 666
victim = 77
exr_output = [
{'asn': 77, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 64514},
{'asn': 77, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 64514},
{'asn': 44, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 55},
{'asn': 55, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 3},
{'asn': 55,
'origin': 64512,
'prefix': '1.2.3.0/24',
'received_from_asn': 64512},
{'asn': 3, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 77},
{'asn': 3, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 666},
{'asn': 666, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 3},
{'asn': 666,
'origin': 666,
'prefix': '1.2.3.0/24',
'received_from_asn': 64513}
]
self._test_graph(attack_types=attack_types,
adopt_policies=adopt_policies,
peer_rows=peer_rows,
provider_customer_rows=provider_customer_rows,
adopting_rows=adopting_rows,
attacker=attacker,
victim=victim,
exr_output=exr_output)
def test_v2_customer_peer_and_provider(self):
r"""
55 --- 88
/ \ \
22 33 44
/ \
666 77
"""
attack_types = [Subprefix_Hijack]
adopt_policies = [Non_Default_Policies.ROVPP_V2]
peer_rows = [[55, 88]]
provider_customer_rows = [[55, 22],
[55, 33],
[33, 666],
[33, 77],
[88, 44]]
# Set adopting rows
bgp_ases = [33, 22, 44, 666, 77, 88]
rov_adopting_ases = []
rovpp_adopting_ases = [55]
adopting_rows = []
for bgp_as in bgp_ases:
adopting_rows.append([bgp_as, Policies.DEFAULT.value, False])
for adopting_as in rovpp_adopting_ases:
adopting_rows.append([adopting_as, Policies.ROVPP_V2.value, True])
attacker = 666
victim = 77
exr_output = [
{'asn': 666, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 33},
{'asn': 666,
'origin': 666,
'prefix': '1.2.3.0/24',
'received_from_asn': 64513},
{'asn': 44, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 88},
{'asn': 77, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 64514},
{'asn': 77, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 64514},
{'asn': 55, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 33},
{'asn': 55,
'origin': 64512,
'prefix': '1.2.3.0/24',
'received_from_asn': 64512},
{'asn': 22, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 55},
{'asn': 88, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 55},
{'asn': 33, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 77},
{'asn': 33, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 666}
]
self._test_graph(attack_types=attack_types,
adopt_policies=adopt_policies,
peer_rows=peer_rows,
provider_customer_rows=provider_customer_rows,
adopting_rows=adopting_rows,
attacker=attacker,
victim=victim,
exr_output=exr_output)
```
#### File: simulator/tests/test_multiline_tqdm.py
```python
__authors__ = ["<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import inspect
import pytest
from ..multiline_tqdm import Multiline_TQDM
from ..attacks.attack import Attack
from ..attacks.attack_classes import Subprefix_Hijack
from ...enums import Non_Default_Policies
@pytest.mark.simulator
class Test_Multiline_TQDM:
"""Tests all functions within the multiline tqdm class."""
def test___init___and_close(self):
"""Tests initialization of the Multiline TQDM
NOTE: please close it after completion
"""
# Create multiline tqdm object
ml_tqdm = Multiline_TQDM(total_trials=10)
# Assert instantiation variables
assert len(ml_tqdm.pbars) == len(ml_tqdm._get_desc())
assert ml_tqdm.total_trials == 10
assert ml_tqdm.current_trial == 0
for pbar in ml_tqdm.pbars:
assert pbar.total == 10
# Close the multiline tqdm object
ml_tqdm.close()
def test_context_manager(self):
"""Tests using the multiline tqdm as a context manager"""
# Create multiline tqdm object using context manager
with Multiline_TQDM(total_trials=10) as ml_tqdm:
            # Assert instantiation variables
assert len(ml_tqdm.pbars) == len(ml_tqdm._get_desc())
assert ml_tqdm.total_trials == 10
assert ml_tqdm.current_trial == 0
# Check instantiation of individual pbars
for pbar in ml_tqdm.pbars:
assert pbar.total == 10
def test_update(self):
"""Tests calling the update func in multiline tqdm.
        make sure that the tqdm bars are all incremented.
"""
# Create multiline tqdm object using context manager
with Multiline_TQDM(total_trials=1) as ml_tqdm:
# Assert that initial value of each pbar is 0
for pbar in ml_tqdm.pbars:
assert pbar.n == 0
# Update the pbars
ml_tqdm.update()
# Assert that the values were actually updated
assert ml_tqdm.current_trial == 1
for pbar in ml_tqdm.pbars:
assert pbar.n == 1
def test_update_extrapolator(self):
"""Tests calling the update extrapolator func.
make sure that extrapolator running is changed to false.
"""
# Create multiline tqdm object using context manager
with Multiline_TQDM(total_trials=1) as ml_tqdm:
            # Assert that the extrapolator-running description is True
assert ml_tqdm.pbars[-1].desc[:-2].strip() == "Extrapolator Running: True"
# Call the set_desc() method, this is necessary for us to be able
# to call the update_extrapolator() method. We'll pass in 'None'
# for the args for now
# https://stackoverflow.com/a/41188411
params = inspect.signature(Multiline_TQDM.set_desc).parameters
num_params = len(params)
# Subtract 2 since we don't need the self and exr_running args
args = [None for _ in range(num_params-2)]
ml_tqdm.set_desc(*args)
# Call the update_extrapolator method and make sure that
# extrapolator running is changed to False
ml_tqdm.update_extrapolator()
assert ml_tqdm.pbars[-1].desc[:-2].strip() == "Extrapolator Running: False"
def test_set_desc(self):
"""Tests the set_desc function.
Parametize this, test with exr running and not
make sure all text updates properly
"""
# Create multiline tqdm object using context manager
with Multiline_TQDM(total_trials=1) as ml_tqdm:
# Arguments that will be supplied to the set_desc method
adopt_pol = Non_Default_Policies.BGP
attack = Subprefix_Hijack("1.2.0.0/16",
"1.2.3.0/24")
percent = 50
barg_1 = "barg1"
barg_2 = "barg2"
barg_3 = "barg3"
barg_4 = "barg4"
barg_5 = "barg5"
exr_running = True
# Call set_desc() method
ml_tqdm.set_desc(adopt_pol,
percent,
attack,
barg_1,
barg_2,
barg_3,
barg_4,
barg_5,
exr_running)
# Assert that appropriate variables were instantiated
assert ml_tqdm.adopt_pol == adopt_pol
assert ml_tqdm.percent == percent
assert ml_tqdm.attack == attack
assert ml_tqdm.extra_bash_arg_1 == barg_1
assert ml_tqdm.extra_bash_arg_2 == barg_2
assert ml_tqdm.extra_bash_arg_3 == barg_3
assert ml_tqdm.extra_bash_arg_4 == barg_4
assert ml_tqdm.extra_bash_arg_5 == barg_5
# Get the descs that were generated
descs = ml_tqdm._get_desc(adopt_pol,
percent,
attack,
barg_1,
barg_2,
barg_3,
barg_4,
barg_5,
exr_running)
# Assert that each tqdm bar in multiline tqdm object has
# the appropriate description
for pbar, desc in zip(ml_tqdm.pbars, descs):
# When we set a description for a tqdm bar, ": " is added.
# Need to trim this off when checking for equality
assert pbar.desc[:-2] == desc
def test_get_desc(self):
"""Tests the _get_desc function
Make sure text is returned properly, both for None and real vals
"""
# Create multiline tqdm object using context manager
with Multiline_TQDM(total_trials=1) as ml_tqdm:
# Expected output if no args are supplied to _get_desc()
expected_none = ["Attack_cls: NoneType ",
"Adopt Policy: ",
"Adoption Percentage: ",
"Attacker: ",
"Victim: ",
"Extra_bash_arg_1: ",
"Extra_bash_arg_2: ",
"Extra_bash_arg_3: ",
"Extra_bash_arg_4: ",
"Extra_bash_arg_5: ",
"Extrapolator Running: True "]
# Expected output if args are supplied to _get_desc()
expected_real = ["Attack_cls: Subprefix_Hijack ",
"Adopt Policy: BGP ",
"Adoption Percentage: 50 ",
"Attacker: 1.2.3.0/24 ",
"Victim: 1.2.0.0/16 ",
"Extra_bash_arg_1: barg1 ",
"Extra_bash_arg_2: barg2 ",
"Extra_bash_arg_3: barg3 ",
"Extra_bash_arg_4: barg4 ",
"Extra_bash_arg_5: barg5 ",
"Extrapolator Running: False "]
# Arguments that will be supplied to the _get_desc() method
adopt_pol = Non_Default_Policies.BGP
attack = Subprefix_Hijack("1.2.0.0/16",
"1.2.3.0/24")
percent = 50
barg_1 = "barg1"
barg_2 = "barg2"
barg_3 = "barg3"
barg_4 = "barg4"
barg_5 = "barg5"
# Check if expected outputs matches actual outputs
for exp, desc in zip(expected_none, ml_tqdm._get_desc()):
assert exp == desc
for exp, desc in zip(expected_real,
ml_tqdm._get_desc(adopt_pol,
percent,
attack,
barg_1,
barg_2,
barg_3,
barg_4,
barg_5,
False)):
assert exp == desc
```
#### File: base_classes/tests/test_enumerable_enum.py
```python
import pytest
from ..enumerable_enum import Enumerable_Enum
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
@pytest.mark.base_classes
class Test_Enumerable_Enum:
"""Tests all local functions within the Enumerable_enum class."""
@pytest.mark.skip(reason="work for new hires")
def test_list_values(self):
"""Test that the values of the enum are listed"""
pass
```
#### File: base_classes/tests/test_parser.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
import os
import pytest
from ..parser import Parser
@pytest.mark.base_classes
class Test_Parser:
"""Tests all functions within the Parser class."""
@pytest.mark.skip(reason="New hire work")
def test_init_subclass(self):
"""Test the __init_sublcass method.
Make sure that all inherited classes are in the parsers list.
"""
    def test__init__(self):
"""Tests init function.
Should have a section.
Logging should be configured.
path and csv directories should be created and empty
should fail if _run not present, and vice versa.
"""
class Foo(Parser):
pass
with pytest.raises(AssertionError):
f = Foo()
# defaults
class Subparser(Parser):
def _run(self):
pass
sp = Subparser()
assert sp.kwargs['section'] == 'test'
assert logging.root.level == logging.INFO
path = '/tmp/test_Subparser'
csv_dir = '/dev/shm/test_Subparser'
assert not os.listdir(path)
assert not os.listdir(csv_dir)
# reset, otherwise logging can only be configured once
logging.root.handlers = []
# with kwargs
stream_level = logging.ERROR
path = './foo'
csv_dir = './csv'
sp = Subparser(stream_level=stream_level, path=path, csv_dir=csv_dir)
assert logging.root.level == logging.ERROR
assert not os.listdir(path)
assert not os.listdir(csv_dir)
def assert_cleanup(self, parser):
assert not os.path.exists(parser.path)
assert not os.path.exists(parser.csv_dir)
@pytest.mark.skip(reason="New hire work")
def test_run(self):
"""Tests the run function
One test where there is an exception - do not raise, but log
-test should still clean out dirs
One test should be where there is no exception
-tests should still clean out dirs
"""
@pytest.mark.skip(reason="New hire work")
def test_end_parser(self):
"""tests end_parser func
        Makes sure that dirs are cleaned out. Don't worry about the time.
"""
def test_argparse_call(self):
"""Tests argparse call method.
See how __main__.py uses this function. Read the docstrings.
Attempt to have a class be able to be called with this. Make
sure that it works.
"""
# Add Foo_Parser to an existing parser file
# Run it, and assert its _run function is called
# this gets us up to /base_classes
p = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
p = os.path.join(os.path.dirname(p), 'bgpstream_website_parser',
'bgpstream_website_parser.py')
with open(p, 'r') as f:
og_cpy = f.read()
sample = 'sample.txt'
code = ("class Foo_Parser(Parser):\n\tdef _run(self):\n\t\t"
f"with open('{sample}', 'w+') as f: f.write('abc')")
with open(p, 'a') as f:
f.write('\n')
f.write(code)
os.system('lib_bgp_data --foo_parser')
with open(sample, 'r') as f:
assert f.read() == 'abc'
with open(p, 'w') as f:
f.write(og_cpy)
os.remove(sample)
```
#### File: utils/database/config.py
```python
__authors__ = ["<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import time
import logging
import os
import pytest
import configparser
from configparser import NoSectionError, ConfigParser as SCP
from psutil import virtual_memory
def set_global_section_header(section=None):
global global_section_header
if section is not None:
global_section_header = section
# More readable to write it this way imo
elif "global_section_header" in globals() and global_section_header is not None:
pass
else:
global_section_header = "bgp"
return global_section_header
class Config:
"""Interact with config file"""
__slots__ = []
path = "/etc/bgp/bgp.conf"
def __init__(self, section: str = None):
"""Initializes path for config file."""
# Declare the section header to be global so Database can refer to it
set_global_section_header(section)
@property
def section(self):
global global_section_header
return global_section_header
def create_config(self, _password: str):
"""Creates the default config file."""
# Do this here so that ram is set correctly
restart = self.restart_postgres_cmd
# Creates the /etc/bgp directory
self._create_config_dir()
# Removes old conf section
self._remove_old_config_section(self.section)
# Gets ram on the machine
_ram = int((virtual_memory().available / 1000 / 1000) * .9)
logging.info(f"Setting ram to {_ram}")
# Conf info
_config = SCP()
_config.read(self.path)
_config[self.section] = {"host": "localhost",
"database": self.section,
"password": <PASSWORD>,
"user": self.section + "_user",
"ram": _ram,
"restart_postgres_cmd": restart}
# Writes the config
with open(self.path, "w+") as config_file:
_config.write(config_file)
def _create_config_dir(self):
"""Creates the /etc/bgp directory."""
try:
os.makedirs(os.path.split(self.path)[0])
except FileExistsError:
logging.debug(f"{os.path.split(self.path)[0]} exists, "
"not creating new directory")
@classmethod
def _remove_old_config_section(cls, section: str):
"""Removes the old config file if it exists."""
# Initialize ConfigParser
_conf = SCP()
# Read from .conf file
_conf.read(cls.path)
# Try to delete the section
try:
del _conf[section]
        # If it doesn't exist, it doesn't matter
except KeyError:
return
# Otherwise, write the change to the file
with open(cls.path, "w+") as configfile:
_conf.write(configfile)
def _read_config(self, section: str, tag: str, raw: bool = False):
"""Reads the specified section from the configuration file."""
for i in range(10):
try:
_parser = SCP()
_parser.read(self.path)
string = _parser.get(section, tag, raw=raw)
try:
return int(string)
except ValueError:
return string
except configparser.ParsingError:
logging.warning("Config parsing error. Potentially someone else edited config. Retrying")
time.sleep(2)
def get_db_creds(self, error=False) -> dict:
"""Returns database credentials from the config file."""
try:
# section = "bgp"
subsections = ["user", "host", "database"]
args = {x: self._read_config(self.section, x) for x in subsections}
args["password"] = self._read_config(self.section,
"password",
raw=True)
return args
except NoSectionError:
if error:
raise NoSectionError
self.install()
return self.get_db_creds()
def install(self) -> dict:
"""Installs the database section"""
try:
return self.get_db_creds(error=True)
except Exception as e:#NoSectionError as e:
# Database section is not installed, install it
# Needed here due to circular imports
from .postgres import Postgres
Postgres(section=self.section).install(self.section)
self.__init__(self.section)
return self.get_db_creds()
@property
def ram(self) -> int:
"""Returns the amount of ram on a system."""
return self._read_config(self.section, "ram")
@property
def restart_postgres_cmd(self) -> str:
"""Returns restart postgres cmd or writes it if none exists."""
subsection = "restart_postgres_cmd"
try:
cmd = self._read_config(self.section, subsection)
except NoSectionError:
typical_cmd = "sudo systemctl restart [email protected]"
prompt = ("Enter the command to restart postgres\n"
f"Enter: {typical_cmd}\n"
"Custom: Enter cmd for your machine\n")
# https://stackoverflow.com/a/58866220
if "PYTEST_CURRENT_TEST" in os.environ:
return typical_cmd
print(os.environ)
cmd = input(prompt)
if cmd == "":
cmd = typical_cmd
return cmd
def _write_to_config(self, section, subsection, string):
"""Writes to a config file."""
_conf = SCP()
_conf.read(self.path)
_conf[section][subsection] = str(string)
with open(self.path, 'w') as configfile:
_conf.write(configfile)
```
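In practice the class above is a thin wrapper around the .conf file: construct it with a section name, then pull credentials or single values. A brief sketch of that flow, assuming the import path below; the section name is whatever was installed on the machine:
```python
from lib_bgp_data.utils.database.config import Config  # assumed module path

conf = Config(section="bgp")        # also sets the global section header
creds = conf.get_db_creds()         # {"user", "host", "database", "password"}
print(creds["database"], conf.ram)  # single values come from _read_config under the hood
```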
#### File: utils/database/generic_table.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import inspect
import warnings
import logging
from multiprocessing import cpu_count
from subprocess import check_call
import os
import time
from .database import Database
# SHOULD INHERIT DECOMETA!
class Generic_Table(Database):
"""Interact with the database"""
__slots__ = ["name"]
def __init__(self, *args, **kwargs):
"""Asserts that name is set
Makes sure sql queries are formed properly"""
assert hasattr(self, "name"), "Inherited class MUST have a table name attr"
unlogged_err = ("Create unlogged tables for speed.\n Ex:"
"CREATE UNLOGGED TABLE IF NOT EXISTS {self.name}...")
# https://stackoverflow.com/a/427533/8903959
if "create table" in inspect.getsource(self.__class__):
raise Exception(unlogged_err + "\n And also capitalize SQL")
if "CREATE TABLE" in inspect.getsource(self.__class__):
raise Exception(unlogged_err)
super(Generic_Table, self).__init__(*args, **kwargs)
def get_all(self) -> list:
"""Gets all rows from table"""
return self.execute(f"SELECT * FROM {self.name}")
def get_count(self, sql: str = None, data: list = []) -> int:
"""Gets count from table"""
sql = sql if sql else f"SELECT COUNT(*) FROM {self.name}"
return self.execute(sql, data)[0]["count"]
def clear_table(self):
"""Clears the table"""
logging.debug(f"Dropping {self.name} Table")
self.cursor.execute(f"DROP TABLE IF EXISTS {self.name}")
logging.debug(f"{self.name} Table dropped")
def copy_table(self, path: str):
"""Copies table to a specified path"""
logging.debug(f"Copying file from {self.name} to {path}")
self.execute(f"COPY {self.name} TO %s DELIMITER '\t';", [path])
logging.debug("Copy complete")
def filter_by_IPV_family(self, IPV4: bool, IPV6: bool, col="prefix"):
"""Filters the data by IPV family"""
logging.info("Filtering by IPV family")
for num, ipv_bool in zip([4, 6], [IPV4, IPV6]):
if not ipv_bool:
logging.debug(f"Deleting IPV{num} from {self.name}")
sql = f"DELETE FROM {self.name} WHERE family({col}) = {num};"
self.execute(sql)
logging.debug(f"IPV{num} deleted from mrt_announcements")
@property
def columns(self) -> list:
"""Returns the columns of the table
used in utils to insert csv into the database"""
sql = """SELECT column_name FROM information_schema.columns
WHERE table_schema = 'public' AND table_name = %s;
"""
self.cursor.execute(sql, [self.name])
# Make sure that we don't get the _id columns
return [x['column_name'] for x in self.cursor.fetchall()
if "_id" not in x['column_name']]
```
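A table class in this codebase therefore only needs a `name` attribute and, if it creates its own schema, an uppercase unlogged CREATE statement, which is what the constructor's source checks enforce. A minimal hypothetical subclass following those rules (the table name and columns are illustrative):
```python
from lib_bgp_data.utils.database import Generic_Table  # assumed import path

class Example_Table(Generic_Table):
    """Hypothetical table used only to illustrate the required shape."""
    name = "example_table"

    def _create_tables(self):
        # Unlogged and uppercase SQL, as required by Generic_Table.__init__
        sql = f"""CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (
                  prefix CIDR,
                  origin BIGINT
                  );"""
        self.execute(sql)
```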
#### File: selenium_related/tests/test_sel_driver.py
```python
__authors__ = ["<NAME>"]
__credits__ = ["<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from urllib3.exceptions import MaxRetryError
import pytest
from ..sel_driver import Selenium_Driver
def driver_is_closed(driver):
"""Determines whether the driver is closed or not.
A driver instance will error if it is closed
and the title is tried to be found.
"""
try:
driver.title
return False
except MaxRetryError:
return True
@pytest.mark.asrank_website_parser
class TestSeleniumDriver:
"""Tests all functions within the SeleniumDriver class."""
def test_init_driver(self):
"""Tests producing creating a headless selenium driver."""
with Selenium_Driver() as sel_driver:
assert sel_driver._driver is not None
def test_get_page(self):
"""Tests getting the dynamic html of a url.
Checks that the dynamic html is fetched by looking
for the dynamic class name within the HTML.
"""
url = 'https://asrank.caida.org'
timeout = 20
dynamic_class = 'asrank-row-org'
soup = Selenium_Driver().get_page(url,
timeout=timeout,
dynamic_class_name=dynamic_class)
assert soup.find_all("td", {'class': dynamic_class}) != []
def test_close(self):
"""Test if the driver instance correctly closes."""
driver = Selenium_Driver()
driver.close()
assert driver_is_closed(driver._driver)
```
#### File: utils/tests/test_logger.py
```python
__authors__ = ["<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pytest
import logging
import os
import sys
import io
import contextlib
import warnings
from datetime import datetime
from ..logger import config_logging, _get_log_path
from ...database import config
@pytest.mark.logging
class Test_Logging:
"""Tests all local functions within the logger.py file."""
def test_config_logging(self):
"""Tests the config logging function
Should log to proper section
should be able to change the section and have it log properly
Should log to a file and stdout
Shouldn't change when you run the func twice
Should be able to change the default log level
Should capture warnings
"""
# Make a test logger
config_logging()
# Get the path of the log file
path = _get_log_path(config.global_section_header)
# Clear it
open(path, 'w').close()
# Prepare to capture stdout
output = io.StringIO()
# Try to catch a warning
with contextlib.redirect_stdout(output):
logging.warning("Test warning")
# Check that stdout is not none
assert output.getvalue() is not None
# Check file is not none
with open(path, 'r') as f:
assert f.read() is not None
f.close()
# Clear the file again
open(path, 'w').close()
# Set logger to only log warning and worse, and change the section
config_logging(logging.WARNING, "test")
# Get the new path
path = _get_log_path("test")
# Send info
logging.info("Test info. You should not see this.")
# Check to see that it's empty
with open(path, 'r') as f:
assert f.read() == ''
f.close()
def test_get_log_path(self):
"""Tests get log path func
Make sure output is what's expected.
Make sure can create log dir.
"""
section = "log_test"
fname = f"{section}_{datetime.now().strftime('%Y_%m_%d')}.log"
log_path = _get_log_path(section)
assert os.path.exists("/var/log/lib_bgp_data")
assert log_path == os.path.join("/var/log/lib_bgp_data", fname)
return log_path
```
#### File: lib_bgp_data/utils/utils.py
```python
__authors__ = ["<NAME>", "<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from contextlib import contextmanager
import csv
from datetime import datetime, timedelta
import fileinput
import functools
import json
import logging
from multiprocessing import cpu_count
import os
from subprocess import check_call, DEVNULL
import sys
import time
import smtplib
from email.message import EmailMessage
from bs4 import BeautifulSoup as Soup
from bz2 import BZ2Decompressor
import gzip
from pathos.multiprocessing import ProcessingPool
import pytz
import requests
import urllib
import shutil
from psutil import process_iter
from signal import SIGTERM
# This decorator deletes paths before and after func is called
def delete_files(files=[]):
"""This decorator deletes files before and after a function.
This is very useful for installation procedures.
"""
def my_decorator(func):
@functools.wraps(func)
def function_that_runs_func(self, *args, **kwargs):
# Inside the decorator
# Delete the files - prob don't exist yet
delete_paths(files)
# Run the function
stuff = func(self, *args, **kwargs)
# Delete the files if they do exist
delete_paths(files)
return stuff
return function_that_runs_func
return my_decorator
@contextmanager
def Pool(threads: int, multiplier: int, name: str):
"""Context manager for pathos ProcessingPool"""
# Creates a pool with threads else cpu_count * multiplier
p = ProcessingPool(threads if threads else cpu_count() * multiplier)
logging.debug(f"Created {name} pool")
yield p
# Need to clear due to:
# https://github.com/uqfoundation/pathos/issues/111
p.close()
p.join()
p.clear()
def low_overhead_log(msg: str, level: int):
"""Heavy multiprocessed stuff should not log...
This is because the overhead is too great, even with
    rotating handlers and so on. To fix this, we implement this func
This func by default has level set to None. If level is not None
we know a heavy multiprocess func is running. If level is debug, we
print. Otherwise we do not.
"""
# Not running heavy parallel processes
if level is None:
logging.debug(msg)
# logging.DEBUG is 10, but I can't write that because then
# it might restart logging again and deadlock
elif level == 10:
print(msg)
def write_to_stdout(msg: str, log_level: int, flush=True):
# Note that we need log level here, since if we are doing
    # This only in heavily parallel processes
# For which we do not want the overhead of logging
if log_level <= 20:
sys.stdout.write(msg)
sys.stdout.flush()
# tqdm fails whenever large multiprocess operations take place
# We don't want 60 print statements every time we run multiprocess things
# So instead I wrote my own progress bar
# This prints a progress bar, func should write to sys.stdout to increment
# This works well with multiprocessing for our applications
# https://stackoverflow.com/a/3160819/8903959
@contextmanager
def progress_bar(msg: str, width: int):
log_level = logging.root.level
write_to_stdout(f"{datetime.now()}: {msg} X/{width}",
log_level,
flush=False)
write_to_stdout("[%s]" % (" " * width), log_level)
write_to_stdout("\b" * (width+1), log_level)
yield
write_to_stdout("]\n", log_level)
def now():
"""Returns current time"""
# https://stackoverflow.com/a/7065242
return pytz.utc.localize(datetime.utcnow())
def get_default_start() -> int:
"""Gets default start time, used in multiple places."""
return int((now()-timedelta(days=2)).replace(hour=0,
minute=0,
second=0,
microsecond=0
).timestamp() - 5)
def get_default_end() -> int:
"""Gets default end time, used in multiple places."""
# NOTE: Should use the default start for this method
return int((now()-timedelta(days=2)).replace(hour=23,
minute=59,
second=59,
microsecond=59
).timestamp())
def download_file(url: str,
path: str,
file_num=1,
total_files=1,
sleep_time=0,
progress_bar=False,
verify=True):
"""Downloads a file from a url into a path.
Verify: SSL certificate"""
log_level = logging.root.level
if progress_bar: # mrt_parser or multithreaded app running, disable log
logging.root.handlers.clear()
logging.shutdown()
low_overhead_log(f"Downloading\n Path:{path}\n Link:{url}\n", log_level)
# This is to make sure that the network is not bombarded with requests
time.sleep(sleep_time)
retries = 10
while retries > 0:
try:
# Code for downloading files off of the internet
import ssl
            ctx = ssl.create_default_context() if verify \
                else ssl._create_unverified_context()
with urllib.request.urlopen(url, timeout=60, context=ctx)\
as response, open(path, 'wb') as out_file:
# Copy the file into the specified file_path
shutil.copyfileobj(response, out_file)
low_overhead_log(f"{file_num} / {total_files} downloaded",
log_level)
if progress_bar:
incriment_bar(log_level)
return
# If there is an error in the download this will be called
# And the download will be retried
except Exception as e:
retries -= 1
time.sleep(5)
if retries <= 0 or "No such file" in str(e):
logging.error(f"Failed download {url}\nDue to: {e}")
sys.exit(1)
def incriment_bar(log_level: int):
# Needed here because mrt_parser can't log
if log_level <= 20: # INFO
sys.stdout.write("#")
sys.stdout.flush()
else:
sys.stdout.flush()
def delete_paths(paths):
"""Removes directory if directory, or removes path if path"""
if not paths:
paths = []
# If a single path is passed in, convert it to a list
if not isinstance(paths, list):
paths = [paths]
for path in paths:
try:
            # Files are removed with os.remove, directories with shutil.rmtree
            remove_func = os.remove if os.path.isfile(path) else shutil.rmtree
            remove_func(path)
# Just in case we always delete everything at the end of a run
# So some files may not exist anymore
except AttributeError:
logging.debug(f"Attribute error when deleting {path}")
except FileNotFoundError:
logging.debug(f"File not found when deleting {path}")
except PermissionError:
logging.warning(f"Permission error when deleting {path}")
def clean_paths(paths):
"""If path exists remove it, else create it"""
# If a single path is passed in, convert it to a list
if not isinstance(paths, list):
paths = [paths]
delete_paths(paths)
for path in paths:
# Fix this later
os.makedirs(path, mode=0o777, exist_ok=False)
def unzip_bz2(old_path: str) -> str:
"""Unzips a bz2 file from old_path into new_path and deletes old file"""
new_path = f"{old_path[:-4]}.decompressed"
with open(new_path, 'wb') as new_file, open(old_path, 'rb') as file:
decompressor = BZ2Decompressor()
for data in iter(lambda: file.read(100 * 1024), b''):
new_file.write(decompressor.decompress(data))
logging.debug(f"Unzipped a file: {old_path}")
delete_paths(old_path)
return new_path
def unzip_gz(path):
# https://stackoverflow.com/a/44712152/8903959
with gzip.open(path, 'rb') as f_in:
with open(path.replace(".gz", ""), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def write_csv(rows: list, csv_path: str):
"""Writes rows into csv_path, a tab delimited csv"""
logging.debug(f"Writing to {csv_path}")
delete_paths(csv_path)
with open(csv_path, mode='w') as temp_csv:
csv_writer = csv.writer(temp_csv,
delimiter='\t',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
# Writes all the information to a csv file
# Writing to a csv then copying into the db
# is the fastest way to insert files
csv_writer.writerows(rows)
def csv_to_db(Table, csv_path: str, clear_table=False):
"""Copies csv into table and deletes csv_path
Copies tab delimited csv into table and deletes csv_path
Table should inherit from Database class and have name attribute and
columns attribute
NOTE: I know there is a csv_dict_writer or something like that. However
I think because we do this on such a massive scale, this will surely be
slower, and the columns should always remain the same. So for speed,
we just use list of lists of rows to copy, not list of dicts of rows."""
with Table() as t:
if clear_table:
t.clear_table()
t._create_tables()
# No logging for mrt_announcements, overhead slows it down too much
logging.debug(f"Copying {csv_path} into the database")
try:
# Opens temporary file
with open(r'{}'.format(csv_path), 'r') as f:
columns = [x for x in t.columns if x != "id"]
# Copies data from the csv to the db, this is the fastest way
t.cursor.copy_from(f, t.name, sep='\t', columns=columns, null="")
t.cursor.execute("CHECKPOINT;")
except Exception as e:
print(e)
print(csv_path)
input()
raise e
# No logging for mrt_announcements, overhead slows it down too much
logging.debug(f"Done inserting {csv_path} into the database")
delete_paths(csv_path)
def rows_to_db(rows: list, csv_path: str, Table, clear_table=True):
"""Writes rows to csv and from csv to database"""
write_csv(rows, csv_path)
csv_to_db(Table, csv_path, clear_table)
def get_tags(url: str, tag: str, verify=True):
"""Gets the html of a given url, and returns a list of tags"""
    # Verification should not be disabled, but Isolario gives SSL errors
response = requests.get(url, verify=verify)
# Raises an exception if there was an error
response.raise_for_status()
# Get all tags within the beautiful soup from the html and return them
tags = [x for x in Soup(response.text, 'html.parser').select(tag)]
response.close()
return tags
def get_json(url: str, headers={}):
"""Gets the json from a url"""
# Formats request
req = urllib.request.Request(url, headers=headers)
# Opens request
with urllib.request.urlopen(req) as url:
# Gets data from the json in the url
return json.loads(url.read().decode())
def get_lines_in_file(filename: str) -> int:
"""Returns the number of lines in a given file"""
    with open(filename, 'r') as f:
        count = 0
        for count, _ in enumerate(f, 1):
            pass
    return count
def run_cmds(cmds, timeout=None):
cmd = " && ".join(cmds) if isinstance(cmds, list) else cmds
kwargs = {"shell": True}
# If logging is greater than or equal to info
if logging.root.level >= 20:
kwargs.update({"stdout": DEVNULL, "stderr": DEVNULL})
if timeout is not None:
kwargs["timeout"] = timeout
logging.debug(f"Running: {cmd}")
check_call(cmd, **kwargs)
def replace_line(path, prepend, line_to_replace, replace_with):
"""Replaces a line withing a file that has the path path"""
lines = [prepend + x for x in [line_to_replace, replace_with]]
for line in fileinput.input(path, inplace=1):
line = line.replace(*lines)
sys.stdout.write(line)
def send_email(subject, body, recipients=[]):
"""Sends an email notification"""
# Get the adress and password from the environment variables
email_address = os.environ.get("BGP_EMAIL_USER")
password = <PASSWORD>("BGP_EMAIL_PASS")
assert isinstance(recipients, list)
# Build the message
message = EmailMessage()
message["Subject"] = subject
message["From"] = email_address
message["To"] = ", ".join([email_address] + recipients)
message.set_content(body)
# Send the message
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
smtp.login(email_address, password)
smtp.send_message(message)
def kill_port(port: int, wait: bool = True):
for proc in process_iter():
for conns in proc.connections(kind='inet'):
if conns.laddr.port == port:
proc.send_signal(SIGTERM) # or SIGKILL
    # Sometimes the above doesn't do its job
run_cmds(f"sudo kill -9 $(lsof -t -i: {port})")
if wait:
time.sleep(120)
def add_cronjob(name, time, executable, overwrite=False):
"""Creates a cronjob of name, that runs executable at (cron) time."""
cronjob = f'/etc/cron.d/{name}'
if not os.path.exists(cronjob) or overwrite:
run_cmds(f'echo "{time} root {executable}" > {cronjob}')
```
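The CSV-then-COPY path above is the intended way to bulk load rows: `rows_to_db` writes a tab-delimited file with `write_csv` and then hands it to `csv_to_db`, which COPYs it into the table's non-id columns. A short sketch of that flow; the import paths, table class, rows, and CSV path are all illustrative assumptions:
```python
from lib_bgp_data.utils.utils import rows_to_db         # assumed module path
from lib_bgp_data.utils.database import Generic_Table   # assumed import path

class Prefix_Origin_Table(Generic_Table):
    """Hypothetical two-column table for the sake of the example."""
    name = "prefix_origins"

    def _create_tables(self):
        self.execute(f"""CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (
                     prefix CIDR, origin BIGINT);""")

rows = [["1.2.0.0/16", 77], ["1.2.3.0/24", 666]]
# Writes rows to a tab-delimited CSV, then COPYs it into the table and deletes the CSV
rows_to_db(rows, "/tmp/prefix_origins.csv", Prefix_Origin_Table)
```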
|
{
"source": "jfuruness/lib_bgp_simulator_engine",
"score": 3
}
|
#### File: lib_bgp_simulator_engine/lib_bgp_simulator_engine/bgp_as.py
```python
from copy import deepcopy
from .announcement import Announcement as Ann
from .base_as import AS
from .incoming_anns import IncomingAnns
from .relationships import Relationships
class BGPAS(AS):
__slots__ = []
def propogate_to_providers(self):
"""Propogates to providers"""
send_rels = set([Relationships.SEEDED, Relationships.CUSTOMERS])
self._propogate(Relationships.PROVIDERS, send_rels)
def propogate_to_customers(self):
"""Propogates to customers"""
send_rels = set([Relationships.SEEDED,
Relationships.PEERS,
Relationships.PROVIDERS])
self._propogate(Relationships.CUSTOMERS, send_rels)
def propogate_to_peers(self):
"""Propogates to peers"""
send_rels = set([Relationships.SEEDED,
Relationships.CUSTOMERS])
self._propogate(Relationships.PEERS, send_rels)
def _propogate(self, propogate_to: Relationships, send_rels: list):
"""Propogates announcements from local rib to other ASes
send_rels is the relationships that are acceptable to send
Later you can change this so it's not the local rib that's
being sent. But this is just proof of concept.
"""
for as_obj in getattr(self, propogate_to.name.lower()):
for prefix, ann in self.local_rib.items():
if ann.recv_relationship in send_rels:
incoming_anns = as_obj.incoming_anns.get(prefix, [])
incoming_anns.append(deepcopy(ann))
as_obj.incoming_anns[prefix] = incoming_anns
def process_incoming_anns(self, recv_relationship: Relationships):
"""Process all announcements that were incoming from a specific rel"""
for prefix, ann_list in self.incoming_anns.items():
# Add to the AS path of all announcements incoming
for ann in ann_list:
ann.as_path = (self.asn, *ann.as_path)
# Get announcement currently in local rib
best_ann = self.local_rib.get(prefix)
# If there is no announcement currently in local rib,
if best_ann is None:
# Make the best announcement be the first one in the list
best_ann = ann_list[0]
# And assign the priority to it
self._assign_priority(best_ann, recv_relationship)
# If that was the only ann, just continue
if len(ann_list) == 1:
self.local_rib[prefix] = best_ann
continue
# For each announcement that was incoming
for ann in ann_list:
# Assign priority and relationship
# Without assigning priority, we would have to recalculate it
# For the best ann every time
# Also, without priority as an int it's a bunch of if's
self._assign_priority(ann, recv_relationship)
# If the new announcement is better, save it
# Don't bother tiebreaking, if priority is same, keep existing
# Just like normal BGP
# Tiebreaking with time and such should go into the priority
# If we ever decide to do that
if best_ann < ann:
best_ann = ann
# Save to local rib
self.local_rib[prefix] = best_ann
self.incoming_anns = IncomingAnns()
def _assign_priority(self, ann: Ann, recv_relationship: Relationships):
"""Assigns the priority to an announcement according to Gao Rexford"""
ann.recv_relationship = recv_relationship
# Relationship
# Path length
# 100 - is to invert the as_path so that longer paths are worse
        assert len(ann.as_path) < 100
ann.priority = recv_relationship.value * 100 + (100 - len(ann.as_path))
```
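The priority integer in `_assign_priority` packs both Gao-Rexford criteria into one comparison: the relationship enum value fills the hundreds place and the inverted path length fills the remainder, so a better relationship always beats a shorter path. A small worked sketch of that arithmetic; the relationship values here are placeholders, not the library's actual enum numbers:
```python
def priority(relationship_value: int, as_path_len: int) -> int:
    # Mirrors _assign_priority: relationship dominates, then shorter paths win
    assert as_path_len < 100
    return relationship_value * 100 + (100 - as_path_len)

# Placeholder values: assume customer routes rank above peer routes
CUSTOMERS, PEERS = 3, 2
print(priority(CUSTOMERS, as_path_len=4))  # 396
print(priority(PEERS, as_path_len=1))      # 299 -> the customer route still wins
```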
#### File: defaults/hijacks/prefix_hijack_vic_ann.py
```python
from ..enums import ASNs, Prefixes, Timestamps
from ....announcement import Announcement
from ....roa_validity import ROAValidity
class PrefixHijackVicAnn(Announcement):
"""Default hijack victim announcement"""
def __init__(self,
prefix=Prefixes.PREFIX.value,
timestamp=Timestamps.VICTIM.value,
as_path=(ASNs.VICTIM.value,),
roa_validity=ROAValidity.VALID):
super(PrefixHijackVicAnn, self).__init__(prefix=prefix,
timestamp=timestamp,
as_path=as_path,
roa_validity=roa_validity)
```
#### File: tests/utils/customer_provider_link.py
```python
class CustomerProviderLink:
"""Customer provider link for unit tests
Without this provider and customer order are easily swapped
"""
# Kwargs is used here to prevent args to avoid out of order problems
def __init__(self, **kwargs):
self.customer = kwargs["customer"]
self.provider = kwargs["provider"]
```
#### File: tests/utils/run_example.py
```python
from datetime import datetime
from .graph_writer import write_graph
from ...bgp_as import BGPAS
from ...simulator_engine import SimulatorEngine
# tmp_path is a pytest fixture
def run_example(tmp_path,
peers=list(),
customer_providers=list(),
as_types=dict(),
as_classes_dict={0: BGPAS},
announcements=list(),
local_ribs=dict(),
):
"""Runs an example"""
path = tmp_path / "example.tsv"
write_graph(peers, customer_providers, as_types, path)
print("populating engine")
start = datetime.now()
engine = SimulatorEngine(str(path), as_classes_dict)
    print((datetime.now() - start).total_seconds())
print("Running engine")
start = datetime.now()
engine.run(announcements, clear=False)
    input((datetime.now() - start).total_seconds())
for as_obj in engine:
print("ASN:", as_obj.asn)
for prefix, ann in as_obj.local_rib.items():
print(ann)
if local_ribs:
as_obj.local_rib.assert_eq(local_ribs[as_obj.asn])
```
|
{
"source": "jfuruness/lib_bgp_simulator",
"score": 2
}
|
#### File: lib_bgp_simulator/announcements/announcement.py
```python
import dataclasses
from yamlable import YamlAble, yaml_info, yaml_info_decorate
from ..enums import Relationships
# Because of the two issues below, we MUST use
# unsafe_hash and not frozen
# We can't use their backports
# Because we inherit announcements
# So the slots in an announcement != vars
# When python3.10 is supported by pypy3, we can
# use Frozen properly
# https://stackoverflow.com/q/55307017/8903959
# https://bugs.python.org/issue45520
@yaml_info(yaml_tag="Announcement")
@dataclasses.dataclass(unsafe_hash=True)
class Announcement(YamlAble):
"""MRT Announcement"""
# I ran tests for 50 trials for 20% adoption for ROV
# On average, with slots and pypy, an average of 207 seconds
# But without slots, more RAM, but 175 seconds
# I ran a lot of these trials and this was very consistent
# extremely low standard deviation. Maybe 1s difference across trials
# My thought is twofold. One is that maybe object creation with slots
# Is slightly slower, but less RAM with faster attr access
# Maybe this has to do with deepcopy doing better without slots
# Or maybe pypy is more optimized without slots since it's more common
# No reason to figure out exactly why, but turning off slots for
# Slightly more RAM but slightly faster trials makes sense to me since
# We have very few prefixes per attack. If that ever changes
# Maybe it makes sense to add this back
# Additionally, without slots, I tried making it according to the RFC
# I added dicts for simulation attrs, path attrs, transitive, non trans
# But this was much slower, 228 seconds
# NOTE: All of this was when we were deep copying announcements instead of
# Creating new ones from scratch. After creating new ones from scratch,
# Trials were more than twice as fast, completing between 90-95s without
# slots
# With slots, around 88-89s per trial. Not much of a difference, but also
# less ram
# And it's just faster. We'd have to do larger timing tests to find out
# more
# NOTE: also add prefix_id reasoning to design_decisions
__slots__ = ("prefix", "timestamp", "as_path", "roa_valid_length",
"roa_origin",
"recv_relationship", "seed_asn", "withdraw", "traceback_end")
# NOTE: can't have defaults due to slots. Sorry man
# https://stackoverflow.com/a/50180784/8903959
prefix: str
as_path: tuple
timestamp: int
seed_asn: int
roa_valid_length: bool
roa_origin: int
recv_relationship: Relationships
withdraw: bool
traceback_end: bool
communities: tuple
def __init_subclass__(cls, *args, **kwargs):
"""This method essentially creates a list of all subclasses
This is allows us to easily assign yaml tags
"""
super().__init_subclass__(*args, **kwargs)
yaml_info_decorate(cls, yaml_tag=cls.__name__)
def __eq__(self, other):
if isinstance(other, Announcement):
return dataclasses.asdict(self) == dataclasses.asdict(other)
else:
return NotImplemented
def prefix_path_attributes_eq(self, ann):
"""Checks prefix and as path equivalency"""
if ann is None:
return False
elif isinstance(ann, Announcement):
return (ann.prefix, ann.as_path) == (self.prefix, self.as_path)
else:
raise NotImplementedError
def copy(self, **extra_kwargs):
"""Creates a new ann with proper sim attrs"""
kwargs = {"seed_asn": None, "traceback_end": False}
kwargs.update(extra_kwargs)
return dataclasses.replace(self, **kwargs)
@property
def invalid_by_roa(self) -> bool:
"""Returns True if Ann is invalid by ROA
False means ann is either valid or unknown
"""
# Not covered by ROA, unknown
if self.roa_origin is None:
return False
else:
return self.origin != self.roa_origin or not self.roa_valid_length
@property
def valid_by_roa(self) -> bool:
"""Returns True if Ann is valid by ROA
False means ann is either invalid or unknown
"""
return self.origin == self.roa_origin and self.roa_valid_length
@property
def unknown_by_roa(self) -> bool:
"""Returns True if ann is not covered by roa"""
        return self.roa_origin is None
@property
def covered_by_roa(self):
"""Returns if an announcement has a roa"""
return not self.unknown_by_roa
@property
def roa_routed(self):
return self.roa_origin != 0
@property
def origin(self) -> int:
return self.as_path[-1]
def __str__(self):
return f"{self.prefix} {self.as_path} {self.recv_relationship}"
##############
# Yaml funcs #
##############
def __to_yaml_dict__(self):
""" This optional method is called when you call yaml.dump()"""
return dataclasses.asdict(self)
@classmethod
def __from_yaml_dict__(cls, dct, yaml_tag):
""" This optional method is called when you call yaml.load()"""
return cls(**dct)
```
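The ROA properties above partition announcements by comparing the origin (the last AS-path hop) against the ROA fields. A hedged sketch of how they evaluate for a constructed announcement; the import paths are assumptions, and the Relationships member used for `recv_relationship` is a placeholder since only the field's type is visible here:
```python
from lib_bgp_simulator.announcements import Announcement  # assumed import path
from lib_bgp_simulator.enums import Relationships         # assumed import path

ann = Announcement(prefix="1.2.0.0/16",
                   as_path=(666, 77),
                   timestamp=0,
                   seed_asn=None,
                   roa_valid_length=True,
                   roa_origin=77,
                   recv_relationship=list(Relationships)[0],  # placeholder member
                   withdraw=False,
                   traceback_end=False,
                   communities=())

# origin is as_path[-1] == 77, which matches roa_origin with a valid length
assert ann.valid_by_roa and not ann.invalid_by_roa and ann.covered_by_roa
```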
#### File: lib_bgp_simulator/announcements/gen_superprefix_ann.py
```python
from .announcement import Announcement as Ann
from .generate_ann import generate_ann
from ..enums import Prefixes, Timestamps
def gen_attacker_superprefix_ann(AnnCls,
attacker_asn: int,
**extra_kwargs) -> Ann:
return generate_ann(AnnCls,
attacker_asn,
Prefixes.SUPERPREFIX.value,
Timestamps.ATTACKER.value,
roa_valid_length=None,
roa_origin=None,
**extra_kwargs)
```
#### File: engine/ann_containers/ann_container.py
```python
import pprint
from yamlable import YamlAble, yaml_info_decorate
class AnnContainer(YamlAble):
"""Container for announcement that has slots and equality"""
__slots__ = "_info",
def __init_subclass__(cls, *args, **kwargs):
"""This method essentially creates a list of all subclasses
This is allows us to easily assign yaml tags
"""
super().__init_subclass__(*args, **kwargs)
yaml_info_decorate(cls, yaml_tag=cls.__name__)
def __eq__(self, other):
# Remove this after updating the system tests
if isinstance(other, self.__class__):
return self._info == other._info
else:
return NotImplemented
def __str__(self):
"""Returns contents of the container as str"""
# https://stackoverflow.com/a/521545/8903959
return pprint.pformat(self._info, indent=4)
def __to_yaml_dict__(self):
""" This optional method is called when you call yaml.dump()"""
return self._info
@classmethod
def __from_yaml_dict__(cls, dct, yaml_tag):
""" This optional method is called when you call yaml.load()"""
return cls(_info=dct)
```
#### File: bgp/bgp_simple_as/gao_rexford.py
```python
from typing import Optional
from .....announcements import Announcement as Ann
from .....enums import Relationships
opt_bool = Optional[bool]
def _new_ann_better(self,
current_ann: Optional[Ann],
current_processed: bool,
default_current_recv_rel: Relationships,
new_ann: Ann,
                    new_processed: bool,
default_new_recv_rel: Relationships,
**kwargs) -> opt_bool:
"""Assigns the priority to an announcement according to Gao Rexford
NOTE: processed is processed for second ann"""
# Can't assert this here due to passing new_ann as None
# msg = "Should have been removed in the validation func"
# assert self.asn not in new_ann.as_path, msg
new_rel_better: opt_bool = self._new_rel_better(current_ann,
current_processed,
default_current_recv_rel,
new_ann,
new_processed,
default_new_recv_rel,
**kwargs)
if new_rel_better is not None:
return new_rel_better
else:
return self._new_as_path_ties_better(current_ann,
current_processed,
new_ann,
new_processed,
**kwargs)
def _new_as_path_ties_better(self,
current_ann: Optional[Ann],
current_processed: bool,
new_ann: Ann,
new_processed: bool,
**kwargs) -> opt_bool:
new_as_path_shorter: opt_bool = self._new_as_path_shorter(
current_ann,
current_processed,
new_ann,
new_processed,
**kwargs)
if new_as_path_shorter is not None:
return new_as_path_shorter
else:
return self._new_wins_ties(current_ann,
current_processed,
new_ann,
new_processed,
**kwargs)
def _new_rel_better(self,
current_ann: Optional[Ann],
current_processed: bool,
default_current_recv_rel: Relationships,
new_ann: Ann,
new_processed: bool,
default_new_recv_rel: Relationships,
**kwargs) -> opt_bool:
if current_ann is None:
return True
elif new_ann is None:
return False
else:
# Get relationship of current ann
if current_processed:
current_rel: Relationships = current_ann.recv_relationship
else:
current_rel: Relationships = default_current_recv_rel
# Get relationship of new ann. Common case first
if not new_processed:
new_rel: Relationships = default_new_recv_rel
else:
new_rel: Relationships = new_ann.recv_relationship
if current_rel.value > new_rel.value:
return False
elif current_rel.value < new_rel.value:
return True
else:
return None
def _new_as_path_shorter(self,
current_ann: Optional[Ann],
current_processed: bool,
new_ann: Ann,
new_processed: bool,
**kwargs) -> opt_bool:
current_as_path_len = len(current_ann.as_path) + int(not current_processed)
new_as_path_len: int = len(new_ann.as_path) + int(not new_processed)
if current_as_path_len < new_as_path_len:
return False
elif current_as_path_len > new_as_path_len:
return True
else:
return None
def _new_wins_ties(self,
current_ann,
current_processed,
new_ann,
new_processed,
**kwargs) -> bool:
# Gets the indexes of the neighbors
current_index = min(int(current_processed), len(current_ann.as_path) - 1)
new_index = min(int(new_processed), len(new_ann.as_path) - 1)
return new_ann.as_path[new_index] < current_ann.as_path[current_index]
```
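The helpers above implement the usual Gao-Rexford decision order: better relationship first, then shorter AS path, then lowest neighbor ASN as the tie breaker. Below is a self-contained sketch of that order; the `Rel` enum and its values are illustrative stand-ins, assuming only (as `_new_rel_better` does) that more preferred relationships compare higher.
```python
from enum import Enum
class Rel(Enum):
    # Illustrative values: higher value = more preferred
    PROVIDERS = 1
    PEERS = 2
    CUSTOMERS = 3
def prefer_new(cur_rel, cur_path, new_rel, new_path):
    """Returns True if the new route should replace the current one"""
    if cur_rel.value != new_rel.value:
        return new_rel.value > cur_rel.value     # relationship first
    if len(cur_path) != len(new_path):
        return len(new_path) < len(cur_path)     # then shorter AS path
    return new_path[0] < cur_path[0]             # then lowest neighbor ASN
# A longer route learned from a customer still beats a provider route
assert prefer_new(Rel.PROVIDERS, (2, 777), Rel.CUSTOMERS, (4, 5, 777))
# Same relationship: the shorter AS path wins
assert not prefer_new(Rel.PEERS, (2, 777), Rel.PEERS, (4, 5, 777))
```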
#### File: lib_bgp_simulator/engine_input/non_routed_superprefix_hijack.py
```python
from .engine_input import EngineInput
from ..announcements import gen_attacker_superprefix_ann
class NonRoutedSuperprefixHijack(EngineInput):
__slots__ = tuple()
def _get_announcements(self, **extra_ann_kwargs):
return [gen_attacker_superprefix_ann(self.AnnCls,
self.attacker_asn,
**extra_ann_kwargs)]
```
#### File: lib_bgp_simulator/engine/simulator_engine.py
```python
from typing import Optional
from lib_caida_collector import BGPDAG
from .as_classes import BGPAS
from ..engine_input import EngineInput
from ..enums import Relationships
class SimulatorEngine(BGPDAG):
__slots__ = "_setup",
def __init__(self,
*args,
BaseASCls=BGPAS,
**kwargs):
super(SimulatorEngine, self).__init__(*args,
BaseASCls=BaseASCls,
**kwargs)
self._setup: bool = False
def __eq__(self, other):
if isinstance(other, SimulatorEngine):
return self.as_dict == other.as_dict
        else:
            # Return NotImplemented (rather than raising) so Python can
            # fall back to the other operand's __eq__
            return NotImplemented
def setup(self,
engine_input: EngineInput,
BaseASCls: BGPAS,
AdoptingASCls: BGPAS):
self._reset_as_classes(engine_input, BaseASCls, AdoptingASCls)
engine_input.seed(self, AdoptingASCls)
self._setup = True
def run(self,
propagation_round=0,
engine_input=None):
"""Propogates announcements"""
assert self._setup
self._propagate(propagation_round, engine_input)
def _reset_as_classes(self,
engine_input: EngineInput,
BaseASCls: BGPAS,
AdoptASCls: BGPAS):
as_cls_dict: dict = engine_input.get_as_classes(self,
BaseASCls,
AdoptASCls)
for as_obj in self:
as_obj.__class__ = as_cls_dict.get(as_obj.asn, BaseASCls)
# Reset base is false to avoid overriding AS info
as_obj.__init__(reset_base=False)
def _propagate(self,
propagation_round: Optional[int],
engine_input: Optional[EngineInput]):
"""Propogates announcements"""
kwargs = {"propagation_round": propagation_round,
"engine_input": engine_input}
self._propagate_to_providers(**kwargs)
self._propagate_to_peers(**kwargs)
self._propagate_to_customers(**kwargs)
def _propagate_to_providers(self, **kwargs):
for i, rank in enumerate(self.propagation_ranks):
# Nothing to process at the start
if i > 0:
                # Process first because this rank may have received
                # announcements from lower ranks
for as_obj in rank:
as_obj.process_incoming_anns(Relationships.CUSTOMERS,
**kwargs)
# Send to the higher ranks
for as_obj in rank:
as_obj.propagate_to_providers()
def _propagate_to_peers(self, **kwargs):
        # This loop must be separate from the provider loop because
        # propagation ranks do not take peering into account (ASes peer
        # across different ranks), so customer-to-provider propagation
        # finishes first, then peer propagation runs
for as_obj in self:
as_obj.propagate_to_peers()
for as_obj in self:
as_obj.process_incoming_anns(Relationships.PEERS, **kwargs)
def _propagate_to_customers(self, **kwargs):
for i, rank in enumerate(reversed(self.propagation_ranks)):
            # There are no incoming Anns at the top
if i > 0:
for as_obj in rank:
as_obj.process_incoming_anns(Relationships.PROVIDERS,
**kwargs)
for as_obj in rank:
as_obj.propagate_to_customers()
```
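A hypothetical driver for the engine above, mirroring how `Graph._run_chunk` (later in this dump) builds and seeds it. The `lib_bgp_simulator.engine` import path is an assumption based on the repo layout, and `EngineInputCls`, `subgraphs`, and `percent_adopt` are placeholders; this is a sketch, not the project's own entry point.
```python
from lib_caida_collector import CaidaCollector
from lib_bgp_simulator.engine import BGPAS, SimulatorEngine   # assumed path
def run_one_round(EngineInputCls, subgraphs, percent_adopt):
    """Builds the DAG, seeds one engine input, and propagates once"""
    engine = CaidaCollector(BaseASCls=BGPAS,
                            GraphCls=SimulatorEngine).run(tsv_path=None)
    # EngineInput subclasses are constructed this way in graph.py below
    engine_input = EngineInputCls(subgraphs, engine, percent_adopt)
    engine.setup(engine_input, BGPAS, BGPAS)   # BaseASCls, AdoptingASCls
    engine.run(propagation_round=0, engine_input=engine_input)
    return engine
```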
#### File: simulator/graph/graph.py
```python
from copy import deepcopy
from collections import defaultdict
from itertools import product
from multiprocessing import Pool
from lib_caida_collector import CaidaCollector
from ..data_point import DataPoint
from ..mp_method import MPMethod
from ..scenario import Scenario
from ...engine import BGPAS, SimulatorEngine
try:
import ray
# pypy3 does not support ray
except ModuleNotFoundError:
pass
class Graph:
from .graph_writer import aggregate_and_write, get_graphs_to_write
from .graph_writer import _get_line, _write
def __init__(self,
                 percent_adoptions=None,
                 adopt_as_classes=None,
                 EngineInputCls=None,
                 num_trials=1,
                 propagation_rounds=1,
                 BaseASCls=BGPAS,
                 profiler=None):
        # Use None defaults to avoid shared mutable default arguments
        if percent_adoptions is None:
            percent_adoptions = [0, 5, 10, 20, 30, 50, 75, 100]
        if adopt_as_classes is None:
            adopt_as_classes = []
        assert isinstance(percent_adoptions, list)
        self.percent_adoptions = percent_adoptions
        self.adopt_as_classes = adopt_as_classes
self.num_trials = num_trials
# Why propagation rounds? Because some atk/def scenarios might require
# More than one round of propagation
self.propagation_rounds = propagation_rounds
self.EngineInputCls = EngineInputCls
self.BaseASCls = BaseASCls
self.profiler = profiler
def run(self,
parse_cpus,
mp_method=MPMethod.SINGLE_PROCESS):
self.data_points = defaultdict(list)
if mp_method == MPMethod.SINGLE_PROCESS:
results = self._get_single_process_results()
elif mp_method == MPMethod.MP:
results = self._get_mp_results(parse_cpus)
self._get_engine_and_save_subgraphs()
elif mp_method == MPMethod.RAY:
results = self._get_ray_results(parse_cpus)
self._get_engine_and_save_subgraphs()
else:
raise NotImplementedError
for result in results:
for data_point, trial_info_list in result.items():
self.data_points[data_point].extend(trial_info_list)
print("\nGraph complete")
######################################
# Multiprocessing/clustering methods #
######################################
def _get_chunks(self, parse_cpus):
"""Not a generator since we need this for multiprocessing"""
# https://stackoverflow.com/a/34032549/8903959
percents_trials = list(product(self.percent_adoptions,
list(range(self.num_trials))))
# https://stackoverflow.com/a/2136090/8903959
return [percents_trials[i::parse_cpus] for i in range(parse_cpus)]
def _get_single_process_results(self):
return [self._run_chunk(x) for x in self._get_chunks(1)]
def _get_mp_results(self, parse_cpus):
# Pool is much faster than ProcessPoolExecutor
with Pool(parse_cpus) as pool:
return pool.map(self._run_chunk, self._get_chunks(parse_cpus))
def _get_ray_results(self, parse_cpus):
results = [ray.remote(self.__class__._run_chunk).remote(self, x)
for x in self._get_chunks(
int(ray.cluster_resources()["CPU"]))]
return [ray.get(x) for x in results]
def _run_chunk(self, percent_adopt_trials):
# Engine is not picklable or dillable AT ALL, so do it here
# Changing recursion depth does nothing
# Making nothing a reference does nothing
engine = self._get_engine_and_save_subgraphs()
data_points = defaultdict(list)
for percent_adopt, trial in percent_adopt_trials:
og_engine_input = self.EngineInputCls(self.subgraphs,
engine,
percent_adopt)
for ASCls in self.adopt_as_classes:
print(f"{percent_adopt}% {ASCls.name}, #{trial}",
end=" " + "\r")
# Deepcopy input to make sure input is fresh
engine_input = deepcopy(og_engine_input)
# Change AS Classes, seed announcements before propagation
engine.setup(engine_input, self.BaseASCls, ASCls)
for propagation_round in range(self.propagation_rounds):
# Generate the test
scenario = Scenario(trial=trial,
engine=engine,
engine_input=engine_input,
profiler=self.profiler)
# Run test, remove reference to engine and return it
scenario.run(self.subgraphs, propagation_round)
# Get data point - just a frozen data class
# Just makes it easier to access properties later
dp = DataPoint(percent_adopt, ASCls, propagation_round)
# Append the test to all tests for that data point
data_points[dp].append(scenario)
engine_input.post_propagation_hook(engine, dp)
return data_points
##########################
# Subgraph ASN functions #
##########################
def _get_engine_and_save_subgraphs(self):
# Done just to get subgraphs, change this later
engine = CaidaCollector(BaseASCls=self.BaseASCls,
GraphCls=SimulatorEngine,
).run(tsv_path=None)
self.subgraphs = self._get_subgraphs(engine)
self._validate_subgraphs(engine)
return engine
def _get_subgraphs(self, engine=None):
"""Returns all the subgraphs that you want to keep track of"""
top_level = set(x.asn for x in engine if x.input_clique)
stubs_and_mh = set([x.asn for x in engine if x.stub or x.multihomed])
subgraphs = dict()
        # "etc" excludes the other subgraphs (keeps the sets mutually
        # exclusive, which makes deterministic bookkeeping easier)
subgraphs["etc"] = set([x.asn for x in engine
if x.asn not in top_level
and x.asn not in stubs_and_mh])
subgraphs["input_clique"] = top_level
subgraphs["stubs_and_mh"] = stubs_and_mh
return subgraphs
def _validate_subgraphs(self, engine):
"""Makes sure subgraphs are mutually exclusive and contain ASNs"""
all_ases = []
for subgraph_asns in self.subgraphs.values():
msg = "Subgraphs must be sets for fast lookups"
assert isinstance(subgraph_asns, set), msg
all_ases.extend(subgraph_asns)
for x in all_ases:
assert isinstance(x, int), "Subgraph doesn't contain ASNs"
diff = len(all_ases) - len(set(all_ases))
assert diff == 0, f"Subgraphs not mutually exclusive {diff}"
@property
def total_scenarios(self):
total_scenarios = self.num_trials * len(self.percent_adoptions)
total_scenarios *= len(self.adopt_as_classes)
total_scenarios *= self.propagation_rounds
return total_scenarios
```
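The round-robin chunking in `_get_chunks` above hands every worker a strided slice of the (percent adoption, trial) pairs. A standalone illustration with made-up values:
```python
from itertools import product
percent_adoptions = [0, 50, 100]
num_trials = 2
parse_cpus = 2
percents_trials = list(product(percent_adoptions, range(num_trials)))
# Each worker gets every parse_cpus-th pair, so adoption levels stay balanced
chunks = [percents_trials[i::parse_cpus] for i in range(parse_cpus)]
print(chunks)
# [[(0, 0), (50, 0), (100, 0)], [(0, 1), (50, 1), (100, 1)]]
```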
#### File: tests/graphs/graph_002.py
```python
from lib_caida_collector import PeerLink, CustomerProviderLink as CPLink
from .graph_info import GraphInfo
from ...enums import ASNs
class Graph002(GraphInfo):
r"""
Test propagating up without multihomed support in the following test graph.
Horizontal lines are peer relationships, vertical lines are
customer-provider.
        1
        |
        2---3
       /|    \
      4 777-5 6
Starting propagation at 5, all ASes should see the announcement.
"""
def __init__(self):
super(Graph002, self).__init__(
peer_links=set([PeerLink(2, 3),
PeerLink(ASNs.VICTIM.value, 5)]),
customer_provider_links=set(
[CPLink(provider_asn=1, customer_asn=2),
CPLink(provider_asn=2, customer_asn=4),
CPLink(provider_asn=2, customer_asn=ASNs.VICTIM.value),
CPLink(provider_asn=3, customer_asn=6)]))
```
#### File: tests/graphs/graph_008.py
```python
from lib_caida_collector import PeerLink, CustomerProviderLink as CPLink
from .graph_info import GraphInfo
from ...enums import ASNs
class Graph008(GraphInfo):
r"""
         1 --- 2
        / \     \
       3   4     5
          / \
attacker_asn victim_asn
"""
def __init__(self):
super(Graph008, self).__init__(
peer_links=set([PeerLink(1, 2)]),
customer_provider_links=set(
[CPLink(provider_asn=1, customer_asn=3),
CPLink(provider_asn=1, customer_asn=4),
CPLink(provider_asn=4, customer_asn=ASNs.VICTIM.value),
CPLink(provider_asn=4, customer_asn=ASNs.ATTACKER.value),
CPLink(provider_asn=2, customer_asn=5)]))
```
#### File: tests/graphs/graph_info.py
```python
class GraphInfo:
"""Contains information to build a graph"""
def __init__(self, customer_provider_links=None, peer_links=None):
if customer_provider_links:
self.customer_provider_links = customer_provider_links
else:
self.customer_provider_links = set()
self.peer_links = peer_links if peer_links else set()
@property
def asns(self):
asns = []
for link in self.customer_provider_links | self.peer_links:
asns.extend(link.asns)
return list(sorted(set(asns)))
```
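For example, `asns` flattens both link sets and deduplicates, so `Graph008` above reports every AS in the graph exactly once. The printed values assume `ASNs.ATTACKER` and `ASNs.VICTIM` are 666 and 777, as the other test files in this dump suggest.
```python
# Hypothetical usage of the GraphInfo subclass defined above
print(Graph008().asns)
# -> [1, 2, 3, 4, 5, 666, 777]
```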
#### File: tests/utils/diagram.py
```python
from graphviz import Digraph
import ipaddress
from ...engine import BGPAS, BGPSimpleAS
from ...enums import Outcomes, ASNs
class Diagram:
def __init__(self):
self.dot = Digraph(format="png")
# purple is cooler imo but whatever
# self.dot.attr(bgcolor='purple:pink')
def generate_as_graph(self, *args, path=None, view=False):
self._add_legend(*args)
self._add_ases(*args)
self._add_edges(*args)
self._add_propagation_ranks(*args)
self._add_as_types(*args)
self._add_traceback_types(*args)
self.render(path=path, view=view)
def render(self, path=None, view=False):
self.dot.render(path, view=view)
def _add_ases(self, engine, traceback, engine_input, *args):
# First add all nodes to the graph
for as_obj in engine:
self.encode_as_obj_as_node(self.dot,
as_obj,
engine,
traceback,
engine_input,
*args)
def _add_legend(self, engine, traceback, *args):
attacker_success_count = sum(1 for x in traceback.values()
if x == Outcomes.ATTACKER_SUCCESS)
victim_success_count = sum(1 for x in traceback.values()
if x == Outcomes.VICTIM_SUCCESS)
disconnect_count = sum(1 for x in traceback.values()
if x == Outcomes.DISCONNECTED)
html = f'''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">
<TR>
<TD COLSPAN="2" BORDER="0">(For most specific prefix only)</TD>
</TR>
<TR>
<TD BGCOLOR="#ff6060:white">😈 ATTACKER SUCCESS 😈</TD>
<TD>{attacker_success_count}</TD>
</TR>
<TR>
<TD BGCOLOR="lightgreen:white">😇 VICTIM SUCCESS 😇</TD>
<TD>{victim_success_count}</TD>
</TR>
<TR>
<TD BGCOLOR="grey:white">✹ DISCONNECTED ✹</TD>
<TD>{disconnect_count}</TD>
</TR>
</TABLE>>'''
kwargs = {"color": "black", "style": "filled", "fillcolor": "white"}
self.dot.node("Legend", html, shape="plaintext", **kwargs)
def encode_as_obj_as_node(self,
subgraph,
as_obj,
engine,
traceback,
engine_input,
*args):
kwargs = dict()
if False:
kwargs = {"style": "filled,dashed",
"shape": "box",
"color": "black",
"fillcolor": "lightgray"}
html = self._get_html(as_obj,
engine,
traceback,
engine_input,
*args)
kwargs = self._get_kwargs(as_obj,
engine,
traceback,
engine_input,
*args)
subgraph.node(str(as_obj.asn), html, **kwargs)
def _add_edges(self, engine, *args):
# Then add all connections to the graph
# Starting with provider to customer
for as_obj in engine:
# Add provider customer edges
for customer_obj in as_obj.customers:
self.dot.edge(str(as_obj.asn), str(customer_obj.asn))
# Add peer edges
            # Only add if the largest asn is the current as_obj to avoid dups
for peer_obj in as_obj.peers:
if as_obj.asn > peer_obj.asn:
self.dot.edge(str(as_obj.asn),
str(peer_obj.asn),
dir="none",
style="dashed",
penwidth="2")
def _add_propagation_ranks(self, engine, *args):
for i, rank in enumerate(engine.propagation_ranks):
g = Digraph(f"Propagation_rank_{i}")
g.attr()
for as_obj in rank:
g.node(str(as_obj.asn))
self.dot.subgraph(g)
def _add_as_types(self, engine, *args):
pass
def _add_traceback_types(self, engine, *args):
pass
def _get_html(self, as_obj, engine, traceback, engine_input, *args):
asn_str = str(as_obj.asn)
if as_obj.asn == engine_input.victim_asn:
asn_str = "😇" + asn_str + "😇"
elif as_obj.asn == engine_input.attacker_asn:
asn_str = "😈" + asn_str + "😈"
html = f"""<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">
<TR>
<TD COLSPAN="4" BORDER="0">{asn_str}</TD>
</TR>
<TR>
<TD COLSPAN="4" BORDER="0">({as_obj.name})</TD>
</TR>"""
if as_obj.asn not in engine_input.uncountable_asns and False:
outcome = traceback[as_obj.asn]
if outcome == Outcomes.ATTACKER_SUCCESS:
outcome_str = "😈 ATTACKER SUCCESS 😈"
elif outcome == Outcomes.VICTIM_SUCCESS:
outcome_str = "😇 VICTIM SUCCESS 😇"
elif outcome == Outcomes.DISCONNECTED:
outcome_str = "✹ DISCONNECTED ✹"
html += f"""<TR>
<TD COLSPAN="4">{outcome_str}</TD>
</TR>"""
local_rib_anns = list(as_obj._local_rib._info.values())
local_rib_anns = tuple(
sorted(local_rib_anns,
key=lambda x: ipaddress.ip_network(x.prefix).num_addresses,
reverse=True))
if len(local_rib_anns) > 0:
html += """<TR>
<TD COLSPAN="4">Local RIB</TD>
</TR>"""
for ann in local_rib_anns:
mask = "/" + ann.prefix.split("/")[-1]
path = ", ".join(str(x) for x in
ann.as_path)
ann_help = ""
if getattr(ann, "blackhole", False):
ann_help = "✹"
elif getattr(ann, "preventive", False):
ann_help = "🛡"
elif ASNs.ATTACKER.value in ann.as_path:
ann_help = "😈"
elif ann.origin == ASNs.VICTIM.value:
ann_help = "😇"
else:
raise Exception("Not valid ann for rib?")
html += f"""<TR>
<TD>{mask}</TD>
<TD>{path}</TD>
<TD>{ann_help}</TD>
</TR>"""
html += "</TABLE>>"
return html
def _get_kwargs(self, as_obj, engine, traceback, engine_input, *args):
kwargs = {"color": "black",
"style": "filled",
"fillcolor": "white",
"gradientangle": "270"}
# If the as obj is the attacker
if as_obj.asn == engine_input.attacker_asn:
kwargs.update({"fillcolor": "#ff6060", "shape": "doublecircle"})
if as_obj.__class__ not in [BGPAS, BGPSimpleAS]:
kwargs["shape"] = "doubleoctagon"
# If people complain about the red being too dark lol:
kwargs.update({"fillcolor": "#FF7F7F"})
# kwargs.update({"fillcolor": "#ff4d4d"})
# As obj is the victim
elif as_obj.asn == engine_input.victim_asn:
kwargs.update({"fillcolor": "lightgreen", "shape": "doublecircle"})
if as_obj.__class__ not in [BGPAS, BGPSimpleAS]:
kwargs["shape"] = "doubleoctagon"
# As obj is not attacker or victim
else:
if traceback[as_obj.asn] == Outcomes.ATTACKER_SUCCESS:
kwargs.update({"fillcolor": "#ff6060:yellow"})
elif traceback[as_obj.asn] == Outcomes.VICTIM_SUCCESS:
kwargs.update({"fillcolor": "lightgreen:white"})
elif traceback[as_obj.asn] == Outcomes.DISCONNECTED:
kwargs.update({"fillcolor": "grey:white"})
if as_obj.__class__ not in [BGPAS, BGPSimpleAS]:
kwargs["shape"] = "octagon"
return kwargs
```
|
{
"source": "jfuruness/lib_caida_collector",
"score": 2
}
|
#### File: lib_caida_collector/caida_collector/caida_collector.py
```python
from csv import DictWriter
from datetime import datetime, timedelta
import logging
from pathlib import Path
import shutil
from typing import List, Optional, Tuple, Type
from ..graph import AS, BGPDAG
# Can't import into class due to mypy issue:
# https://github.com/python/mypy/issues/7045
# File funcs
from .file_reading_funcs import read_file
from .file_reading_funcs import _read_from_cache
from .file_reading_funcs import _read_from_caida
from .file_reading_funcs import _download_bz2_file
from .file_reading_funcs import _copy_to_cache
# HTML funcs
from .html_funcs import _get_url
from .html_funcs import _get_hrefs
# Graph building funcs
from .data_extraction_funcs import _get_ases
from .data_extraction_funcs import _extract_input_clique
from .data_extraction_funcs import _extract_ixp_ases
from .data_extraction_funcs import _extract_provider_customers
from .data_extraction_funcs import _extract_peers
class CaidaCollector:
"""Downloads relationships, determines metadata, and inserts to db"""
read_file = read_file
_read_from_cache = _read_from_cache
_read_from_caida = _read_from_caida
_download_bz2_file = _download_bz2_file
_copy_to_cache = _copy_to_cache
# HTML funcs
_get_url = _get_url
_get_hrefs = _get_hrefs
# Graph building funcs
_get_ases = _get_ases
_extract_input_clique = _extract_input_clique
_extract_ixp_ases = _extract_ixp_ases
_extract_provider_customers = _extract_provider_customers
_extract_peers = _extract_peers
def __init__(self,
BaseASCls: Type[AS] = AS,
GraphCls: Type[BGPDAG] = BGPDAG):
# Base AS Class for the BGPDAG
self.BaseASCls: Type[AS] = BaseASCls
# BGPDAG class
self.GraphCls: Type[BGPDAG] = GraphCls
def run(self,
dl_time: Optional[datetime] = None,
cache_dir: Path = Path("/tmp/caida_collector_cache"),
tsv_path: Optional[Path] = Path("/tmp/caida_collector.tsv")):
"""Runs run func and deletes cache if anything is amiss"""
try:
return self._run(dl_time, cache_dir, tsv_path)
except Exception as e:
logging.critical(f"{e}: Potentially the result of a messed up"
"cache, which was just deleted. please try again")
# MAke sure no matter what don't create a messed up cache
shutil.rmtree(cache_dir)
raise
def _run(self,
dl_time_arg: Optional[datetime],
cache_dir: Path,
tsv_path: Optional[Path]) -> BGPDAG:
"""Downloads relationships, parses data, and inserts into the db.
https://publicdata.caida.org/datasets/as-relationships/serial-2/
        Can specify a download time if you want to download an older dataset.
        If a cached file already exists for that date, it is read instead of
        re-downloading from Caida.
"""
# Get the download time
if dl_time_arg:
dl_time: datetime = dl_time_arg
else:
dl_time = self.default_dl_time()
if cache_dir:
# Make cache dir if cache is being used
cache_dir.mkdir(parents=True, exist_ok=True)
# Path to the cache file for that day
fmt = "%Y.%m.%d"
cache_path: Optional[Path] = cache_dir / dl_time.strftime(fmt)
else:
cache_path = None
file_lines: Tuple[str, ...] = self.read_file(cache_path, dl_time)
(cp_links,
peer_links,
ixps,
input_clique) = self._get_ases(file_lines)
bgp_dag: BGPDAG = self.GraphCls(cp_links,
peer_links,
ixps=ixps,
input_clique=input_clique,
BaseASCls=self.BaseASCls)
if tsv_path:
self._write_tsv(bgp_dag, tsv_path)
return bgp_dag
def default_dl_time(self) -> datetime:
"""Returns default DL time.
For most things, we download from 4 days ago
And for collectors, time must be divisible by 4/8
"""
# 10 days because sometimes caida takes a while to upload
# 7 days ago was actually not enough
dl_time: datetime = datetime.utcnow() - timedelta(days=10)
return dl_time.replace(hour=0, minute=0, second=0, microsecond=0)
def _write_tsv(self, dag: BGPDAG, tsv_path: Path):
"""Writes BGP DAG info to a TSV"""
logging.info("Made graph. Now writing to TSV")
with tsv_path.open(mode="w") as f:
# Get columns
cols: List[str] = list(next(iter(dag.as_dict.values())
).db_row.keys())
writer = DictWriter(f, fieldnames=cols, delimiter="\t")
writer.writeheader()
for x in dag.as_dict.values():
writer.writerow(x.db_row)
logging.debug("Wrote TSV")
```
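Typical usage of the collector above (the top-level import path is the one the simulator code earlier in this dump already uses; the snippet assumes the package is installed). `run()` returns the `BGPDAG` and, unless `tsv_path` is None, writes one TSV row per AS:
```python
from pathlib import Path
from lib_caida_collector import CaidaCollector
bgp_dag = CaidaCollector().run(
    cache_dir=Path("/tmp/caida_collector_cache"),  # default cache location
    tsv_path=Path("/tmp/caida_collector.tsv"))     # None skips the TSV
print(len(bgp_dag.as_dict))   # number of ASes in the DAG
```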
#### File: lib_caida_collector/caida_collector/file_reading_funcs.py
```python
from datetime import datetime
import logging
import os
from pathlib import Path
import shutil
from tempfile import TemporaryDirectory
from typing import Optional, Tuple
import bz2
import requests
# Type for lines that are read from caida/cached files
LINES_TYPE = Tuple[str, ...]
def read_file(self,
cache_path: Optional[Path],
dl_time: datetime) -> LINES_TYPE:
"""Reads the file from the URL and unzips it and returns the lines
Also caches the file for later calls
"""
# If cache exists
if cache_path and cache_path.exists():
lines: LINES_TYPE = self._read_from_cache(cache_path)
else:
# Write the raw file
lines: LINES_TYPE = self._read_from_caida(dl_time) # type: ignore
# Copies to cache if cache_path is set
self._copy_to_cache(cache_path, lines)
return lines
def _read_from_cache(self, cache_path: Path) -> LINES_TYPE:
"""Reads from the cache"""
# Open cache
with cache_path.open(mode="r") as f:
# Read cached file
return tuple([x.strip() for x in f])
def _read_from_caida(self, dl_time: datetime) -> LINES_TYPE:
"""Reads Caida file"""
logging.info("No file cached from Caida. Downloading Caida file now")
# Create a temporary dir to write to
with TemporaryDirectory() as tmp_dir:
# Path to bz2 download
bz2_path: str = os.path.join(tmp_dir, "download.bz2")
# Download Bz2
self._download_bz2_file(self._get_url(dl_time), bz2_path)
# Unzip and read
with bz2.open(bz2_path, mode="rb") as f:
# Must decode the bytes into strings and strip
return tuple([x.decode().strip() for x in f])
def _download_bz2_file(self, url: str, bz2_path: str):
"""Downloads Caida BZ2 file"""
# https://stackoverflow.com/a/39217788/8903959
# Download the file
with requests.get(url, stream=True, timeout=5) as r:
r.raise_for_status()
with open(bz2_path, mode="wb") as f:
shutil.copyfileobj(r.raw, f)
def _copy_to_cache(self, cache_path: Optional[Path], lines: LINES_TYPE):
"""Copies file to the cache"""
if cache_path:
# Copy raw file to cache
with cache_path.open(mode="w") as f:
f.write("\n".join(lines))
```
#### File: caida_collector/tests/test_caida_collector.py
```python
from itertools import product
from pathlib import Path
import pytest
from .test_read_file_funcs import TestReadFileFuncs
from ..caida_collector import CaidaCollector
@pytest.mark.skip(reason="outdated after temp file refactor")
@pytest.mark.caida_collector_base_funcs
class TestCaidaCollector:
"""Tests the functions that reside in caida_collector.py"""
@pytest.mark.parametrize("write_tsv, cache_written, mock",
product(*[[True, False] for _ in range(3)]))
def test_run(self,
mock_caida_collector: CaidaCollector,
caida_collector: CaidaCollector,
decoded_path: Path,
tsv_path: Path,
write_tsv: bool,
cache_written: bool,
mock: bool,
# Delete these
tmp_caida_collector,
cache,
tsv):
"""Just runs with every possible param and cache
test_run_manual_checks should replace this
"""
# Write cache file
if cache_written:
read_tester = TestReadFileFuncs()
# Write cache file from mocked
if mock:
read_tester.test_write_cache_file_mock( # type: ignore
mock_caida_collector, decoded_path)
# Write real cache file
else:
read_tester.test_write_cache_file( # type: ignore
tmp_caida_collector)
collector = mock_caida_collector if mock else tmp_caida_collector
collector.run(cache=cache, tsv=tsv)
if tsv:
assert collector.tsv_path.exists()
@pytest.mark.skip(reason="New hire work")
def test_run_manual_checks(self):
"""Tests the run function
test with:
cache False, cache written, tsv False
cache should be rewritten
no TSV output
cache False, cache not written, tsv False
cache should be written
no TSV output
cache True, cache written, TSV false
cache should be used and not rewritten
no TSV output
cache True, cache not written, TSV false
cache should be written
no tsv output
cache False, cache written, tsv True
cache should be rewritten
compare TSV file against one that was manually checked
cache False, cache not written, tsv True
cache should be written
compare TSV file against one that was manually checked
cache True, cache written, TSV True
cache should be used and not rewritten
compare TSV file against one that was manually checked
cache True, cache not written, TSV True
cache should be written
compare TSV file against one that was manually checked
"""
raise NotImplementedError
@pytest.mark.skip(reason="New hire work")
def test_write_tsv(self):
"""Checks that the TSV written is correct for a given bgp_dag
bgp_dag should have every type of AS (stub, mh, input_clique, ixp)
"""
raise NotImplementedError
```
#### File: caida_collector/tests/test_html_funcs.py
```python
from datetime import date
from typing import Any, Dict
import pytest
from ..caida_collector import CaidaCollector
@pytest.mark.html_funcs
class TestHTMLFuncs:
"""Tests funcs related to html"""
def test_get_url(self,
caida_collector: CaidaCollector,
mock_caida_collector: CaidaCollector,
run_kwargs: Dict[str, Any]):
"""Tests that the URL collected from Caida is accurate
Get an example html and ensure that the URL is what we expect
"""
test_url: str = ("http://data.caida.org/datasets/as-relationships/"
"serial-2/20210901.as-rel2.txt.bz2")
dl_time = run_kwargs["dl_time"]
# This is from the test html file
assert mock_caida_collector._get_url(dl_time) == test_url
# This is from their website. Just to make sure their website
# format hasn't changed
        dl_time = date.today().replace(day=1)
        # date.replace returns a new date, so reassign (wrap January back
        # to December of the previous year)
        prev_month = 12 if dl_time.month == 1 else dl_time.month - 1
        prev_year = dl_time.year - 1 if dl_time.month == 1 else dl_time.year
        dl_time = dl_time.replace(year=prev_year, month=prev_month)
caida_collector._get_url(dl_time)
```
#### File: lib_caida_collector/graph/customer_cone_funcs.py
```python
from typing import Dict, List, Set
from .base_as import AS
def _get_customer_cone_size(self):
"""Gets the AS rank by customer cone, the same way Caida does it"""
# Recursively assign the customer cone size
non_edges: List[AS] = []
cone_dict: Dict[int, Set[int]] = {}
for as_obj in self:
if as_obj.stub or as_obj.multihomed:
as_obj.customer_cone_size = 0
cone_dict[as_obj.asn] = set()
else:
non_edges.append(as_obj)
for as_obj in non_edges:
customer_cone: Set[int] = self._get_cone_size_helper(as_obj, cone_dict)
as_obj.customer_cone_size = len(customer_cone)
def _get_cone_size_helper(self,
as_obj: AS,
cone_dict: Dict[int, Set[int]]) -> Set[int]:
"""Recursively determines the cone size of an as"""
if as_obj.asn in cone_dict:
return cone_dict[as_obj.asn]
else:
cone_dict[as_obj.asn] = set()
for customer in as_obj.customers:
cone_dict[as_obj.asn].add(customer.asn)
self._get_cone_size_helper(customer, cone_dict)
cone_dict[as_obj.asn].update(cone_dict[customer.asn])
return cone_dict[as_obj.asn]
```
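A standalone illustration of the memoized recursion above, using a plain dict of customer edges instead of AS objects (the topology and ASNs are made up):
```python
from typing import Dict, List, Set
customers: Dict[int, List[int]] = {1: [2, 3], 2: [4], 3: [4, 5], 4: [], 5: []}
def cone(asn: int, cache: Dict[int, Set[int]]) -> Set[int]:
    """Returns the set of ASNs in asn's customer cone (memoized)"""
    if asn in cache:
        return cache[asn]
    cache[asn] = set()
    for customer in customers[asn]:
        cache[asn].add(customer)
        cache[asn].update(cone(customer, cache))
    return cache[asn]
cache: Dict[int, Set[int]] = {}
print({asn: len(cone(asn, cache)) for asn in customers})
# {1: 4, 2: 1, 3: 2, 4: 0, 5: 0}
```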
|
{
"source": "jfuruness/lib_checkbook",
"score": 2
}
|
#### File: system_tests/bgp/test_hidden_hijack_bgp.py
```python
import pytest
from lib_caida_collector import PeerLink, CustomerProviderLink as CPLink
from ..utils import run_example, HijackLocalRib
from ....enums import ASNs, Prefixes, Timestamps, ROAValidity, Relationships
from ....simulator.attacks import SubprefixHijack
from ....engine import LocalRib
from ....engine.bgp_policy import BGPPolicy
from ....engine.bgp_ribs_policy import BGPRIBSPolicy
from ....announcement import Announcement
@pytest.mark.parametrize("BasePolicyCls", [BGPPolicy, BGPRIBSPolicy])
def test_hidden_hijack_bgp(BasePolicyCls):
r"""Hidden hijack example with BGP
Figure 1a in our ROV++ paper
    1
     \
      2 - 3
     /     \
   777     666
"""
# Graph data
peers = [PeerLink(2, 3)]
customer_providers = [CPLink(provider_asn=1, customer_asn=2),
CPLink(provider_asn=2, customer_asn=ASNs.VICTIM.value),
CPLink(provider_asn=3, customer_asn=ASNs.ATTACKER.value)]
# Number identifying the type of AS class
as_policies = {asn: BasePolicyCls for asn in
list(range(1, 4)) + [ASNs.VICTIM.value, ASNs.ATTACKER.value]}
vic_kwargs = {"prefix": Prefixes.PREFIX.value,
"timestamp": Timestamps.VICTIM.value,
"seed_asn": None,
"roa_validity": ROAValidity.VALID}
atk_kwargs = {"prefix": Prefixes.SUBPREFIX.value,
"timestamp": Timestamps.ATTACKER.value,
"seed_asn": None,
"roa_validity": ROAValidity.VALID}
# Local RIB data
local_ribs = {
1: LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(1, 2, ASNs.VICTIM.value),
recv_relationship=Relationships.CUSTOMERS,
**vic_kwargs)}),
2: LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(2, ASNs.VICTIM.value),
recv_relationship=Relationships.CUSTOMERS,
**vic_kwargs),
Prefixes.SUBPREFIX.value: Announcement(as_path=(2, 3, ASNs.ATTACKER.value),
recv_relationship=Relationships.PEERS,
**atk_kwargs)}),
3: LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(3, 2, ASNs.VICTIM.value),
recv_relationship=Relationships.PEERS,
**vic_kwargs),
Prefixes.SUBPREFIX.value: Announcement(as_path=(3, ASNs.ATTACKER.value),
recv_relationship=Relationships.CUSTOMERS,
**atk_kwargs)}),
ASNs.VICTIM.value:
LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(ASNs.VICTIM.value,),
recv_relationship=Relationships.ORIGIN,
**vic_kwargs),
Prefixes.SUBPREFIX.value: Announcement(as_path=(ASNs.VICTIM.value,
2,
3,
ASNs.ATTACKER.value),
recv_relationship=Relationships.CUSTOMERS,
**atk_kwargs)}),
ASNs.ATTACKER.value:
LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(ASNs.ATTACKER.value,3, 2, ASNs.VICTIM.value),
recv_relationship=Relationships.PROVIDERS,
**vic_kwargs),
Prefixes.SUBPREFIX.value: Announcement(as_path=(ASNs.ATTACKER.value,),
recv_relationship=Relationships.ORIGIN,
**atk_kwargs)}),
}
run_example(peers=peers,
customer_providers=customer_providers,
as_policies=as_policies,
announcements=SubprefixHijack().announcements,
local_ribs=local_ribs)
```
#### File: lib_checkbook/tests/test_bgp_ribs_policy.py
```python
from copy import deepcopy
import pytest
from lib_caida_collector import PeerLink, CustomerProviderLink as CPLink
from ..enums import ASNs, Relationships, ROAValidity
from ..announcement import Announcement
from ..engine.bgp_as import BGPAS
from ..engine.bgp_policy import BGPPolicy
from ..engine.bgp_ribs_policy import BGPRIBSPolicy
from ..engine.local_rib import LocalRib
def get_prefix_ann_ann_w_a():
prefix = '192.168.127.12/16'
ann = Announcement(prefix=prefix,
as_path=(13796,),
timestamp=0,
roa_validity=ROAValidity.UNKNOWN,
recv_relationship=Relationships.ORIGIN)
ann_w = ann.copy(withdraw=True)
a = BGPAS(1)
a.policy = BGPRIBSPolicy()
return prefix, ann, ann_w, a
def test_process_incoming_withdraw():
"""Test basic processing of incoming withdraw"""
prefix, ann, ann_w, a = get_prefix_ann_ann_w_a()
a.policy.recv_q[13796][prefix].append(ann)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert ann was received
assert(a.policy.local_rib[prefix].origin == ann.origin)
a.policy.recv_q[13796][prefix].append(ann_w)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert announcement is removed from the local rib
assert(a.policy.local_rib.get(prefix) is None)
a.policy.recv_q[13796][prefix].append(ann)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert ann was replaced in local rib
assert(a.policy.local_rib[prefix].origin == ann.origin)
def test_process_incoming_withdraw_send_q():
"""Test processing of incoming withdraw when announcement has not yet been sent to neighbors"""
prefix, ann, ann_w, a = get_prefix_ann_ann_w_a()
a.policy.recv_q[13796][prefix].append(ann)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert ann was received
assert(a.policy.local_rib[prefix].origin == ann.origin)
# Manually add this to the send queue
a.policy.send_q[2][prefix].append(a.policy.local_rib[prefix])
# Withdraw it
a.policy.recv_q[13796][prefix].append(ann_w)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert send_q is empty
assert(len(a.policy.send_q[2][prefix]) == 0)
def test_process_incoming_withdraw_ribs_out():
"""Test processing of incoming withdraw when announcement has already been sent to neighbors"""
prefix, ann, ann_w, a = get_prefix_ann_ann_w_a()
a.policy.recv_q[13796][prefix].append(ann)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert ann was received
assert(a.policy.local_rib[prefix].origin == ann.origin)
# Manually add this to the ribs out
a.policy.ribs_out[2][prefix] = a.policy.local_rib[prefix]
# Withdraw it
a.policy.recv_q[13796][prefix].append(ann_w)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert send_q has withdrawal
assert(len(a.policy.send_q[2][prefix]) == 1)
def test_withdraw_best_alternative():
"""Customers > Peers > Providers"""
prefix = '192.168.127.12/16'
ann1 = Announcement(prefix=prefix,
as_path=(13794,),
timestamp=0,
roa_validity=ROAValidity.UNKNOWN,
recv_relationship=Relationships.ORIGIN)
ann1_w = ann1.copy(withdraw=True)
ann2 = Announcement(prefix=prefix,
as_path=(13795,),
timestamp=0,
roa_validity=ROAValidity.UNKNOWN,
recv_relationship=Relationships.ORIGIN)
ann2_w = ann2.copy(withdraw=True)
ann3 = Announcement(prefix=prefix,
as_path=(13796,),
timestamp=0,
roa_validity=ROAValidity.UNKNOWN,
recv_relationship=Relationships.ORIGIN)
ann3_w = ann3.copy(withdraw=True)
a = BGPAS(1)
a.policy = BGPRIBSPolicy()
# Populate ribs_in with three announcements
a.policy.recv_q[13794][prefix].append(ann1)
a.policy.process_incoming_anns(a, Relationships.PROVIDERS)
a.policy.recv_q[13795][prefix].append(ann2)
a.policy.process_incoming_anns(a, Relationships.PEERS)
a.policy.recv_q[13796][prefix].append(ann3)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
assert(a.policy.local_rib[prefix].origin == ann3.origin)
# Withdraw ann3, now AS should use ann2
a.policy.recv_q[13796][prefix].append(ann3_w)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
assert(a.policy.local_rib[prefix].origin == ann2.origin)
# Withdraw ann2, now AS should use ann1
a.policy.recv_q[13795][prefix].append(ann2_w)
a.policy.process_incoming_anns(a, Relationships.PEERS)
assert(a.policy.local_rib[prefix].origin == ann1.origin)
def test_withdraw_seeded():
"""Customers > Peers > Providers"""
prefix, ann, ann_w, a = get_prefix_ann_ann_w_a()
# Populate ribs_in with an announcement
a.policy.recv_q[13796][prefix].append(ann)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
a.policy.local_rib[prefix].seed_asn = 1
# Withdraw ann
a.policy.recv_q[13796][prefix].append(ann_w)
a.policy.process_incoming_anns(a, Relationships.CUSTOMERS)
# Assert ann is still there
assert(a.policy.local_rib[prefix].origin == ann.origin)
```
|
{
"source": "jfuruness/lib_database",
"score": 2
}
|
#### File: lib_database/postgres/postgres_create_db.py
```python
import logging
from multiprocessing import cpu_count
from subprocess import check_output
from psutil import virtual_memory
from lib_config import Config
from lib_utils import file_funcs
from .postgres_defaults import DEFAULT_CONF_SECTION
def _write_db_conf(self, conf_section, **kwargs):
"""Writes database information in config"""
logging.info("Writing database config")
with Config(write=True) as conf_dict:
if conf_section not in conf_dict:
conf_dict[conf_section] = {}
for k, v in self.default_db_kwargs.items():
# Get the kwarg, or if not exists, the default_kwarg
conf_dict[conf_section][k] = kwargs.get(k, v)
# User is always only used for one db
# This is because we drop and restore user for each db
# And set password for each user for each db
user = conf_dict[conf_section]["database"] + "_user"
conf_dict[conf_section]["user"] = user
def _get_db_creds(self, conf_section=DEFAULT_CONF_SECTION):
"""Gets database creds"""
with Config(write=False) as conf_dict:
return {k: conf_dict[conf_section][k]
for k in list(self.default_db_kwargs.keys()) + ["user"]}
def _init_db(self, user=None, database=None, host=None, password=None):
"""Creates database and user and configures it for access"""
logging.info("Initializing database")
sqls = [f"DROP DATABASE IF EXISTS {database};",
#f"DROP OWNED BY {user} IF EXISTS;",
f"DROP USER IF EXISTS {user};",
f"CREATE DATABASE {database};",
f"CREATE USER {user};",
f"REVOKE CONNECT ON DATABASE {database} FROM PUBLIC;",
f"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM {user};",
f"GRANT ALL PRIVILEGES ON DATABASE {database} TO {user};",
"GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public"
f" TO {user};",
f"ALTER USER {user} WITH PASSWORD '{password}';",
f"ALTER USER {user} WITH SUPERUSER;"]
self.run_sql_cmds(sqls)
file_funcs.delete_paths("/var/lib/postgresql.psql_history")
def _modify_db(self, db=None, ram=None, cpus=cpu_count() - 1, ssd=True):
"""Modifies database for speed.
The database will be corrupted if there is a crash. These changes
work at a cluster level, so all databases will be changed. This is
also meant to maximize the database for the server, so other things
will run slower and have less RAM/cache.
"""
logging.info("Modifying db for speed")
ram = ram if ram else self._get_ram()
random_page_cost = 1 if ssd else 2
sqls = [f"ALTER DATABASE {db} SET timezone TO 'UTC';",
# These are settings that ensure data isn't corrupted in
# the event of a crash. We don't care so...
"ALTER SYSTEM SET fsync TO off;",
"ALTER SYSTEM SET synchronous_commit TO off;",
"ALTER SYSTEM SET full_page_writes TO off;",
# Allows for parallelization
f"ALTER SYSTEM SET max_parallel_workers_per_gather TO {cpus};",
f"ALTER SYSTEM SET max_parallel_workers TO {cpus};",
f"ALTER SYSTEM SET max_worker_processes TO {cpu_count() * 2};",
# Writes as few logs as possible
"ALTER SYSTEM SET wal_level TO minimal;",
"ALTER SYSTEM SET archive_mode TO off;",
"ALTER SYSTEM SET max_wal_senders TO 0;",
# https://www.postgresql.org/docs/current/
# runtime-config-resource.html
# https://dba.stackexchange.com/a/18486
# https://severalnines.com/blog/
# setting-optimal-environment-postgresql
# Buffers for postgres, set to 40%, and no more
f"ALTER SYSTEM SET shared_buffers TO '{int(.4 * ram)}MB';",
            # Memory per process: with one worker per parallel gather
            # plus some for vacuuming, set to ram/(1.5*cores)
"ALTER SYSTEM SET work_mem TO "
f"'{int(ram / (cpu_count() * 1.5))}MB';",
# Total cache postgres has, ignore shared buffers
f"ALTER SYSTEM SET effective_cache_size TO '{ram}MB';",
            # Random page cost: 2 for spinning disks, 1 for SSDs
            # (seek time is effectively constant on SSDs)
f"ALTER SYSTEM SET random_page_cost TO {random_page_cost};",
# Yes I know I could call this, but this is just for machines
# that might not have it or whatever
# Gets the maximum safe depth of a servers execution stack
# in kilobytes from ulimit -s
# https://www.postgresql.org/docs/9.1/runtime-config-resource.html
# Subtract one megabyte for safety
"ALTER SYSTEM SET max_stack_depth TO "
f"'{self._get_ulimit() - 1000}kB';"]
self.run_sql_cmds(sqls)
self.restart_postgres()
def _get_ram(self):
# Returns RAM in megabytes
return virtual_memory().available * .9 // 1000000
def _get_ulimit(self):
# What ulimit -s returns: https://superuser.com/a/220064
return int(check_output("ulimit -s", shell=True).decode().strip())
```
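A worked example of the sizing arithmetic in `_modify_db` above, with made-up hardware numbers (roughly 32 GB of available RAM and 16 cores):
```python
ram_mb = int(32_000 * .9)                  # roughly what _get_ram would report
cores = 16                                 # stand-in for cpu_count()
shared_buffers_mb = int(.4 * ram_mb)       # 40% of RAM -> 11520 MB
work_mem_mb = int(ram_mb / (cores * 1.5))  # per-process memory -> 1200 MB
effective_cache_size_mb = ram_mb           # 28800 MB
print(shared_buffers_mb, work_mem_mb, effective_cache_size_mb)
```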
#### File: lib_database/postgres/postgres.py
```python
import logging
from multiprocessing import cpu_count
from subprocess import check_output, CalledProcessError
from psutil import virtual_memory
from lib_config import Config
from lib_utils import file_funcs, helper_funcs
from .postgres_defaults import DEFAULT_CONF_SECTION
class Postgres:
"""Handles Postgres configuration and functions"""
# Defaults
from .postgres_defaults import default_conf_section
from .postgres_defaults import default_db_kwargs
# Create database
def create_database(self, conf_section=DEFAULT_CONF_SECTION, **kwargs):
"""Writes database entry in config. Creates database. Modifies db"""
database = kwargs.get("database", self.default_db_kwargs["database"])
self.drop_database(database)
self._write_db_conf(conf_section, **kwargs)
self._init_db(**self._get_db_creds(conf_section))
self._modify_db(db=self._get_db_creds(conf_section)["database"])
# Create database helpers
from .postgres_create_db import _write_db_conf
from .postgres_create_db import _get_db_creds
from .postgres_create_db import _init_db
from .postgres_create_db import _modify_db
from .postgres_create_db import _get_ram
from .postgres_create_db import _get_ulimit
@staticmethod
def restart_postgres():
logging.info("Restarting Postgres")
helper_funcs.run_cmds("sudo systemctl restart postgresql")
logging.debug("Postgres restart complete")
def drop_all_databases(self):
"""Drops all databases that exist"""
sql = "SELECT datname FROM pg_database WHERE datistemplate = false;"
databases = check_output(self._get_sql_bash(sql), shell=True)
databases = databases.decode().split("\n")[2:-3]
for database in databases:
if "postgres" not in database:
self.drop_database(database.strip())
def drop_database(self, db_name: str):
"""Drops database if exists"""
try:
self._terminate_db_connections(db_name)
# This happens every time a conn is closed, so we ignore
except CalledProcessError as e:
pass
self.run_sql_cmds([f"DROP DATABASE IF EXISTS {db_name};"])
self._remove_db_from_config(db_name)
def _terminate_db_connections(self, db_name: str):
"""Closes all connections to a database"""
sql1 = f"REVOKE CONNECT ON DATABASE {db_name} FROM PUBLIC;"
sql2 = f"""select pg_terminate_backend(pid)
from pg_stat_activity where datname='{db_name}';"""
self.run_sql_cmds([sql1, sql2])
def _remove_db_from_config(self, db):
"""Removes all config entries that include a specific database"""
# Open config and get dict
with Config(write=True) as conf_dict:
sections_to_delete = []
# For each section in the config
for section, section_dict in conf_dict.items():
# If the database in that section is to be removed
if section_dict.get("database") == db:
# Save that section
                    sections_to_delete.append(section)
# Delete all sections we no longer need
for section_to_delete in sections_to_delete:
del conf_dict[section_to_delete]
def run_sql_cmds(self, sqls: list, database=None):
"""Runs SQL commands"""
assert isinstance(sqls, list), "Must be a list of SQL commands"
for sql in sqls:
assert sql[-1] == ";", f"{sql} statement has no ;"
helper_funcs.run_cmds(self._get_sql_bash(sql, database=database))
def _get_sql_bash(self, sql, database=None):
"""Returns SQL turned into bash"""
bash = "sudo -u postgres psql "
if database:
bash += f"-d {database} "
bash += f'-c "{sql}"'
return bash
```
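A hypothetical end-to-end use of the class above. It assumes lib_config and lib_utils are set up on the host and that the current user can sudo to the postgres account; only the `database` kwarg is relied on here, the remaining keys come from `default_db_kwargs`.
```python
db = Postgres()
db.create_database(database="test_db")              # writes config, inits, tunes
db.run_sql_cmds(["SELECT 1;"], database="test_db")
db.drop_database("test_db")                         # also removes config entries
```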
|
{
"source": "jfuruness/lib_ddos_simulator",
"score": 3
}
|
#### File: lib_ddos_simulator/animations/anim_round_text.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
from matplotlib.pyplot import text
class Anim_Round_Text:
def __init__(self,
high_res,
round_num,
ax,
name,
frames_per_round,
user_cls,
attacker_cls
):
self.name = name
self.frames_per_round = frames_per_round
self.user_cls = user_cls
self.attacker_cls = attacker_cls
bbox_kwargs = dict(facecolor='white', alpha=1)
if high_res:
bbox_kwargs["boxstyle"] = "square,pad=.05"
self.patch = text(ax.get_xlim()[1] * .5,
ax.get_ylim()[1] - .5,
self._get_round_text(int(round_num)),
fontsize=12,
bbox=bbox_kwargs,
horizontalalignment='center',
verticalalignment='center')
def add_to_anim(self, ax, zorder):
return zorder
@property
def anim_objects(self):
"""Returns animation objects used by matplotlib"""
return [self.patch]
def _get_round_text(self, round_num):
return (f"{self.name}: "
f"Round {int(round_num)} "
f"{self.attacker_cls.__name__}")
def animate(self, frame, frames_per_round, *args):
if frame % frames_per_round == 0:
self.patch.set_text(self._get_round_text(frame / frames_per_round))
```
#### File: lib_ddos_simulator/animations/anim_user.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
from math import e
import random
from matplotlib.pyplot import Circle, text
class Anim_User:
"""Animated User"""
patch_radius = 1
patch_padding = .25
og_face_color = "g"
disconnected_location = (-10, -10)
    # Needs different locations for disconnected and detected
    # because the animation is driven by location
detected_location = (-20, -20)
def __init__(self, id, og_anim_bucket):
"""Stores user values"""
# Used to differentiate users
self.id = id
# Used to track suspicions
self.suspicions = []
# Used to track location
self.points = []
if og_anim_bucket:
center_x = og_anim_bucket.patch_center()
else:
center_x = self.disconnected_location[0]
self.patch = Circle((center_x, 5),
Anim_User.patch_radius,
fc=Anim_User.og_face_color)
self.text = text(center_x,
5,
self.id,
horizontalalignment="center",
verticalalignment="center")
@property
def anim_objects(self):
"""Animation objects used by the animation"""
return [self.patch, self.text]
@staticmethod
def patch_length():
"""Returns animation object length"""
return Anim_User.patch_radius * 2 + Anim_User.patch_padding * 2
def add_to_anim(self, ax, zorder):
"""Adds user patches to animation"""
# Add user patch
self.patch.center = self.points[0]
ax.add_patch(self.patch)
self.patch.set_zorder(zorder)
self.patch.set_facecolor(self.og_face_color)
# Add text. X is already set properly.
self.text.set_y(self.points[0][1])
self.text.set_zorder(zorder + 1)
return zorder + 2
def animate(self,
frame, # Frame
frames_per_round, # Frames per round
track_sus, # Track suspicion
*args,
):
detected_loc = self.detected_location
disconnected_loc = self.disconnected_location
current_pt, future_pt = self._get_points(frame, frames_per_round)
# If the points aren't the same or we're in the middle of a round
if current_pt != future_pt or frame % frames_per_round != 0:
self._move_user(current_pt, future_pt, frame, frames_per_round)
# At the start of the round
if frame % frames_per_round == 0:
self._take_action(current_pt, future_pt)
self._update_sus(track_sus, frame, frames_per_round)
def _get_points(self, frame, frames_per_round):
# Gets current point
current_point = self.points[frame // frames_per_round]
# Gets future point
future_point = self.points[(frame // frames_per_round) + 1]
return current_point, future_point
def _move_user(self,
cur_pt, # Current point
future_pt, # Future point
f, # Frame
fpr # Frames per round
):
next_point = self._get_next_point(cur_pt, future_pt, f, fpr)
# Set the center
self.patch.center = next_point
self.text.set_x(next_point[0])
self.text.set_y(next_point[1])
def _get_next_point(self,
cur_pt, # Current point
future_pt, # Future point
f, # Frame
fpr # Frames per round
):
"""Gets next point using math equation
probably distance along two points or something like that
"""
        # Frames elapsed since the start of the current round
remainder = f - ((f // fpr) * fpr)
# Get the next point for x
next_point_x1_contr = cur_pt[0] * ((fpr - remainder) / fpr)
next_point_x2_contr = future_pt[0] * (remainder / fpr)
# Get the next point for y
next_point_y1_contr = cur_pt[1] * ((fpr - remainder) / fpr)
next_point_y2_contr = future_pt[1] * (remainder / fpr)
# Next point for the frame, not for the round
# inbetween current and future point
return (next_point_x1_contr + next_point_x2_contr,
next_point_y1_contr + next_point_y2_contr)
def _take_action(self, cur_pt, future_pt):
detected_loc = self.detected_location
disconnected_loc = self.disconnected_location
# If we're going to the detected location
if future_pt == detected_loc and cur_pt != detected_loc:
self._become_detected()
elif future_pt == disconnected_loc and cur_pt != disconnected_loc:
self._become_disconnected()
def _update_sus(self, track_sus, frame, frames_per_round):
if track_sus:
text = f"{self.suspicions[(frame//frames_per_round) + 1]:.1f}"
self.text.set_text(f"{self.id:2.0f}:{text}")
def _become_detected(self):
"""Sets animation to detected"""
self.text.set_text("Detected")
self.patch.set_facecolor("grey")
def _become_disconnected(self):
self.text.set_text("Disconnected")
self.patch.set_facecolor("purple")
```
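The movement math in `_get_next_point` above is a plain linear interpolation between the round's start and end positions; a standalone sketch:
```python
def next_point(cur_pt, future_pt, frame, frames_per_round):
    """Linear blend: weight 0 at a round's start, approaching 1 at its end"""
    remainder = frame - ((frame // frames_per_round) * frames_per_round)
    w = remainder / frames_per_round
    return (cur_pt[0] * (1 - w) + future_pt[0] * w,
            cur_pt[1] * (1 - w) + future_pt[1] * w)
# Halfway through a 10-frame round, a user moving from (0, 5) to (10, 5)
# sits at (5.0, 5.0)
print(next_point((0, 5), (10, 5), frame=15, frames_per_round=10))
```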
#### File: lib_ddos_simulator/graphers/combination_grapher.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
from copy import deepcopy
import os
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from statistics import mean, variance
from math import sqrt
from multiprocessing import cpu_count
from pathos.multiprocessing import ProcessingPool
import json
from ..base_grapher import Base_Grapher
from .combo_data_generator import Combo_Data_Generator
from ..attackers import Attacker
# Done this way to avoid circular imports
from ..ddos_simulators import ddos_simulator
from ..managers import Manager
from ..utils import Log_Levels
class Worst_Case_Attacker:
"""placeholder
Later used to graph the worst case attacker graph"""
pass
class Combination_Grapher(Base_Grapher):
"""Compares managers against each other
Plots total utility over all rounds on the Y axis
Plots % of users that are attackers on the X axis
"""
y_vals = ["HARM", "PERCENT_GOOD_NOT_SERVICED", "BUCKET_BOUND", "UTILITY"]
def __init__(self, *args, **kwargs):
super(Combination_Grapher, self).__init__(*args, **kwargs)
self.second_legend = []
def run(self, **kwargs):
"""Runs in parallel every possible scenario, then graphs
Looks complicated, but no real way to simplify it
sorry
kwargs: See Combo_Data_Generator. They are the same there."""
# Initializes graph path
self.make_graph_dir(destroy=True)
data = Combo_Data_Generator(**self.graph_kwargs).run(**kwargs)
self._graph_normal_attackers(data, kwargs)
self.graph_worst(data,
kwargs["managers"],
kwargs["attackers"],
kwargs["num_buckets"],
kwargs["users_per_bucket"],
kwargs["num_rounds"])
def _graph_normal_attackers(self, data, kwargs):
for attacker_cls in kwargs["attackers"]:
for y_val in self.y_vals:
self.graph_scenario(data,
attacker_cls,
y_val,
kwargs["num_buckets"],
kwargs["users_per_bucket"],
kwargs["num_rounds"])
def graph_worst(self,
data,
managers,
attackers,
num_buckets,
users_per_bucket,
num_rounds):
for y_val in self.y_vals:
worst_case_data = self.worst_case_data(managers,
deepcopy(data),
attackers,
y_val)
self.graph_scenario(worst_case_data,
Worst_Case_Attacker,
y_val,
num_buckets,
users_per_bucket,
num_rounds,
write_json=True)
def worst_case_data(self, managers, scenario_data, attackers, y_val):
"""Creates a json of worst case attacker data"""
# Create json of worst case attackers
worst_case_scenario_data = {manager: {Worst_Case_Attacker: {"X": [],
y_val: [],
y_val + "_YERR": [],
"ATKS": []}
}
for manager in managers}
for manager, manager_data in scenario_data.items():
xs = manager_data[attackers[0]]["X"]
for i, x in enumerate(xs):
# should be changed to be abs max but whatevs
if y_val in ["HARM", "PERCENT_GOOD_NOT_SERVICED", "BUCKET_BOUND"]:
worst_case_y = -10000000000
elif y_val == "UTILITY":
worst_case_y = 10000000000
else:
assert False, "OG y not supported"
worst_case_atk = None
yerr = None
for attacker in attackers:
if y_val in ["HARM", "PERCENT_GOOD_NOT_SERVICED", "BUCKET_BOUND"]:
cond = manager_data[attacker][y_val][i] > worst_case_y
elif y_val == "UTILITY":
cond = manager_data[attacker][y_val][i] < worst_case_y
else:
assert False, "y_val not supported"
# If there's a new worst case:
if cond:
worst_case_y = manager_data[attacker][y_val][i]
worst_case_atk = attacker
yerr = manager_data[attacker][y_val + "_YERR"][i]
atk = Worst_Case_Attacker
cur_data_point = worst_case_scenario_data[manager][atk]
cur_data_point["X"].append(x * 100)
cur_data_point[y_val].append(worst_case_y)
cur_data_point[y_val + "_YERR"].append(yerr)
cur_data_point["ATKS"].append(worst_case_atk.__name__)
return worst_case_scenario_data
def graph_scenario(self,
scenario_data,
attacker,
y_val: str,
num_buckets,
users_per_bucket,
num_rounds,
write_json=False):
fig, axs, title = self._get_formatted_fig_axs(scenario_data,
num_buckets,
users_per_bucket,
num_rounds,
attacker,
y_val)
for manager_i, manager in enumerate(scenario_data):
self.populate_axs(axs,
scenario_data,
manager,
attacker,
manager_i,
y_val,
write_json=write_json)
self.add_legend(axs)
graph_dir = self.get_attacker_graph_dir(attacker)
graph_path = os.path.join(graph_dir, y_val + f"_{title}.png")
self.save_graph(graph_path, plt, fig=fig)
if write_json:
self.write_json(graph_path, scenario_data)
def _get_formatted_fig_axs(self,
scenario_data,
num_buckets,
users_per_bucket,
num_rounds,
attacker,
y_val):
"""Creates and formats axes"""
fig, axs = plt.subplots(figsize=(20, 10))
title = (f"Scenario: "
f"users: {users_per_bucket * num_buckets}, "
f"rounds: {num_rounds}, attacker_cls: {attacker.__name__} ")
fig.suptitle(title)
# Gets maximum y value to set axis
max_y_limit = 0
for _, manager_data in scenario_data.items():
if max(manager_data[attacker][y_val]) > max_y_limit:
max_y_limit = max(manager_data[attacker][y_val])
# Sets y limit
axs.set_ylim(0, max_y_limit)
# Add labels to axis
axs.set(xlabel="Percent Attackers", ylabel=y_val)
return fig, axs, title
def get_attacker_graph_dir(self, attacker_cls):
graph_dir = os.path.join(self.graph_dir, attacker_cls.__name__)
if not os.path.exists(graph_dir):
os.makedirs(graph_dir)
return graph_dir
def populate_axs(self,
axs,
scenario_data,
manager,
attacker,
manager_i,
y_val: str,
write_json=False):
"""Plots error bar"""
axs.errorbar(scenario_data[manager][attacker]["X"], # X val
scenario_data[manager][attacker][y_val], # Y value
yerr=scenario_data[manager][attacker][y_val +"_YERR"],
label=f"{manager.__name__}",
ls=self.styles(manager_i),
# https://stackoverflow.com/a/26305286/8903959
marker=self.markers(manager_i))
# This means we are graphing worst case
if write_json:
self.overlay_scatter_plot(axs,
scenario_data,
manager,
attacker,
manager_i,
y_val,
write_json)
def overlay_scatter_plot(self,
axs,
scenario_data,
manager,
attacker,
manager_i,
y_val: str,
write_json):
"""Overlays error bars with worst case attacker colors"""
# Get list of colors
color_dict = self.get_worst_case_atk_color_dict()
colors = [color_dict[atk_name] for atk_name in
scenario_data[manager][attacker]["ATKS"]]
axs.scatter(scenario_data[manager][attacker]["X"],
scenario_data[manager][attacker][y_val],
c=colors,
s=45,
zorder=3,
marker=self.markers(manager_i))
# Sort worst case attacker by freq
atk_freq_dict = {}
for atk in scenario_data[manager][attacker]["ATKS"]:
atk_freq_dict[atk] = atk_freq_dict.get(atk, 0) + 1
atks = list(reversed(sorted(atk_freq_dict, key=atk_freq_dict.get)))
self.second_legend.extend(atks)
def get_worst_case_atk_color_dict(self):
"""Returns a dictionary of attacker to colors"""
# https://matplotlib.org/3.1.1/gallery/color/named_colors.html
colors = ["black", "dimgray", "lightcoral", "firebrick", "sienna",
"bisque", "gold", "olive", "lawngreen", "turquoise", "teal",
"deepskyblue", "midnightblue", "mediumpurple", "darkviolet",
"deeppink", "lightpink", "chocolate", "darkkhaki",
"powderblue"]
new_colors_needed = len(Attacker.runnable_attackers) - len(colors)
assert new_colors_needed <= 0, f"Add {new_colors_needed} more colors"
return {attacker.__name__: colors[i]
for i, attacker in enumerate(Attacker.runnable_attackers)}
def add_legend(self, axs):
"""Adds legend. Potentially combine with grapher class"""
# https://stackoverflow.com/a/4701285/8903959
box = axs.get_position()
axs.set_position([box.x0, box.y0, box.width * 0.8, box.height])
handles, labels = axs.get_legend_handles_labels()
# Put a legend to the right of the current axis
first = axs.legend(handles,
labels,
loc='center left',
bbox_to_anchor=(1, 0.5))
# If we are adding a second legend for worst case attacker colors
# Legacy code now. Amir changed his mind.
if len(self.second_legend) > 0 and False:
color_dict = self.get_worst_case_atk_color_dict()
legend_elements = [mpatches.Patch(color=color_dict[atk], label=atk)
for atk in set(self.second_legend)]
# https://riptutorial.com/matplotlib/example/32429/multiple-legends-on-the-same-axes
# https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/custom_legends.html
axs.legend(handles=legend_elements,
loc='upper right',
bbox_to_anchor=(1, 1))
axs.add_artist(first)
self.second_legend = []
def write_json(self, graph_path, scenario_data):
"""Writes json file"""
with open(graph_path.replace("png", "json"), "w") as f:
data = {m.__name__: {atk.__name__: end_dict
for atk, end_dict in m_data.items()}
for m, m_data in scenario_data.items()}
json.dump(data, f)
```
#### File: old/api/api.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pkg_resources
from flasgger import Swagger, swag_from
from flask import Flask, request
from .api_utils import format_json
from .api_utils import init_sim
from .api_utils import complete_turn
from .api_utils import connect_disconnect_uids
from ..managers import Manager
def create_app():
"""Create and configure an instance of the Flask application."""
app = Flask(__name__)
app.managers = {}
# https://stackoverflow.com/a/32965521/8903959
version = pkg_resources.get_distribution('lib_ddos_simulator').version
template = {
"swagger": "2.0",
"info": {
"title": "lib_ddos_simulator API",
"description": "Provides access to a number of shuffling algorithms for DDOS mitigation",
"contact": {
"responsibleOrganization": "Justin Furuness",
"responsibleDeveloper": "<NAME>",
"email": "<EMAIL>",
"url": "https://github.com/jfuruness/lib_ddos_simulator#lib_ddos_simulator",
},
"termsOfService": "https://github.com/jfuruness/lib_ddos_simulator/blob/master/LICENSE",
"version": version,
},
# "host": "lib_ddos_simulator_api.com", # overrides localhost:500
# "basePath": "/api", # base bash for blueprint registration
"schemes": [
"http",
"https"
],
"operationId": "getmyData"
}
swagger = Swagger(app, template=template)
@app.route("/")
@app.route("/home")
def home():
return "App is running"
@app.route("/init")
@swag_from("flasgger_docs/init_sim.yml")
@format_json(desc="Initializes simulation",
req_args=["uids", "num_buckets", "manager", "sys_id"])
def init():
"""Initializes app
input user ids, bucket ids, and manager name"""
# http://0.0.0.0:5000/init?uids=1,2,3,4&num_buckets=3&manager=protag_manager_merge
user_ids = [int(x) for x in request.args.get("uids", "").split(",")]
num_buckets = int(request.args.get("num_buckets"))
manager_str = request.args.get("manager", "")
manager_cls = None
for manager in Manager.runnable_managers:
if manager_str.lower() == manager.__name__.lower():
manager_cls = manager
assert manager_cls is not None, "Manager class is not correct"
sys_id = int(request.args.get("sys_id"))
# init here
init_sim(app, user_ids, num_buckets, manager_cls, sys_id)
return app.managers[sys_id].json
@app.route("/round")
@swag_from("flasgger_docs/turn.yml")
@format_json(desc="Cause simulation to take actions",
req_args=["sys_id"])
def round():
"""Takes a turn. Input downed buckets"""
# http://0.0.0.0:5000/round?bids=1,2,3
if len(request.args.get("bids", [])) > 0:
bucket_ids = [int(x) for x in request.args.get("bids").split(",")]
else:
bucket_ids = []
sys_id = int(request.args.get("sys_id"))
complete_turn(app, bucket_ids, sys_id)
return app.managers[sys_id].json
@app.route("/connect_disconnect")
@swag_from("flasgger_docs/connect_disconnect.yml")
@format_json(desc="Connect and disconnect users",
req_args=["sys_id"])
def connect_disconnect():
"""Connects and disconnects users."""
# http://0.0.0.0:5000/connect_disconnect?cuids=1,2,3&duids=4,5,6
if len(request.args.get("cuids", [])) > 0:
connecting_uids = [int(x) for x in
request.args.get("cuids").split(",")]
else:
connecting_uids = []
if len(request.args.get("duids", [])) > 0:
disconnecting_uids = [int(x) for x in
request.args.get("duids").split(",")]
else:
disconnecting_uids = []
sys_id = int(request.args.get("sys_id"))
connect_disconnect_uids(app,
connecting_uids,
disconnecting_uids,
sys_id)
return app.managers[sys_id].json
@app.route("/get_mappings")
@swag_from("flasgger_docs/get_mappings.yml")
@format_json(desc="Gets mappings", req_args=["sys_id"])
def get_mappings():
"""Gets mappings of users"""
# http://0.0.0.0:5000/get_mappings
sys_id = int(request.args.get("sys_id"))
return app.managers[sys_id].json
@app.route("/runnable_managers")
@swag_from("flasgger_docs/runnable_managers.yml")
@format_json(desc="List of runnable managers")
def runnable_managers():
return {"managers": ([x.__name__ for x in
Manager.runnable_managers])}
return app
```
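The endpoints above are exercised most easily over HTTP. The sketch below is an illustrative client, not part of the repository: it assumes the Flask app returned by `create_app()` is served locally on port 5000 (matching the example URLs in the route comments), and the uids, bids, manager name, and sys_id values are made up.

```python
import requests

BASE = "http://0.0.0.0:5000"

# Initialize a simulation: 4 users, 3 buckets, a manager chosen by name
init_json = requests.get(f"{BASE}/init",
                         params={"uids": "1,2,3,4",
                                 "num_buckets": 3,
                                 "manager": "protag_manager_merge",
                                 "sys_id": 0}).json()

# Mark buckets 1 and 2 as attacked and advance one round
round_json = requests.get(f"{BASE}/round",
                          params={"bids": "1,2", "sys_id": 0}).json()

# Fetch the current user-to-bucket mappings
mappings = requests.get(f"{BASE}/get_mappings", params={"sys_id": 0}).json()
print(mappings)
```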
#### File: api/tests/test_api.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
from copy import deepcopy
import json
from unittest.mock import patch
import json
import pytest
from ...attackers import Basic_Attacker, Even_Turn_Attacker
from ...managers.manager import Manager
@pytest.mark.api
class Test_API:
test_threshold = -123
test_turn = -1
system_id = 0
def test_app_running(self, client):
"""Start with a blank database."""
rv = client.get('/')
assert "running" in str(rv.data).lower()
@pytest.mark.filterwarnings("ignore:Gtk")
def test_api_json(self, client):
"""Tests the api
I know this function is insane. This must be done this way
so that we get access to the client through this func closure
In short, first it patches __init__ of the manager
in this patch, it forces the manager to call the api
        and ensure that the json is the same as its own
        Then it patches take_action, and again checks that the
        api call is the same as its own json
Note that random.shuffle is patched as well
"""
og_manager_init = deepcopy(Manager.__init__)
og_manager_take_action = deepcopy(Manager.take_action)
og_manager_connect_disconnect = deepcopy(Manager.connect_disconnect)
def init_patch(*args, **kwargs):
"""Must be defined here to acccess client/og_init"""
return self.init_patch(og_manager_init, client, *args, **kwargs)
def take_action_patch(*args, **kwargs):
return self.take_action_patch(og_manager_take_action,
client,
*args,
**kwargs)
def connect_disconnect_patch(*args, **kwargs):
return self.connect_disconnect_patch(og_manager_connect_disconnect,
client,
*args,
**kwargs)
# https://medium.com/@george.shuklin/mocking-complicated-init-in-python-6ef9850dd202
with patch.object(Manager, "__init__", init_patch):
with patch.object(Manager, "take_action", take_action_patch):
with patch.object(Manager,
"connect_disconnect",
connect_disconnect_patch):
# Don't ever import shuffle from random
# Or else this patch won't work
with patch('random.shuffle', lambda x: x):
with patch('random.random', lambda: 1):
# Call combo grapher, it will run sim and api in parallel
kwargs = {"attackers": [Basic_Attacker,
Even_Turn_Attacker],
"num_buckets_list": [4],
"users_per_bucket_list": [4],
"num_rounds_list": [5],
"trials": 2}
# Tired of dealing with circular imports sorry
from ...graphers import Combination_Grapher
Combination_Grapher(save=True).run(**kwargs)
###############
### Patches ###
###############
def init_patch(self,
og_init,
client,
manager_self,
num_buckets,
users,
threshold,
*args,
**kwargs):
"""Patches init func for manager
Calls api with same init args, checks that they are the same"""
# Unpatched init, calls init for sim
og_init(manager_self, num_buckets, users, threshold, *args, **kwargs)
# It's coming from our client, do not do anything else
if threshold == Test_API.test_threshold:
return
# Call api with these objects
uids, bids, manager, json_obj = self.json_to_init(manager_self.json)
url = ("/init?"
f'uids={",".join(str(x) for x in uids)}'
f'&num_buckets={len(bids)}'
f'&manager={manager}'
f'&sys_id={self.system_id}')
# Check that api output and sim are the same
self.compare_jsons(client.get(url).get_json()["data"], json_obj)
def take_action_patch(self, og_take_action, client, manager_self, turn=0):
"""Patches take_action func for manager
calls api with the downed buckets and checks json"""
# Get ids
attacked_ids = [x.id for x in manager_self.attacked_buckets]
# Take action
og_take_action(manager_self, turn=Test_API.test_turn)
# Don't recurse over own args
if turn == Test_API.test_turn:
return
# Call same action from api
url = (f'/round?bids={",".join(str(x) for x in attacked_ids)}'
f'&sys_id={self.system_id}')
# Compare results between api and sim
self.compare_jsons(client.get(url).get_json()["data"],
manager_self.json)
def connect_disconnect_patch(self,
og_connect_disconnect,
client,
manager_self,
user_ids_to_conn,
user_cls,
attacker_ids_to_conn,
attacker_cls,
disconnected_user_ids,
test_kwarg=None):
"""Patches take_action func for manager
calls api with the downed buckets and checks json"""
ret_val = og_connect_disconnect(manager_self,
user_ids_to_conn,
user_cls,
attacker_ids_to_conn,
attacker_cls,
disconnected_user_ids)
# Don't recurse over own args
if test_kwarg is True:
return
url = f"/connect_disconnect?"
conn_ids = user_ids_to_conn + attacker_ids_to_conn
if len(conn_ids) > 0:
url += f'cuids={",".join(str(x) for x in conn_ids)}'
if len(disconnected_user_ids) > 0:
if len(conn_ids) > 0:
url += "&"
url += f'duids={",".join(str(x) for x in disconnected_user_ids)}'
if len(disconnected_user_ids) > 0 or len(conn_ids) > 0:
url += "&"
url += f'sys_id={self.system_id}'
# Compare results between api and sim
self.compare_jsons(client.get(url).get_json()["data"],
manager_self.json)
########################
### Helper functions ###
########################
def json_to_init(self, json_obj):
"""Input json obj
Output:
url to init sim
expected json
"""
user_ids = []
bucket_ids = []
for bucket_id, user_id_list in json_obj["bucket_mapping"].items():
user_ids.extend(user_id_list)
bucket_ids.append(bucket_id)
return user_ids, bucket_ids, json_obj["manager"], json_obj
def compare_jsons(self, obj1, obj2):
"""Compares manager jsons, makes sure they are correct"""
# https://stackoverflow.com/a/54565257/8903959
assert json.loads(json.dumps(obj1),
parse_int=str) == json.loads(json.dumps(obj2),
parse_int=str)
def _get_bid(self, obj, _id):
"""Gets bucket id, done here for readability"""
return_id = obj["bucket_mapping"].get(_id)
if return_id is None:
return_id = obj["bucket_mapping"].get(int(_id))
return return_id
@pytest.mark.skip(reason="No time. Iterate over all files")
def test_random_patch(self):
"""Asserts that shuffle is never imported
if it is it breaks the random patch and test will fail
"""
assert False, "Not implimented"
assert "from random iimport shuffle" not in "All source code"
assert "from random import random" not in "all source code"
```
#### File: old/attackers/random_attacker.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
import random
from .attacker import Attacker
class Random_Attacker(Attacker):
"""Attacks at random self._percent_change_attack of the time"""
runnable = False
def _attack(self, manager, turn):
random.seed(str(manager.json) + str(turn))
        if random.random() < self._percent_chance_attack:
self.bucket.attacked = True
class Fifty_Percent_Attacker(Random_Attacker):
"""Attacks at random 50% of the time"""
runnable = True
_percent_chance_attack = .5
class Fifty_Percent_Lone_Attacker(Fifty_Percent_Attacker):
"""Attacks at 50% of the time, if no attacker attacked it's bucket"""
lone = True
class Ten_Percent_Attacker(Random_Attacker):
"""Attacks at random 10% of the time"""
_percent_chance_attack = .1
runnable = True
class Ten_Percent_Lone_Attacker(Ten_Percent_Attacker):
"""Attacks at 10% of the time, if no attacker attacked it's bucket"""
lone = True
```
#### File: graphers/tests/test_grapher.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
from itertools import product
import pytest
from ...ddos_simulators import DDOS_Simulator
from ...managers.manager import Manager
@pytest.mark.grapher
@pytest.mark.parametrize("tikz,high_res", list(product([True, False],
repeat=2)))
class Test_Grapher:
@pytest.mark.filterwarnings("ignore:Gtk") # problems with tikz
@pytest.mark.filterwarnings("ignore:MatplotlibDeprecationWarning")
def test_grapher(self, tikz, high_res):
num_users = 12
num_attackers = 4
num_buckets = 4
threshold = 10
managers = Manager.runnable_managers
rounds = 5
DDOS_Simulator(num_users,
num_attackers,
num_buckets,
threshold,
managers,
save=True,
tikz=tikz,
high_res=high_res).run(rounds)
```
#### File: managers/dose/dose_attack_event.py
```python
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
from . import dose_manager
class DOSE_Attack_Event:
"""Purpose of this class is just to keep track of atk events
helpful in dealing with DOSE stats
"""
def __init__(self, bucket):
self.users = bucket.users
self.uids = set(x.id for x in bucket.users)
# 3 is from their matplotlib code
# This is CRPA val
self.sus_added = dose_manager.DOSE_Manager.dose_atk_sus_to_add(bucket)
def reduce_sus(self):
for user in self.users:
user.dose_atk_risk -= self.sus_added
```
|
{
"source": "jfuruness/lib_fhir",
"score": 3
}
|
#### File: lib_fhir/lib_fhir/fhir_hapi_patient.py
```python
import requests
from .fhir_patient import FHIRPatient
class FHIRHAPIPatient(FHIRPatient):
def upload_patient(self) -> str:
"""Uploads patient to FHIR server, returning ID"""
        # Using existing patient since server blocks upload requests
r = requests.get(
"http://hapi.fhir.org/baseR4/Patient"
"?_count=1&_format=json&_pretty=true",
headers={
"Accept-Charset": "utf-8",
"Accept": ("application/fhir+json;q=1.0, "
"application/json+fhir;q=0.9"),
"User-Agent": ("HAPI-FHIR/6.0.0-PRE8-SNAPSHOT "
"(FHIR Client; FHIR 4.0.1/R4; apache"),
"Accept-Encoding": "gzip"})
data = r.json()
from pprint import pprint
pprint(data)
id_ = data["entry"][0]["resource"]["id"]
print(f"HaPI FHIR Patient with id {id}")
return id_
@property
def url(self) -> str:
"""Returns URL"""
return "http://hapi.fhir.org/baseR4/Patient/"
```
#### File: lib_fhir/lib_fhir/fhir_patient.py
```python
from abc import abstractmethod, ABC
from pprint import pprint
import requests
class FHIRPatient(ABC):
def __init__(self):
self.patient_id = self.upload_patient()
@property
def patient_url(self) -> str:
"""Returns URL of the patient"""
return self.url + self.patient_id
def perform_operations(self):
"""Performs basic FHIR operations on a patient"""
# NOTE: initial upload was done in the upload_patient abstract method
patient_json = self.get_patient()
self.update_patient(patient_json)
self.delete_patient()
def get_patient(self) -> dict:
"""Gets patient"""
print("Retrieving patient info")
r = requests.get(self.patient_url,
headers={"Accept": "application/fhir+json"})
patient_json = r.json()
pprint(patient_json)
print("Patient info retrieved")
return patient_json
def update_patient(self, update_json) -> dict:
"""Update's patients birthday (can do any arbitrary update)"""
update_json["birthDate"] = "2000-01-01"
r = requests.put(self.patient_url,
json=update_json,
headers={"Accept": "application/fhir+json",
"Content-Type": "application/fhir+json"})
patient_json = r.json()
pprint(patient_json)
print("Patient info updated")
def delete_patient(self):
"""Deletes patient"""
print("Deleting patient info")
r = requests.delete(self.patient_url)
if str(r.status_code) == "204":
print("Status code is 204, patient was deleted")
print("Deleted patient")
##########################################
# Abstract Methods Requiring Inheritance #
##########################################
@abstractmethod
def upload_patient(self) -> str:
"""Uploads patient to FHIR server, returning ID"""
raise NotImplementedError
@property
@abstractmethod
def url(self):
"""Returns URL"""
raise NotImplementedError
```
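Taken together, the two classes above split the work: the abstract `FHIRPatient` drives the read/update/delete cycle, while `FHIRHAPIPatient` supplies the server URL and the patient ID. A minimal usage sketch follows; the import path is an assumption based on the repository layout (`lib_fhir/lib_fhir/...`) and is not shown in the source.

```python
# Assumed import path; upload_patient() runs inside __init__
from lib_fhir.fhir_hapi_patient import FHIRHAPIPatient

patient = FHIRHAPIPatient()
# GET the patient, PUT an updated birthDate, then DELETE it
patient.perform_operations()
```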
|
{
"source": "jfuruness/lib_mrt_collector",
"score": 2
}
|
#### File: lib_mrt_collector/lib_mrt_collector/__main__.py
```python
from datetime import datetime
from pathlib import Path
from .mrt_collector import MRTCollector
def main():
mrt_path = Path("/tmp/mrt_dev/")
MRTCollector(dir_=mrt_path).timed_run()
```
#### File: lib_mrt_collector/lib_mrt_collector/mrt_collector.py
```python
from copy import deepcopy
import csv
from datetime import datetime
import logging
from multiprocessing import cpu_count
from multiprocessing.managers import BaseManager
import os
from pathlib import Path
import re
from urllib.parse import quote
from tqdm import tqdm
from lib_bgpstream_website_collector import BGPStreamWebsiteCollector
from lib_caida_collector import CaidaCollector
from lib_roa_collector import ROACollector
from lib_utils.base_classes import Base
from lib_utils.file_funcs import download_file, delete_paths
from lib_utils.helper_funcs import mp_call, run_cmds
from .mrt_file import MRTFile
from .po_metadata import POMetadata
from .sources import Source
from .tools import BGPGrep
class MRTCollector(Base):
"""This class downloads, parses, and stores MRT Rib dumps
NOTE: this library uses https://git.doublefourteen.io/bgp/ubgpsuite
The creator of bgpscanner moved on to this library since Isolario
    reached its end of life. bgpscanner contained bugs that were never
fixed
"""
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.raw_dir = self.dir_ / "raw"
self.dumped_dir = self.dir_ / "dumped"
self.prefix_dir = self.dir_ / "prefix"
self.parsed_dir = self.dir_ / "parsed"
for path in [self.raw_dir,
self.dumped_dir,
self.prefix_dir,
self.parsed_dir]:
path.mkdir(parents=True, exist_ok=self.dir_exist_ok)
other_collector_kwargs = deepcopy(self.kwargs)
other_collector_kwargs.pop("dir_", None)
other_collector_kwargs.pop("base_dir", None)
# Gets ROAs
self.roa_collector = ROACollector(**deepcopy(other_collector_kwargs), dir_=self.dir_ / ROACollector.__name__)
# Gets relationships
self.caida_collector = CaidaCollector(**deepcopy(other_collector_kwargs), dir_=self.dir_ / CaidaCollector.__name__)
# Gets hijacks, leaks, outage info from bgpstream.com
self.bgpstream_website_collector = BGPStreamWebsiteCollector(**deepcopy(other_collector_kwargs),
dir_=self.dir_ / BGPStreamWebsiteCollector.__name__)
# Temporary placeholder for AS designations (reserved, private, etc)
class IANACollector(Base):
def __init__(*args, **kwargs):
pass
def run(*args, **kwargs):
pass
self.iana_collector = IANACollector(**deepcopy(other_collector_kwargs), dir_=self.dir_ / IANACollector.__name__)
def run(self,
sources=Source.sources.copy(),
tool=BGPGrep,
max_block_size=2000,
local_files=None):
"""Downloads and parses the latest RIB dumps from sources.
First all downloading is done so as to efficiently multiprocess
the parsing. This was found to have significant speedup.
In depth explanation in readme
"""
# Downloads all other collectors that we need to process MRTs
self._download_collectors()
mrt_files = self._init_mrt_files(sources=sources,
local_files=local_files)
try:
# Get downloaded instances of mrt files
mrt_files = self._download_mrts(mrt_files)
# Saves files to CSVs. csv_paths is sorted for largest first
self._dump_mrts(mrt_files, tool=tool)
# Free up some disk space
delete_paths([self.raw_dir])
# Get all prefixes so you can assign prefix ids,
# which must be done sequentially
prefix_path: list = self._get_uniq_prefixes(mrt_files)
# Parse CSVs. Must be done sequentially for block/prefix id
self._parse_dumps(mrt_files, max_block_size, prefix_path)
            # Remove unnecessary dirs
delete_paths([self.dumped_dir, self.prefix_dir])
# So much space, always clean up upon error
except Exception as e:
dirs = [x.dir_ for x in [self,
self.roa_collector,
self.caida_collector,
self.bgpstream_website_collector,]]
#self.iana_collector]]
delete_paths(dirs)
raise e
def _download_collectors(self):
"""Runs collectors which are needed to process MRTs"""
# Roa validity, relationships/reserved ASNs for path poisoning
for collector in [self.roa_collector,
self.caida_collector,
self.iana_collector,
self.bgpstream_website_collector]:
collector.run()
def _init_mrt_files(self, sources=Source.sources.copy(), local_files=None):
"""Gets MRT files for downloading from URLs of sources"""
logging.info(f"Sources: {[x.__class__.__name__ for x in sources]}")
path_kwargs = {"raw_dir": self.raw_dir,
"dumped_dir": self.dumped_dir,
"prefix_dir": self.prefix_dir,
"parsed_dir": self.parsed_dir}
# Initialize MRT files from URLs of sources
mrt_files = list()
for source in sources:
for url in source.get_urls(self.dl_time):
mrt_files.append(MRTFile(url, source, **path_kwargs))
if local_files is not None:
assert isinstance(local_files, list)
for path in local_files:
mrt_files.append(MRTFile(str(path), "local_file", **path_kwargs))
return mrt_files[:2] if self.debug else mrt_files
def _download_mrts(self, mrt_files):
"""Downloads MRT files from URLs into paths using multiprocessing"""
self.download_mp(lambda x: x.download(), [mrt_files])
return [x for x in mrt_files if x.downloaded]
def _dump_mrts(self, mrt_files, tool=BGPGrep):
"""Processes files in parallel and inserts into db"""
# Make sure to install deps for the tool
tool.install_deps()
# Sort MRT and CSV paths to parse the largest first
self.parse_mp(tool.parse, [sorted(mrt_files)], "Dumping MRTs")
def _get_uniq_prefixes(self, mrt_files):
"""Gets all prefixes and assigns prefix IDs
This must be done sequentially, and is done here so that other things
can be run very fast
"""
self.parse_mp(MRTFile.get_prefixes,
[sorted(mrt_files)],
"Getting prefixes")
prefix_path = self.prefix_dir / "all_prefixes.txt"
parsed_path = self.prefix_dir / "parsed.txt"
delete_paths([prefix_path, parsed_path])
# awk is fastest tool for unique lines
# it uses a hash map while all others require sort
# https://unix.stackexchange.com/a/128782/477240
# cat is also the fastest way to combine files
# https://unix.stackexchange.com/a/118248/477240
cmds = [f"cd {self.prefix_dir}",
f"cat ./* >> {prefix_path}",
f"awk '!x[$0]++' {prefix_path} > {parsed_path}"]
logging.info("Extracting prefix IDs")
run_cmds(cmds)
print(parsed_path)
# Returns a path here so that I can skip this function for development
return parsed_path
def _parse_dumps(self, mrt_files, max_block_size, uniq_prefixes_path):
"""Parses all CSVs
Note that if cpu count is = cpus than you have on the machine,
the progress bar doesn't update very well at all
NOTE: a data structure where concurrent reads can occur for the
        prefix origin metadata would benefit this greatly
however python does not have such a way to do this, even with a manager
I created custom proxy objects, but even these are actually one at a time,
and slowed it down to an insane degree.
Instead I read prefixes beforehand. It's very fast, takes <10m. Origins
we must do later, because that would not be possible in cut and the regex
is too slow even with sed or perl
"""
# Return a list of prefixes
# Reads them in here so I can skip the uniq prefixes func for dev
with open(uniq_prefixes_path, "r") as f:
uniq_prefixes = [x.strip() for x in f]
meta = POMetadata(uniq_prefixes,
max_block_size,
self.roa_collector.tsv_path,
self.bgpstream_website_collector.tsv_path)
# Later make this not hardcoded
# https://www.iana.org/assignments/iana-as-numbers-special-registry/iana-as-numbers-special-registry.xhtml
# https://www.iana.org/assignments/as-numbers/as-numbers.xhtml
logging.warning("Make non public asns not hardcoded")
non_public_asns = set([0, 112, 23456, 65535]
+ list(range(64496, 64511))
+ list(range(64512, 65534))
+ list(range(65536, 65551))
# Unallocated
+ list(range(147770, 196607))
+ list(range(213404, 262143))
+ list(range(272797, 327679))
+ list(range(329728, 393215)))
max_asn = 401308
print("read caida df and pass to parse funcs. Do the same with iana")
#for mrt_file in sorted(mrt_files):
# mrt_file.parse(meta)
#input("remove above after caida and iana for mp")
logging.info("logging about to shutdown")
# Done here to avoid conflict when creating dirs
# TODO: move this to use the parsed_path of MRT file
parse_dirs = [self.parsed_dir / str(i) for i in range(meta.next_block_id + 1)]
for parse_dir in parse_dirs:
parse_dir.mkdir(exist_ok=self.dir_exist_ok)
logging.shutdown()
self.parse_mp(MRTFile.parse,
[sorted(mrt_files),
[meta] * len(mrt_files),
[non_public_asns] * len(mrt_files),
[max_asn] * len(mrt_files),
],
"Adding metadata to MRTs")
# Concatenate all chunk dirs into 1 file per chunk
output_files = []
for parse_dir in parse_dirs:
output_file = Path(str(parse_dir) + ".tsv")
output_files.append(output_file)
parsed_file = next(parse_dir.iterdir())
cmd = (f"head -n 1 {parsed_file} > {output_file} && "
f"tail -n+2 -q {parse_dir}/* >> {output_file}")
run_cmds([cmd])
delete_paths([parse_dir])
# Concatenate all chunks together
# Useful for statistics
output_file = Path(str(self.parsed_dir) + ".tsv")
parsed_file = next(self.parsed_dir.iterdir())
cmd = (f"head -n 1 {parsed_file} > {output_file} && "
f"tail -n+2 -q {self.parsed_dir}/* >> {output_file}")
run_cmds([cmd])
```
#### File: lib_mrt_collector/lib_mrt_collector/mrt_file.py
```python
import csv
import logging
from os import path
from shutil import copyfile
from urllib.parse import quote
from ipaddress import ip_network
from lib_bgpstream_website_collector import Row
from lib_utils import helper_funcs, file_funcs
class MRTFile:
"""This class contains functionality associated with MRT Files"""
def __init__(self,
url,
source,
raw_dir=None,
dumped_dir=None,
prefix_dir=None,
parsed_dir=None):
"""Inits MRT File and the paths at which to write to"""
self.url = url
self.source = source
self.parsed_dir = parsed_dir
self.raw_path = raw_dir / self._url_to_path()
self.dumped_path = dumped_dir / self._url_to_path(ext=".csv")
self.prefix_path = prefix_dir / self._url_to_path(ext=".txt")
def parsed_path(self, block_id):
"""Returns parsed path for that specific block id"""
block_dir = self.parsed_dir / str(block_id)
block_dir.mkdir(exist_ok=True)
return block_dir / self._url_to_path(ext=".tsv")
def __lt__(self, other):
"""Returns the file that is smaller"""
if isinstance(other, MRTFile):
for path_attr in ["dumped_path", "raw_path"]:
# Save the paths to variables
self_path = getattr(self, path_attr)
other_path = getattr(other, path_attr)
# If both parsed paths exist
if self_path.exists() and other_path.exists():
# Check the file size, sort in descending order
# That way largest files are done first
# https://stackoverflow.com/a/2104107/8903959
if self_path.stat().st_size > other_path.stat().st_size:
return True
else:
return False
return NotImplemented
def download(self):
"""Downloads raw MRT file"""
if self.source == "local_file":
print("LOCAL FILE")
copyfile(self.url, self.raw_path)
else:
file_funcs.download_file(self.url, self.raw_path)
def get_prefixes(self):
"""Gets all prefixes within the MRT files"""
        # uniq instead of awk here because these are sometimes RIB-in dumps,
        # so many prefix origin pairs are next to each other.
        # By adding uniq here, mrt_collector._get_prefix_ids gets a 3x speedup
# Even the bash cmd speeds up because it doesn't write as much
cmd = f'cut -d "|" -f 2 {self.dumped_path} | uniq > {self.prefix_path}'
helper_funcs.run_cmds([cmd])
def parse(self, po_metadata, non_public_asns: set, max_asn: int):
"""Parses MRT file and adds metadata
Things I've tried to make this faster that didn't work
/dev/shm
writing to lists then writing all at once
pypy3
etc
unfortunately, I think it's largely csv writing that makes it slow
Note that using dicts over lists is wayyyy slower
"""
# TYPE|PREFIXES|PATH ATTRIBUTES|PEER|TIMESTAMP|ASN32BIT
# PATH ATTRIBUtES:
# AS_PATH|NEXT_HOP|ORIGIN|ATOMIC_AGGREGATE|AGGREGATOR|COMMUNITIES
# File that will be read from
rfile = self.dumped_path.open(mode="r")
# Opens all files for the block ids
wfiles = [open(self.parsed_path(i), "w")
for i in range(po_metadata.next_block_id + 1)]
# CSV reader
reader = csv.reader(rfile, delimiter="|")
writers = [csv.writer(x, delimiter="\t") for x in wfiles]
# Things parsed in this file
wfields = ("prefix", "as_path", "atomic_aggregate", "aggregator",
"communities", "timestamp", "origin", "collector",
"prepending", "loops", "ixps", "gao_rexford", "new_asns",
"path_poisoning",)
# Bgpstream
wfields += Row.columns
# Roa validity and prefix meta
wfields += ("roa_validity", "roa_routed", "prefix_id", "block_id", "prefix_block_id")
wfields += ("url", "source")
for writer in writers:
writer.writerow(wfields)
for i, ann in enumerate(reader):
try:
(_type,
prefix,
as_path,
next_hop,
bgp_type,
atomic_aggregate,
aggregator,
communities,
peer,
timestamp,
asn_32b) = ann
except ValueError as e:
print(f"Problem with ann line {i} for {self.dumped_path}, fix later")
continue
try:
prefix_obj = ip_network(prefix)
# This occurs whenever host bits are set
except ValueError:
continue
if _type != "=":
continue
if atomic_aggregate:
if atomic_aggregate != "AT":
print("ann doesn't have AT for attomic aggregate")
input(ann)
else:
atomic_aggregate = True
# AS set in the path
if "{" in as_path:
continue
# There is no AS path
if not as_path:
continue
_as_path = as_path.split(" ")
path_data = self._get_path_data(_as_path,
non_public_asns,
max_asn,
set())
origin = _as_path[-1]
collector = _as_path[0]
if aggregator:
# (aggregator_asn aggregator_ip_address)
aggregator = aggregator.split(" ")[0]
# Adding:
# prefix_id
# block_id
# prefix_block_id
# origin_id
# NOTE: This is a shallow copy for speed! Do not modify!
meta = po_metadata.get_meta(prefix, prefix_obj, int(origin))
block_id = meta[-2]
# NOT SAVING:
# type of announcement
# next_hop - an ipaddress
# bgp_type (i vs ebgp)
# peer (peer-address, collector)
# Aggregator ip address
# asn_32_bit - 1 if yes 0 if no
# Feel free to add these later, it won't break things
# Just also add them to the table
wfields = (prefix,
as_path,
atomic_aggregate,
aggregator,
communities,
timestamp,
origin,
collector,) + path_data + meta + (self.url, self.source,)
# Saving rows to a list then writing is slower
writers[block_id].writerow(wfields)
for f in wfiles + [rfile]:
f.close()
def _get_path_data(self, as_path, non_public_asns, max_asn, ixps):
"""Returns as path data"""
# NOTE: make sure this matches the header!!
prepending = False
loop = False
ixp = False
wrong_asn = False
as_path_set = set()
last_asn = None
last_non_ixp = None
for asn in as_path:
asn = int(asn)
if asn in non_public_asns:
wrong_asn = True
if asn > max_asn:
wrong_asn = True
if last_asn == asn:
prepending = True
loop = True
if asn in as_path_set:
loop = True
as_path_set.add(asn)
last_asn = asn
if asn in ixps:
ixp = True
else:
last_non_ixp = asn
# doesn't follow Gao rexford according to Caida
# Contains ASNs that Caida doesn't have (that aren't non public)
# path poisoning by reserved asn, non public asn, or clique being split
return (int(prepending), int(loop), int(ixp), int(False), int(False), int(False),)
def _url_to_path(self, ext=""):
_path = quote(self.url).replace("/", "_")
if ext:
_path = _path.replace(".gz", ext).replace(".bz2", ext)
return _path
@property
def downloaded(self):
"""Returns true if the raw file was downloaded"""
return self.raw_path.exists()
```
|
{
"source": "jfuruness/lib_off_campus_housing_parser",
"score": 3
}
|
#### File: lib_off_campus_housing_parser/lib_off_campus_housing_parser/listing.py
```python
import re
from enum import Enum
from .google_maps import GMaps
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class Utility:
"""Utility class that contains a Utility"""
def __init__(self, possible_strs, cost):
self.possible_strs = possible_strs
self.cost = cost
class Utilities(Enum):
"""Enum for all utilities"""
INTERNET = Utility(["High-Speed Internet"], 45)
HEAT = Utility(["Heat"], 1400 / 12) # Winter heat cost / 12 mo
LAUNDRY = Utility({"Laundry Access",
"Washer/Dryer in Unit",
"Laundry Room"}, 70) # laundromatt cost + hassle
ELECTRIC = Utility(["Electricity"], .1724 * 750) # Cost per kwhr * avg kwhr
WATER = Utility(["Water"], 25)
class Pets(Enum):
CONSIDERED = "Considered"
ALL = "Pets Allowed"
CATS = "Pets Allowed (Cats)"
DOGS = "Pets Allowed (Dogs)"
NOT_ALLOWED = "Pets Not Allowed"
class Listing:
"""House listing class that contains all info on a listing"""
def __init__(self, soup, base_url, logger):
"""Stores the soup and the url"""
self.soup = soup
# Can't use the actual url here because it adds search query parameters
# (It gets ugly)
self.url = base_url + self.soup.findAll("a")[0].get("href")
self.logger = logger
def __repr__(self):
"""Returns a formatted string for all attributes we care about"""
return "\n".join(["{}: {}".format(x, getattr(self, x))
for x in Listing.header_info()]) + "\n"
def __lt__(self, other):
"""Comparison operator for sorting houses"""
return self.total_cost < other.total_cost
def calculate_all(self, specific_soup, wage=42, max_roomates=1):
"""Calculates all the attributes we care about.
Note: self.specific soup needs to be set before this func is called"""
self.specific_soup = specific_soup
self._get_price()
self._get_address()
self._get_commute_info(wage)
# Needs specific soup to be able to run the funcs below
self._get_beds()
self._get_utilities_pets_included()
self._get_total_cost(max_roomates)
@property
def row(self):
"""Returns a row for the csv of this listing"""
return [getattr(self, x) for x in Listing.header_info()]
@staticmethod
def header_info():
"""Gets header information for the csv of this listing"""
return ["price",
"drive_time",
"walk_time",
"total_cost",
"address",
"url",
"utils_cost",
"drive_cost",
"walk_cost"]
########################
### Helper Functions ###
########################
def _get_price(self):
"""Sets price"""
try:
# Gets price string and removes commas from 1,000
price_str = str(self.soup.findAll("div",
{"class": "price"})[0].text).replace(",", "")
self.price = re.findall(r'\d+', price_str)[0]
except:
# If the price cannot be found, discard by setting price high
self.price = 100000
def _get_address(self):
"""Gets address and remove spaces"""
self.address = str(self.soup.findAll("span", {"class": "address"}
)[0].string).lstrip()
def _get_commute_info(self, wage, tax=.3):
"""Gets drive and walking commute info"""
self.logger.info("Calculating commute time for {}".format(self.url))
# Consider parallelizing this for faster throughput?
self.drive_time, self.walk_time = GMaps(
self.logger).calculate(self.address)
self._get_walk_price(wage * (1 - tax))
self._get_drive_price(wage * (1 - tax))
def _get_walk_price(self, wage, days_on_campus_p_mo=30):
"""Gets walk price based on time spent walking"""
# Multiply by 2 for round trip, divide by 60 for per hour, * wage
# Times 30 for the average month
        self.walk_cost = days_on_campus_p_mo * (int(self.walk_time) * 2 / 60 * wage)
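        # Worked example with illustrative numbers (not from the source):
        # a 15 minute one-way walk at an after-tax wage of $30/hr costs
        # 30 days * (15 * 2 / 60 * $30) = 30 * $15 = $450 per month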
def _get_drive_price(self,
wage,
mpg=21,
gas_cost_per_gallon=3,
mph=35,
parking_pass_price_per_year=240,
days_on_campus_p_mo=30):
"""Gets drive price based on a variety of factors"""
# We multiply by 2 to get both ways, /60 for per hour
drive_time_per_hour = int(self.drive_time)*2/60
# Gets drive time cost
drive_time_cost = drive_time_per_hour*wage
        # Miles driven = hours * mph; gas cost = miles / mpg * price per gallon
        gas_cost = drive_time_per_hour * mph / mpg * gas_cost_per_gallon
# Cost of a parking pass per month (per month cause everythings per mo)
parking_pass_cost = parking_pass_price_per_year/12
self.drive_cost = (drive_time_cost + gas_cost) * days_on_campus_p_mo + parking_pass_cost
########################################
### Funcs that require specific_soup ###
########################################
def _get_beds(self):
try:
# Gets bed numbers
soup = list(self.specific_soup.find_all("p",
{"class": "bedbath"}))[0]
beds_str = " ".join(soup.stripped_strings)
# Gets digits
digits = [int(s) for s in beds_str.split("bed")[0] if s.isdigit()]
# Finds kind of home
soup = list(self.specific_soup.find_all("span",
{"class": "unit"}))[0]
if "per bedroom" not in " ".join(soup.stripped_strings):
self.beds = digits[0]
else:
self.beds = 1
except:
# If the above fails, assume it has one bedroom
self.beds = 1
def _get_utilities_pets_included(self):
utils_str = " ".join(list(self.specific_soup.find_all("ul",
{"class": "snapshot-extras-list"}))[0].stripped_strings)
self.included_utils = set()
for utility in Utilities.__members__.values():
for possible_str in utility.value.possible_strs:
if possible_str in utils_str:
self.included_utils.add(utility.value)
all_utils = set([x.value for x in Utilities.__members__.values()])
self.utils_cost = sum([util.cost for util in
all_utils.difference(self.included_utils)])
for pet_option in Pets.__members__.values():
if pet_option.value in utils_str:
self.pets = pet_option.value
def _get_total_cost(self, max_roomates):
"""Gets total_cost of listing"""
# Divide total cost by number of expected roomates
beds = self.beds if self.beds < max_roomates else max_roomates
self.total_cost = min(self.walk_cost, self.drive_cost)
self.total_cost += (int(self.price) + self.utils_cost) / beds
```
#### File: lib_off_campus_housing_parser/lib_off_campus_housing_parser/logger.py
```python
import re
import sys
import datetime
import os
import functools
import traceback
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# compiled regex global to prevent unnecessary recompilation
tb_re = re.compile(r'''
.*?/lib_off_campus_housing_parser
(?P<file_name>.*?.py)
.*?line.*?
(?P<line_num>\d+)
.*?in\s+
(?P<function>.*?)
\\n\s+
(?P<line>.+)
\\n
''', re.VERBOSE | re.DOTALL
)
# This decorator wraps all funcs in a try except statement
# Note that it can only be put outside of funcs with self
# The point of the decorator is so code errors nicely with useful information
def error_catcher(msg=None):
def my_decorator(func):
@functools.wraps(func)
def function_that_runs_func(self, *args, **kwargs):
# Inside the decorator
try:
return func(self, *args, **kwargs)
except Exception as e:
# Gets traceback object and error information
error_class, error_desc, tb = sys.exc_info()
# Makes sure it's not a system exit call
if not str(error_desc) == '1':
for msg in traceback.format_tb(tb):
self.logger.debug(msg)
# Gets last call from program
tb_to_re = [x for x in str(traceback.format_tb(tb))
.split("File") if "lib_off_campus_housing_parser" in x][-1]
# Performs regex to capture useful information
capture = tb_re.search(tb_to_re)
# Formats error string nicely
err_str = ("\n{0}{1}{0}\n"
" msg: {2}\n"
" {3}: {4}\n"
" file_name: {5}\n"
" function: {6}\n"
" line #: {7}\n"
" line: {8}\n"
"{0}______{0}\n"
).format("_"*36,
"ERROR!",
msg,
error_class,
error_desc,
capture.group("file_name"),
capture.group("function"),
capture.group("line_num"),
capture.group("line"))
self.logger.error(err_str)
# hahaha so professional
print('\a')
# Exit program and also kills all parents/ancestors
sys.exit(1) # Turning this on breaks pytest - figure it out
raise e
return function_that_runs_func
return my_decorator
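# Usage sketch (an assumption based on how the decorator is written above):
# error_catcher must wrap instance methods of a class that has a self.logger,
# for example:
#
#     class HousingParser:
#         @error_catcher(msg="parsing listings")
#         def parse_listings(self):
#             ...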
class Logger:
def __init__(self, args={}):
"""Initializes logger
Logging levels are, in order:
logging.CRITICAL
logging.ERROR
logging.WARNING
logging.INFO
logging.DEBUG
Anything equal to or higher than file_level will be appended to path
Anything equal to or higher than stream_level will be printed
"""
# Sets different logging properties
self._set_properties(args)
# Inits initial logger instance
logger = self._init_logging(args)
# Adds file handler to logger
self._init_file_handler(logger, args)
# Adds stream handler if not child
self._init_stream_handler(logger, args)
self.logger = logger
def _set_properties(self, args):
"""Inits different logging properties"""
# Import done here so as not to overwrite my enum
import logging
# Sets variables if args is not set
log_name = args.get("log_name")
if log_name is None:
log_name = "lib_off_campus_housing_parser.log"
self.file_level = args.get("file_level")
if self.file_level is None:
self.file_level = logging.WARNING
self.stream_level = args.get("stream_level")
if self.stream_level is None:
self.stream_level = logging.INFO
log_dir = args.get("log_dir")
if log_dir is None:
log_dir = "/var/log/lib_off_campus_housing_parser"
        self._make_dir(log_dir)
prepend = args.get("prepend")
if prepend is None:
prepend = datetime.datetime.now().strftime("%Y_%m_%d_%I_%M%S")
self.log_name = "{}_{}".format(prepend, log_name)
def _make_dir(self, path):
"""Initializes a directory"""
if not os.path.exists(path):
os.makedirs(path)
self.log_dir = path
def _init_logging(self, args):
"""Initializes the initial logger"""
# Import done here so as not to overwrite my enum
import logging
# Initialize logging
logger = logging.getLogger(__name__)
if logger.hasHandlers() and args.get("is_child") is None:
logger.handlers.clear()
# Must use multiprocessing logger to avoid locking
# logger = multiprocessing.get_logger()
logger.setLevel(logging.DEBUG)
return logger
def _init_file_handler(self, logger, args):
"""Initializes file handler for logging"""
# Import done here so as not to overwrite my enum
import logging
# Initialize File Handler
self.file_path = os.path.join(self.log_dir, self.log_name)
self.file_handler = logging.FileHandler(self.file_path)
self.file_handler.setLevel(self.file_level)
file_handler_formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(name)s - %(message)s')
self.file_handler.setFormatter(file_handler_formatter)
logger.addHandler(self.file_handler)
def _init_stream_handler(self, logger, args):
"""Adds stream handler while avoiding multithreading problems"""
# Import done here so as not to overwrite my enum
import logging
if args.get("is_child") is None:
# Initialize Stream Handler
self.stream_handler = logging.StreamHandler()
self.stream_handler.setLevel(self.stream_level)
stream_handler_formatter = logging.Formatter(
'%(levelname)s:%(asctime)s: %(message)s')
self.stream_handler.setFormatter(stream_handler_formatter)
logger.addHandler(self.stream_handler)
# Must be done or else:
# https://stackoverflow.com/questions/21127360/
            logger.propagate = False
else:
            logger.propagate = True
```
#### File: lib_off_campus_housing_parser/lib_off_campus_housing_parser/off_campus_parser.py
```python
import os
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import Chrome as webstuff
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import ElementNotInteractableException
from .excel import open_excel
from .listing import Listing
from .logger import Logger
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__Lisence__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class Off_Campus_Parser:
"""Off Campus Parser that can parse all listings"""
def __init__(self, executable_path='/home/anon/Downloads/chromedriver'):
"""Inits the browser and stores the url"""
# Needs these environment vars to run properly
self._check_for_environ_vars()
        options = webdriver.ChromeOptions()
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--no-sandbox")
self.browser = webstuff(options=options, executable_path=executable_path)
self.url = "https://offcampushousing.uconn.edu"
self.listings = []
try:
self.logger = Logger().logger
except PermissionError:
raise Exception("Make the log files in /var/log/lib_off_campus_housing_parser and make the directory have permissions to create log files in it")
def parse_houses(self,
drive_time_max=20,
max_rent=1500,
netid="jmf14015",
pets=False,
test=True,
excel_path="/tmp/off_campus.xlsx"):
"""Parses all houses.
1. Logs into uconn off campus housing
2. Parses all general pages with multiple listings
3. Gets html for all specific listing pages
4. Calculate all costs of each listing
5. Open all listings
6. Store relevant listings into a csv
Filters by drive time max, and by pets if pets is True
"""
self._login(self.url + "/login", max_rent, netid)
# Parse all pages
pages_left = True
while pages_left:
# Extends listings
pages_left = self._parse_page()
if test or True:
break
# Gets all listing info for the pages parsed
self._get_listing_info()
# Get rid of useless listings
self._filter_listings(drive_time_max, pets)
self._open_all_listings()
input("After pressing enter all open pages will be saved into excel")
with open_excel(excel_path) as spreadsheet:
spreadsheet.write_rows(self.get_listing_rows())
def _check_for_environ_vars(self):
for key in ["google_api_key", "netid_password"]:
if os.environ.get(key) in [None, ""]:
raise Exception("Must input environment var: {}".format(key))
def _login(self, login_url, max_rent, netid):
"""Logs user into uconn off campus housing"""
self._load(login_url)
# Find and click login
login_box = self.browser.find_element_by_css_selector(
".login-block.sso-student")
login_box.click()
# Send netid keys and password keys and hit submit
netid_box = self.browser.find_element_by_id("username")
self._sleep(netid_box.send_keys(netid))
pswd_box = self.browser.find_element_by_id("password")
self._sleep(pswd_box.send_keys(os.environ.get("netid_password")))
submit_box = self.browser.find_element_by_name("submit")
self._sleep(submit_box.click())
# Load the property search page and send max_rent
self._load(self.url + "/property/search")
max_rent_box = self.browser.find_element_by_id("rent-max")
self._sleep(max_rent_box.send_keys(str(max_rent)), 5)
def _sleep(self, func_call, sleep_time=.2):
"""Function gets wrapped with sleep"""
time.sleep(sleep_time)
return func_call
def _load(self, page_url):
"""Loads a page and waits for ajax calls"""
self._sleep(self.browser.get(page_url), 2)
def _parse_page(self):
"""Parses the general page that has multiple listings, clicks next"""
soup = BeautifulSoup(self.browser.page_source, 'html.parser')
# List comprehension of listings instances on the page
self.listings.extend([Listing(x, self.url, self.logger) for x in
soup.findAll("article", {"class": "compare_icon_init"})])
# Returns False if cannot continue
return self._click_next_page(soup)
def _click_next_page(self, soup):
"""Clicks next page and return True, else return False"""
try:
next_box = self.browser.find_element_by_css_selector(
".next.load-more.scroll-up")
self._sleep(next_box.click(), 3)
return True
except ElementNotVisibleException:
return False
except ElementNotInteractableException:
return False
def _get_listing_info(self, wage=45):
"""Calculates listing info"""
for listing in self.listings:
self._load(listing.url)
listing.calculate_all(BeautifulSoup(self.browser.page_source,
'html.parser'),
wage)
def _filter_listings(self, drive_time_max, pets):
"""Filters listings by drive time(int) and pets(bool)"""
# If something is listed twice get rid of it
unique_listings = list({x.url: x for x in self.listings}.values())
# Filters by drive time and pets, then sorts by total_cost
self.listings = sorted([x for x in unique_listings
if int(x.drive_time) <= drive_time_max
and (x.pets != "No" or not pets)])
self.logger.info(self.listings)
def _open_all_listings(self):
"""Opens all windows of listings"""
for i, listing in enumerate(self.listings):
self.browser.execute_script("window.open('');")
self._sleep(self.browser.switch_to.window(
self.browser.window_handles[-1]))
self._load(listing.url)
if i == 0:
# Gets rid of the other windows
self.browser.switch_to.window(
self.browser.window_handles[0])
self.browser.close()
self._sleep(self.browser.switch_to.window(
self.browser.window_handles[-1]))
def get_listing_rows(self):
"""Returns all listing rows that are open"""
# Gets all URLs open
urls = set()
for handle in self.browser.window_handles:
self.browser.switch_to.window(handle)
urls.add(self.browser.current_url)
# Return only listings that are open
return [x.row for x in self.listings if x.url in urls]
```
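A minimal driver for the class above is sketched here; it is illustrative rather than part of the repository. The import path is assumed from the repo layout, the chromedriver path and netid are placeholders, and the `google_api_key` and `netid_password` environment variables must already be exported (the constructor checks for them).

```python
# Assumed import path and placeholder arguments
from lib_off_campus_housing_parser.off_campus_parser import Off_Campus_Parser

parser = Off_Campus_Parser(executable_path="/usr/local/bin/chromedriver")
parser.parse_houses(drive_time_max=20,
                    max_rent=1500,
                    netid="abc12345",
                    pets=False,
                    test=True,
                    excel_path="/tmp/off_campus.xlsx")
```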
|
{
"source": "jfuruness/lib_roa_checker",
"score": 3
}
|
#### File: lib_roa_checker/lib_roa_checker/roa_checker.py
```python
from ipaddress import ip_network
from .roa_tries import IPv4ROATrie, IPv6ROATrie
from .roa_validity import ROAValidity
class ROAChecker:
"""Gets validity of prefix origin pairs against ROAs"""
def __init__(self):
"""Initializes both ROA tries"""
self.ipv4_trie = IPv4ROATrie()
self.ipv6_trie = IPv6ROATrie()
def insert(self, prefix: ip_network, origin: int, max_length: int):
"""Inserts a prefix into the tries"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
return trie.insert(prefix, origin, max_length)
def get_roa(self, prefix: ip_network, *args):
"""Gets the ROA covering prefix-origin pair"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
return trie.get_most_specific_trie_supernet(prefix)
def get_validity(self, prefix: ip_network, origin: int) -> ROAValidity:
"""Gets the validity of a prefix origin pair"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
return trie.get_validity(prefix, origin)
```
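A short usage sketch for the checker above; the prefix, origin ASN, and max length are made-up values, and since `ROAValidity` members are not shown in this file, only the returned objects are printed.

```python
from ipaddress import ip_network

# Assumed import path based on the repo layout
from lib_roa_checker.roa_checker import ROAChecker

checker = ROAChecker()
# ROA: 1.2.0.0/16 may be originated by AS 13335 for prefixes up to /24
checker.insert(ip_network("1.2.0.0/16"), 13335, 24)

print(checker.get_validity(ip_network("1.2.3.0/24"), 13335))  # a ROAValidity value
print(checker.get_roa(ip_network("1.2.3.0/24")))              # most specific covering trie node
```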
|