repo_name (string, len 5-92) | path (string, len 4-221) | copies (19 classes) | size (string, len 4-6) | content (string, len 766-896k) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
The-WebOps-Club/odia-forum | pybbm_tag/views.py | 1 | 2818 |
from django.shortcuts import render
from pybb.models import *
from pybbm_tag.models import Tag
from pybb.views import ForumView,AddPostView,EditPostView,TopicView


def add_tag(request, **kwargs):
    # check permissions before calling this function
    # in kwargs we expect the LABEL of the tag to add (not the object) and the TOPIC object (not its name).
    topic = kwargs['topic']
    tagname = kwargs['tag']
    lst = Tag.objects.filter(label=tagname)
    if lst.count() != 0:
        # the tag already exists, just attach the topic to it
        lst[0].topics.add(topic)
        lst[0].save()
    else:
        # no tag with this label yet, create it first
        tag = Tag(label=tagname, desc="Empty")
        tag.save()
        tag.topics.add(topic)


def remove_all_tags(request, **kwargs):
    topic = kwargs['topic']
    for i in Tag.objects.filter(topics__in=[topic]):
        i.topics.remove(topic)


def remove_tag(request, **kwargs):
    # check permissions before calling this function.
    topic = kwargs['topic']
    tagname = kwargs['tag']
    lst = Tag.objects.filter(label=tagname)
    lst[0].topics.remove(topic)


# tag additions to the views that are affected by tags.
class AddPostViewWrapper(AddPostView):

    def post(self, request, *args, **kwargs):
        # call the parent view first so self.object is populated, then attach tags
        ret = super(AddPostViewWrapper, self).post(request, *args, **kwargs)
        try:
            taglist = request.POST['taglist'].split('+')
            for i in taglist:
                add_tag(request, topic=self.object.topic, tag=i)
        except KeyError:
            # no 'taglist' field submitted; nothing to tag
            pass
        return ret

    def get_context_data(self, **kwargs):
        ctx = super(AddPostViewWrapper, self).get_context_data(**kwargs)
        if ctx['forum']:
            ctx['taglist_input'] = 1
        return ctx


class ForumViewWrapper(ForumView):

    def get_context_data(self):
        ctx = super(ForumViewWrapper, self).get_context_data()
        topic_list = ctx['topic_list']
        tags = []
        for i in topic_list:
            tags.append(Tag.objects.filter(topics__in=[i]))
        ctx['tags'] = Tag.objects.all()
        return ctx


class TopicViewWrapper(TopicView):

    def get_context_data(self):
        ctx = super(TopicViewWrapper, self).get_context_data()
        ctx['tags'] = Tag.objects.all()
        return ctx


class EditPostViewWrapper(EditPostView):

    def post(self, request, *args, **kwargs):
        ret = super(EditPostViewWrapper, self).post(request, *args, **kwargs)
        try:
            taglist = request.POST['taglist'].split('+')
            # drop the old tags and re-apply the submitted list
            remove_all_tags(request, topic=self.object.topic)
            for i in taglist:
                add_tag(request, topic=self.object.topic, tag=i)
        except KeyError:
            pass
        return ret

    def make_tag_string(self, topic):
        # build a '+'-separated string of the topic's tag labels
        tag_string = ""
        for i in Tag.objects.filter(topics__in=[topic]):
            tag_string += (i.label + "+")
        if len(tag_string) > 0:
            tag_string = tag_string[:-1]
        return tag_string

    def get_context_data(self, **kwargs):
        ctx = super(EditPostViewWrapper, self).get_context_data(**kwargs)
        post = ctx['post']
        if post.topic.user == self.request.user:
            ctx['taglist_input'] = 1
            ctx['taglist_initial'] = self.make_tag_string(post.topic)
        return ctx
| gpl-2.0 | -5,345,528,752,191,889,000 | 28.061856 | 94 | 0.689851 | false | 2.982011 | false | false | false |
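
Illustrative only: a minimal sketch of how the tag helpers above might be called from another view. The topic object and the permission check are assumptions, not part of the original file.

# Hypothetical caller (not part of the original file):
def tag_topic(request, my_topic):
    if request.user.is_authenticated():  # the helpers expect the caller to check permissions
        add_tag(request, topic=my_topic, tag="festivals")
        remove_tag(request, topic=my_topic, tag="outdated")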
lichengshuang/createvhost | others/webvirtmgr/delServer.py | 1 | 1305 |
#!/usr/bin/python
# -*- encoding:utf-8 -*-
# author: asher
# date: 20160429 on train D909
# This script removes server IPs from the webvirtmgr database.
# Without it, each server has to be removed through the web UI one by one, which is slow and tedious.
# Using this script makes the job much faster.
import sqlite3

try:
    conn = sqlite3.connect('../webvirtmgr.sqlite3')
    cur = conn.cursor()
    print "Input the server ip address like:"
    ips = raw_input("Ips 172.23.32:").strip()
    ips1 = int(raw_input("Input start last ip num: 1:>").strip())
    ips2 = int(raw_input("Input end ip num: 100:>").strip())
    # jifang = str(raw_input("DataCenter like:jxq:>").strip())
    # login = str(raw_input("User:admin or others:>").strip())
    # password = str(raw_input("Password:>").strip())
    while True:
        if ips1 <= ips2:
            ips1 = str(ips1)
            newip = ips + "." + ips1
            # jifang1 = jifang + "_" + newip
            print "Deleting %s from database\n" % newip
            cur.execute("delete from servers_compute where hostname == '%s'" % newip)
            ips1 = int(ips1)
            ips1 += 1
            conn.commit()
        else:
            break
finally:
    allservers = cur.execute("select id,name,hostname,login,type from servers_compute").fetchall()
    for i in allservers:
        print i
    conn.close()
| apache-2.0 | -621,791,100,471,414,800 | 33.342105 | 95 | 0.603065 | false | 3.246269 | false | false | false |
louisq/staticguru | utility/artifact_archiver.py | 1 | 4495 |
"""
The MIT License (MIT)
Copyright (c) 2016 Louis-Philippe Querel [email protected]
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import glob
import os
import shutil

from Logging import logger

"""
The purpose of this utility is to clone the artifacts that have been generated through the build process to preserve them.
This version would probably only work for Maven-run projects.
"""

FILTERED_EXTENSIONS = ('*.jar', '*.tar.*', '*.zip', '*.rpm')

# todo: replace with an abstract solution that could be reused by the other modules to log the version that was run
artifact_archiver_version = 1


def archive(repo_path, archive_path, repo_id, commit, filter_extensions=True):
    # Determine if we can access the path where the archive should be
    if not _determine_access(archive_path):
        logger.error("Failed to save to archive %s" % archive_path)
        return False

    temp_archive = os.path.join(repo_path, "%s-temp" % commit)
    temp_archive_compress_file_no_ext = os.path.join(temp_archive, commit)
    temp_archive_compress_file = "%s.tar.gz" % temp_archive_compress_file_no_ext
    archive_repo_path = os.path.join(archive_path, repo_id)
    archive_compress_file = "%s.tar.gz" % os.path.join(archive_repo_path, commit)

    _clear_archive(temp_archive, archive_compress_file)

    target_directories = _identify_target_directories(repo_path)
    _clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions=filter_extensions)
    _compress_files(temp_archive, temp_archive_compress_file_no_ext)
    _move_compress_file_to_archive(archive_repo_path, temp_archive_compress_file)

    # Delete the temporary folder
    _clear_archive_temp(temp_archive)
    return True


def _determine_access(archive_path):
    return os.path.exists(archive_path)


def _clear_archive(archive_temp, archive_compress_file):
    _clear_archive_temp(archive_temp)
    if os.path.exists(archive_compress_file):
        os.remove(archive_compress_file)


def _clear_archive_temp(temp_archive):
    if os.path.exists(temp_archive):
        shutil.rmtree(temp_archive)


def _identify_target_directories(repo_path):
    folder = "target"
    nesting = "**/"
    target_directories = glob.glob(r'%s%s' % (repo_path, folder))
    compound_nesting = ""

    # We need to navigate the repository to find project target folders (up to five levels deep)
    for count in range(5):
        compound_nesting += nesting
        target_directories += glob.glob(r'%s%s%s' % (repo_path, compound_nesting, folder))

    return target_directories


def _clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions):
    # Determine if we need to filter any of the files
    if filter_extensions:
        # ignore_patterns expects each pattern as a separate argument, so unpack the tuple
        ignore = shutil.ignore_patterns(*FILTERED_EXTENSIONS)
    else:
        ignore = None

    for path in target_directories:
        folder = path[len(repo_path):]
        shutil.copytree(path, "%s/%s" % (temp_archive, folder), ignore=ignore, symlinks=True)


def _compress_files(archive_temp, temp_archive_compress_file_no_ext):
    # If the compression is changed, the file extension needs to be changed as well in the parent method.
    # Note: shutil._make_tarball is a private helper; shutil.make_archive is the public equivalent.
    shutil._make_tarball(temp_archive_compress_file_no_ext, archive_temp, compress="gzip")


def _move_compress_file_to_archive(repo_archive_path, temp_archive_compress_file):
    if not os.path.exists(repo_archive_path):
        os.makedirs(repo_archive_path)

    shutil.move(temp_archive_compress_file, repo_archive_path)
| mit | -329,184,961,511,472,600 | 34.96 | 121 | 0.729032 | false | 3.806097 | false | false | false |
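
Illustrative only: a rough usage sketch for archive() above. The paths, repo id, and commit value are assumptions, not taken from the original project.

# Hypothetical usage (not part of the original file):
ok = archive(
    repo_path="/tmp/checkouts/myproject/",
    archive_path="/var/artifact-archive/",
    repo_id="myproject",
    commit="a1b2c3d",
    filter_extensions=True)  # skip jars, tarballs, zips and rpms
if not ok:
    logger.error("archiving failed")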
MCFlowMace/Wordom | src/setup.py | 1 | 1423 |
#! /usr/bin/env python

# System imports
from distutils.core import *
from distutils import sysconfig

# Third-party modules - we depend on numpy
import numpy

# in order to check whether LAPACK is present ...
import numpy.distutils.system_info as sysinfo

# Obtain the numpy include directory. This works across numpy versions.
try:
    numpy_include = numpy.get_include()
except AttributeError:
    numpy_include = numpy.get_numpy_include()

# wordom extension module
if len(sysinfo.get_info('lapack')) == 0:
    # no LAPACK found: build without the LAPACK-dependent parts
    _wordom = Extension("_wordom",
                        ["wordom.i", "fileio.c", "tools.c", "qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
                        )
else:
    _wordom = Extension("_wordom",
                        ["wordom.i", "fileio.c", "tools.c", "qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
                        include_dirs=[numpy_include],
                        extra_compile_args=["-D LAPACK"],
                        libraries=['lapack', 'blas']
                        )

# NumpyTypemapTests setup
setup(name="wordom",
      description="wordom is a molecular structure and data manipulation program/library",
      author="Michele Seeber & colleagues",
      url="http://wordom.sf.net",
      author_email="[email protected]",
      license="GPL",
      version="0.23",
      ext_modules=[_wordom],
      py_modules=['wordom']
      )
| gpl-3.0 | 8,072,262,394,954,524,000 | 32.880952 | 96 | 0.575545 | false | 3.658098 | false | true | false |
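
A typical way to build and install such a SWIG/distutils extension is the standard distutils invocation below; the exact commands are an assumption, not taken from the project's documentation.

# python setup.py build_ext --inplace   (build the _wordom extension next to the sources)
# python setup.py install               (install the wordom module system-wide)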
montefra/dodocs | dodocs/__init__.py | 1 | 1068 |
"""Main function

Copyright (c) 2015 Francesco Montesano
MIT Licence
"""

import os
import sys

from dodocs.cmdline import parse
import dodocs.logger as dlog

__version__ = "0.0.1"


def main(argv=None):
    """
    Main code

    Parameters
    ----------
    argv : list of strings, optional
        command line arguments
    """
    args = parse(argv=argv)

    dlog.setLogger(args)
    # make sure to reset the subcommand name
    log = dlog.getLogger()

    if "func" in args:
        args.func(args)
        log.debug("Finished")
        return 0
    else:
        # defaults profile to list
        if args.subparser_name == 'profile' and args.profile_cmd is None:
            main(sys.argv[1:] + ["list"])
        else:
            # in the other cases suggest to run -h
            msg = ("Please provide a valid command.\n"
                   "Type\n  " + os.path.split(sys.argv[0])[1])
            if args.subparser_name is not None:
                msg += " " + args.subparser_name
            msg += ' -h'
            log.error(msg)
        return 1
| mit | 1,274,893,821,913,090,000 | 21.723404 | 73 | 0.549625 | false | 3.695502 | false | false | false |
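
Illustrative only: a conventional entry point for the main() function above; the console-script wiring is an assumption, not part of the original module.

# Hypothetical usage (not part of the original file):
if __name__ == "__main__":
    sys.exit(main())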
mozilla/kitsune | kitsune/wiki/permissions.py | 1 | 4844 |
import logging

from django.conf import settings


log = logging.getLogger("k.wiki")


# Why is this a mixin if it can only be used for the Document model?
# Good question! My only good reason is to keep the permission related
# code organized and contained in one place.
class DocumentPermissionMixin(object):
    """Adds permission checking methods to the Document model."""

    def allows(self, user, action):
        """Check if the user has the permission on the document."""
        # If this is kicking up a KeyError it's probably because you typoed!
        return getattr(self, "_allows_%s" % action)(user)

    def _allows_create_revision(self, user):
        """Can the user create a revision for the document?"""
        # For now (ever?), creating revisions isn't restricted at all.
        return True

    def _allows_edit(self, user):
        """Can the user edit the document?"""
        # Document editing isn't restricted until it has an approved
        # revision.
        if not self.current_revision:
            return True
        # Locale leaders and reviewers can edit in their locale.
        locale = self.locale
        if _is_leader(locale, user) or _is_reviewer(locale, user):
            return True
        # And finally, fall back to the actual django permission.
        return user.has_perm("wiki.change_document")

    def _allows_delete(self, user):
        """Can the user delete the document?"""
        # Locale leaders can delete documents in their locale.
        locale = self.locale
        if _is_leader(locale, user):
            return True
        # Fallback to the django permission.
        return user.has_perm("wiki.delete_document")

    def _allows_archive(self, user):
        """Can the user archive the document?"""
        # Just use the django permission.
        return user.has_perm("wiki.archive_document")

    def _allows_edit_keywords(self, user):
        """Can the user edit the document's keywords?"""
        # If the document is in the default locale, just use the
        # django permission.
        # Editing keywords isn't restricted in other locales.
        return self.locale != settings.WIKI_DEFAULT_LANGUAGE or user.has_perm("wiki.edit_keywords")

    def _allows_edit_needs_change(self, user):
        """Can the user edit the needs change fields for the document?"""
        # If the document is in the default locale, just use the
        # django permission.
        # Needs change isn't used for other locales (yet?).
        return self.locale == settings.WIKI_DEFAULT_LANGUAGE and user.has_perm(
            "wiki.edit_needs_change"
        )

    def _allows_mark_ready_for_l10n(self, user):
        """Can the user mark the document as ready for localization?"""
        # If the document is localizable and the user has the django
        # permission, then the user can mark as ready for l10n.
        return self.is_localizable and user.has_perm("wiki.mark_ready_for_l10n")

    def _allows_review_revision(self, user):
        """Can the user review a revision for the document?"""
        # Locale leaders and reviewers can review revisions in their
        # locale.
        locale = self.locale
        if _is_leader(locale, user) or _is_reviewer(locale, user):
            return True
        # Fallback to the django permission.
        return user.has_perm("wiki.review_revision")

    def _allows_delete_revision(self, user):
        """Can the user delete a document's revisions?"""
        # Locale leaders and reviewers can delete revisions in their
        # locale.
        locale = self.locale
        if _is_leader(locale, user) or _is_reviewer(locale, user):
            return True
        # Fallback to the django permission.
        return user.has_perm("wiki.delete_revision")


def _is_leader(locale, user):
    """Checks if the user is a leader for the given locale.

    Returns False if the locale doesn't exist. This should only happen
    if we forgot to insert a new locale when enabling it or during testing.
    """
    from kitsune.wiki.models import Locale
    try:
        locale_team = Locale.objects.get(locale=locale)
    except Locale.DoesNotExist:
        log.warning("Locale not created for %s" % locale)
        return False
    return user in locale_team.leaders.all()


def _is_reviewer(locale, user):
    """Checks if the user is a reviewer for the given locale.

    Returns False if the locale doesn't exist. This should only happen
    if we forgot to insert a new locale when enabling it or during testing.
    """
    from kitsune.wiki.models import Locale
    try:
        locale_team = Locale.objects.get(locale=locale)
    except Locale.DoesNotExist:
        log.warning("Locale not created for %s" % locale)
        return False
    return user in locale_team.reviewers.all()
| bsd-3-clause | 3,488,450,285,851,385,000 | 35.69697 | 99 | 0.652147 | false | 4.267841 | false | false | false |
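
Illustrative only: how the mixin above is intended to be used from calling code. The document instance and request object are assumptions, not part of the original module.

# Hypothetical usage (not part of the original file):
# doc is a wiki Document instance that includes DocumentPermissionMixin
if doc.allows(request.user, "edit"):
    pass  # show the edit form
if doc.allows(request.user, "review_revision"):
    pass  # show the review UI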
aussendorf/bareos-fd-python-plugins | plugin/BareosFdPluginBaseclass.py | 1 | 5778 |
# This file is now part of the main Bareos repo. Do not use this version; use the package bareos-filedaemon-python-plugin instead.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Baseclass for Bareos python plugins
# Functions taken and adapted from bareos-fd.py
# (c) Bareos GmbH & Co. KG, Maik Aussendorf
# AGPL v.3
from bareosfd import *
from bareos_fd_consts import *
from io import open
from os import O_WRONLY, O_CREAT
class BareosFdPluginBaseclass:
''' Bareos python plugin base class '''
def __init__(self, context, plugindef):
DebugMessage(context, 100, "Constructor called in module " + __name__ + "\n");
events = [];
events.append(bEventType['bEventJobEnd']);
events.append(bEventType['bEventEndBackupJob']);
events.append(bEventType['bEventEndFileSet']);
events.append(bEventType['bEventHandleBackupFile']);
RegisterEvents(context, events);
# get some static Bareos values
self.fdname = GetValue(context, bVariable['bVarFDName']);
self.jobId = GetValue(context, bVariable['bVarJobId']);
self.client = GetValue(context, bVariable['bVarClient']);
self.level = GetValue(context, bVariable['bVarLevel']);
self.jobName = GetValue(context, bVariable['bVarJobName']);
self.workingdir = GetValue(context, bVariable['bVarWorkingDir']);
DebugMessage(context, 100, "FDName = " + self.fdname + " - BareosFdPluginBaseclass\n");
DebugMessage(context, 100, "WorkingDir = " + self.workingdir + " jobId: " + str(self.jobId) + "\n");
def parse_plugin_definition(self,context, plugindef):
DebugMessage(context, 100, "plugin def parser called with " + plugindef + "\n");
# Parse plugin options into a dict
self.options = dict();
plugin_options = plugindef.split(":");
for current_option in plugin_options:
key,sep,val = current_option.partition("=");
DebugMessage(context, 100, "key:val: " + key + ':' + val + "\n");
if val == '':
continue;
else:
self.options[key] = val;
# you should overload this method with your own and do option checking here, return bRCs['bRC_Error'], if options are not ok
# or better call super.parse_plugin_definition in your own class and make sanity check on self.options afterwards
return bRCs['bRC_OK'];
def plugin_io(self, context, IOP):
DebugMessage(context, 100, "plugin_io called with " + str(IOP) + "\n");
FNAME = IOP.fname;
if IOP.func == bIOPS['IO_OPEN']:
try:
if IOP.flags & (O_CREAT | O_WRONLY):
self.file = open(FNAME, 'wb');
else:
self.file = open(FNAME, 'rb');
except:
IOP.status = -1;
return bRCs['bRC_Error'];
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_CLOSE']:
self.file.close();
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_SEEK']:
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_READ']:
IOP.buf = bytearray(IOP.count);
IOP.status = self.file.readinto(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_WRITE']:
IOP.status = self.file.write(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
def handle_plugin_event(self, context, event):
if event == bEventType['bEventJobEnd']:
DebugMessage(context, 100, "handle_plugin_event called with bEventJobEnd\n");
elif event == bEventType['bEventEndBackupJob']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndBackupJob\n");
elif event == bEventType['bEventEndFileSet']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndFileSet\n");
else:
DebugMessage(context, 100, "handle_plugin_event called with event" + str(event) + "\n");
return bRCs['bRC_OK'];
def start_backup_file(self,context, savepkt):
DebugMessage(context, 100, "start_backup called\n");
# Base method, we do not add anything, overload this method with your implementation to add files to backup fileset
return bRCs['bRC_Skip'];
def end_backup_file(self, context):
DebugMessage(context, 100, "end_backup_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def start_restore_file(self, context, cmd):
DebugMessage(context, 100, "start_restore_file() entry point in Python called with" + str(cmd) + "\n")
return bRCs['bRC_OK'];
def end_restore_file(self,context):
DebugMessage(context, 100, "end_restore_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def restore_object_data(self, context, ROP):
DebugMessage(context, 100, "restore_object_data called with " + str(ROP) + "\n");
return bRCs['bRC_OK'];
def create_file(self,context, restorepkt):
DebugMessage(context, 100, "create_file() entry point in Python called with" + str(restorepkt) + "\n")
restorepkt.create_status = bCFs['CF_EXTRACT'];
return bRCs['bRC_OK'];
def check_file(self,context, fname):
DebugMessage(context, 100, "check_file() entry point in Python called with" + str(fname) + "\n")
return bRCs['bRC_OK'];
def handle_backup_file(self,context, savepkt):
DebugMessage(context, 100, "handle_backup_file called with " + str(savepkt) + "\n");
return bRCs['bRC_OK'];
# vim: ts=4 tabstop=4 expandtab shiftwidth=4 softtabstop=4
| agpl-3.0 | -9,126,865,445,505,169,000 | 40.271429 | 132 | 0.607823 | false | 3.560074 | false | false | false |
juanka1331/VAN-applied-to-Nifti-images | final_scripts/tests_over_3dmask_generator.py | 1 | 1589 |
import sys
import os

from lib.data_loader import utils_mask3d

sys.path.append(os.path.dirname(os.getcwd()))

from lib.utils import output_utils
from lib.data_loader import mri_atlas
from lib.data_loader import pet_atlas
from lib.data_loader import PET_stack_NORAD
from lib.data_loader import MRI_stack_NORAD
from lib.utils.os_aux import create_directories
import settings

region = 75
# images = "MRI"
images = "PET"

path_folder3D = os.path.join(settings.path_to_project, "folder3D")
path_folder_masks3d = os.path.join(path_folder3D, "masks3D")
path_mask = os.path.join(
    path_folder_masks3d, "{1}_region:{0}".format(region, images))
create_directories([path_folder3D, path_folder_masks3d])

atlas = None
reshape_kind = None
colour_kind = None
stack_dict = None
if images == "MRI":
    stack_dict = MRI_stack_NORAD.get_gm_stack()
    reshape_kind = "A"
    colour_kind = "Greys"
    atlas = mri_atlas.load_atlas_mri()
elif images == "PET":
    stack_dict = PET_stack_NORAD.get_full_stack()
    reshape_kind = "F"
    colour_kind = "jet"
    # NOTE: the original script never assigns `atlas` in this branch, so the
    # lookup below fails when images == "PET"; a pet_atlas loader call was
    # presumably intended here.

total_size = stack_dict['total_size']
imgsize = stack_dict['imgsize']
voxels_index = stack_dict['voxel_index']

map_region_voxels = atlas[region]  # indices into the non-background voxels
no_bg_region_voxels_index = voxels_index[map_region_voxels]

mask3d = utils_mask3d.generate_region_3dmaskatlas(
    no_bg_region_voxels_index=no_bg_region_voxels_index,
    reshape_kind=reshape_kind,
    imgsize=imgsize,
    totalsize=total_size)

output_utils.from_3d_image_to_nifti_file(
    path_to_save=path_mask,
    image3d=mask3d)
| gpl-2.0 | 1,206,515,291,192,381,200 | 27.375 | 69 | 0.713027 | false | 2.73494 | false | true | false |
stevenwudi/Kernelized_Correlation_Filter | CNN_training.py | 1 | 3640 |
import numpy as np
from keras.optimizers import SGD
from models.CNN_CIFAR import cnn_cifar_batchnormalisation, cnn_cifar_small, cnn_cifar_nodropout, \
    cnn_cifar_small_batchnormalisation
from models.DataLoader import DataLoader
from scripts.progress_bar import printProgress
from time import time, localtime

# this is a predefined dataloader
loader = DataLoader(batch_size=32)
# construct the model here (pre-defined model)
model = cnn_cifar_small_batchnormalisation(loader.image_shape)
print(model.name)

nb_epoch = 200
early_stopping = True
early_stopping_count = 0
early_stopping_wait = 3

train_loss = []
valid_loss = []

learning_rate = [0.0001, 0.001, 0.01]
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=learning_rate[-1], decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)

# load validation data from the h5py file (heavy lifting here)
x_valid, y_valid = loader.get_valid()
best_valid = np.inf

for e in range(nb_epoch):
    print("epoch %d" % e)
    loss_list = []
    time_list = []
    time_start = time()
    for i in range(loader.n_iter_train):
        time_start_batch = time()
        X_batch, Y_batch = loader.next_train_batch()
        loss_list.append(model.train_on_batch(X_batch, Y_batch))
        # calculate some time information
        time_list.append(time() - time_start_batch)
        eta = (loader.n_iter_train - i) * np.array(time_list).mean()
        printProgress(i, loader.n_iter_train-1, prefix='Progress:', suffix='batch error: %0.5f, ETA: %0.2f sec.' % (np.array(loss_list).mean(), eta), barLength=50)
    printProgress(i, loader.n_iter_train - 1, prefix='Progress:', suffix='batch error: %0.5f' % (np.array(loss_list).mean()), barLength=50)
    train_loss.append(np.asarray(loss_list).mean())
    print('training loss is %f, one epoch uses: %0.2f sec' % (train_loss[-1], time() - time_start))
    valid_loss.append(model.evaluate(x_valid, y_valid))
    print('valid loss is %f' % valid_loss[-1])
    if best_valid > valid_loss[-1]:
        early_stopping_count = 0
        print('saving best valid result...')
        best_valid = valid_loss[-1]
        model.save('./models/CNN_Model_OBT100_multi_cnn_best_valid_'+model.name+'.h5')
    else:
        # we wait for early stopping loop until a certain time
        early_stopping_count += 1
        if early_stopping_count > early_stopping_wait:
            early_stopping_count = 0
            if len(learning_rate) > 1:
                learning_rate.pop()
                print('decreasing the learning rate to: %f' % learning_rate[-1])
                model.optimizer.lr.set_value(learning_rate[-1])
            else:
                break

lt = localtime()
lt_str = str(lt.tm_year)+"."+str(lt.tm_mon).zfill(2)+"." \
         +str(lt.tm_mday).zfill(2)+"."+str(lt.tm_hour).zfill(2)+"."\
         +str(lt.tm_min).zfill(2)+"."+str(lt.tm_sec).zfill(2)
np.savetxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt', train_loss)
np.savetxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt', valid_loss)
model.save('./models/CNN_Model_OBT100_multi_cnn_'+model.name+'_final.h5')
print("done")

#### we show some visualisation here
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
train_loss = np.loadtxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt')
valid_loss = np.loadtxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt')
plt.plot(train_loss, 'b')
plt.plot(valid_loss, 'r')
blue_label = mpatches.Patch(color='blue', label='train_loss')
red_label = mpatches.Patch(color='red', label='valid_loss')
plt.legend(handles=[blue_label, red_label])
| gpl-3.0 | -7,064,727,878,876,511,000 | 39.898876 | 161 | 0.657143 | false | 3.033333 | false | false | false |
Cubitect/ASMModSuit | src/ASMVillageMarker.py | 1 | 5318 |
import SRenderLib
from asmutils import *
def create_mod(util):
print '\nSearching for mappings for ASMVillageMarker...'
SRenderLib.setup_lib(util)
lines = util.readj('World')
pos = findOps(lines,0,[['.field','protected',';'],['.field','protected','Z'],['.field','protected',';'],['.field','protected',';']])
util.setmap('VillageCollection',betweenr(lines[pos],'L',';'))
util.setmap('World.villageCollectionObj',endw(lines[pos],2))
pos = findOps(lines,pos+1,[['.method','public','()L'+util.getmap('VillageCollection')]])
if pos is not None:
util.setmap('World.getVillageCollection',endw(lines[pos],3))
lines = util.readj('VillageCollection')
pos = findOps(lines,0,[['.method','public','()Ljava/util/List']])
util.setmap('VillageCollection.getVillageList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('Village',betweenr(lines[pos],')L',';'))
lines = util.readj('Village')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('Village.getCenter',endw(lines[pos],3))
util.setmap('BlockPos',betweenr(lines[pos],')L',';'))
pos = findOps(lines,pos+1,[['.method','public','()I']])
util.setmap('Village.getVillageRadius',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public','()Ljava/util/List']])
util.setmap('Village.getVillageDoorInfoList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('VillageDoorInfo',betweenr(lines[pos],')L',';'))
lines = util.readj('VillageDoorInfo')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('VillageDoorInfo.getDoorBlockPos',endw(lines[pos],3))
lines = util.readj('BlockPos')
pos = findOps(lines,0,[['.super']])
util.setmap('Vec3i',endw(lines[pos],1))
lines = util.readj('Vec3i')
pos = findOps(lines,0, [['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getX',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getY',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getZ',endw(lines[pos-1],3))
print 'Applying ASMVillageMarker patch...'
util.setmap('ASMVillageMarker','villagemarker/ASMVillageMarker')
lines = util.readt('ASMVillageMarker')
lines = '\1'.join(lines)
lines = lines.replace('net/minecraft/server/integrated/IntegratedServer', util.getmap('IntegratedServer'))
lines = lines.replace('net/minecraft/client/entity/EntityPlayerSP', util.getmap('EntityPlayerSP'))
lines = lines.replace('net/minecraft/client/Minecraft', util.getmap('Minecraft'))
lines = lines.replace('net/minecraft/world/WorldServer', util.getmap('WorldServer'))
lines = lines.replace('net/minecraft/util/math/BlockPos', util.getmap('BlockPos'))
lines = lines.replace('net/minecraft/village/VillageCollection', util.getmap('VillageCollection'))
lines = lines.replace('net/minecraft/village/VillageDoorInfo', util.getmap('VillageDoorInfo'))
lines = lines.replace('net/minecraft/village/Village', util.getmap('Village'))
lines = lines.replace('thePlayer', util.getmap('Minecraft.thePlayer'))
lines = lines.replace('dimension', util.getmap('Entity.dimension'))
lines = lines.replace('isSingleplayer', util.getmap('Minecraft.isSingleplayer'))
lines = lines.replace('worldServerForDimension', util.getmap('MinecraftServer.worldServerForDimension'))
lines = lines.replace('getVillageDoorInfoList', util.getmap('Village.getVillageDoorInfoList'))
lines = lines.replace('getVillageCollection', util.getmap('World.getVillageCollection'))
lines = lines.replace('getVillageRadius', util.getmap('Village.getVillageRadius'))
lines = lines.replace('getVillageList', util.getmap('VillageCollection.getVillageList'))
lines = lines.replace('getDoorBlockPos', util.getmap('VillageDoorInfo.getDoorBlockPos'))
lines = lines.replace('getIntegratedServer', util.getmap('Minecraft.getIntegratedServer'))
lines = lines.replace('getMinecraft', util.getmap('Minecraft.getMinecraft'))
lines = lines.replace('getCenter', util.getmap('Village.getCenter'))
lines = lines.replace('getX', util.getmap('Vec3i.getX'))
lines = lines.replace('getY', util.getmap('Vec3i.getY'))
lines = lines.replace('getZ', util.getmap('Vec3i.getZ'))
lines = lines.split('\1')
util.write2mod('ASMVillageMarker',lines)
print 'Injecting render call...'
lines = util.readj('EntityRenderer')
pos = 0
while True:
pos = findOps(lines,pos+1,[['ldc','culling']])
if pos is None:
break
pos = findOps(lines,pos+1,[['dload'],['dload'],['dload']])
playerX = endw(lines[pos-2],1)
playerY = endw(lines[pos-1],1)
playerZ = endw(lines[pos ],1)
pos = findOps(lines,pos+1,[['ldc','aboveClouds']])
pos = goBackTo(lines,pos,['invokevirtual'])
lines.insert(pos+1,'dload '+playerX+'\n')
lines.insert(pos+2,'dload '+playerY+'\n')
lines.insert(pos+3,'dload '+playerZ+'\n')
lines.insert(pos+4,'invokestatic Method '+util.getmap('ASMVillageMarker')+' render (DDD)V\n')
util.write2mod('EntityRenderer',lines)
| gpl-3.0 | 2,978,141,941,346,353,700 | 52.717172 | 136 | 0.668672 | false | 3.097263 | false | false | false |
shadowmint/nwidget | lib/cocos2d-0.5.5/test/test_menu_items.py | 1 | 2268 |
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#

testinfo = "s, q"
tags = "menu items, ToggleMenuItem, MultipleMenuItem, MenuItem, EntryMenuItem, ImageMenuItem, ColorMenuItem"

from pyglet import image
from pyglet.gl import *
from pyglet import font

from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *

from operator import setslice


def printf(*args):
    sys.stdout.write(''.join([str(x) for x in args])+'\n')


class MainMenu(Menu):
    def __init__( self ):
        super( MainMenu, self ).__init__("Test Menu Items")

        # then add the items
        item1 = ToggleMenuItem('ToggleMenuItem: ', self.on_toggle_callback, True )

        resolutions = ['320x200','640x480','800x600', '1024x768', '1200x1024']
        item2 = MultipleMenuItem('MultipleMenuItem: ',
                                 self.on_multiple_callback,
                                 resolutions)

        item3 = MenuItem('MenuItem', self.on_callback )
        item4 = EntryMenuItem('EntryMenuItem:', self.on_entry_callback, 'value',
                              max_length=8)
        item5 = ImageMenuItem('imagemenuitem.png', self.on_image_callback)
        colors = [(255, 255, 255), (129, 255, 100), (50, 50, 100), (255, 200, 150)]
        item6 = ColorMenuItem('ColorMenuItem:', self.on_color_callback, colors)
        self.create_menu( [item1,item2,item3,item4,item5,item6] )

    def on_quit( self ):
        pyglet.app.exit()

    def on_multiple_callback(self, idx ):
        print 'multiple item callback', idx

    def on_toggle_callback(self, b ):
        print 'toggle item callback', b

    def on_callback(self ):
        print 'item callback'

    def on_entry_callback (self, value):
        print 'entry item callback', value

    def on_image_callback (self):
        print 'image item callback'

    def on_color_callback(self, value):
        print 'color item callback:', value


def main():
    pyglet.font.add_directory('.')
    director.init( resizable=True)
    director.run( Scene( MainMenu() ) )

if __name__ == '__main__':
    main()
| apache-2.0 | 510,545,519,956,225,660 | 28.648649 | 108 | 0.609788 | false | 3.705882 | false | false | false |
jpwhite3/python-whirlwind-tour | examples/lab4.py | 1 | 1539 |
from __future__ import print_function
import sys
import re
import glob
import argparse


def eprint(*args, **kwargs):
    # Print to STDERR instead of STDOUT
    print(*args, file=sys.stderr, **kwargs)


def grep(expression, filepath, ignorecase=False, invert=False):
    # NOTE: re.escape() treats the expression as a literal string rather than
    # a regular expression; drop the escape if true regex matching is wanted.
    raw_expression = re.escape(expression)
    with open(filepath) as file:
        for line in file:
            # Enable case matching?
            if ignorecase:
                matches = re.search(raw_expression, line, re.I)
            else:
                matches = re.search(raw_expression, line)

            # Invert matches if need be and print
            if matches and not invert:
                print(line)
            elif invert and not matches:
                print(line)


def main():
    parser = argparse.ArgumentParser(description='This is a pure Python based clone of the GREP command')
    parser.add_argument('expression', action="store", type=str, help="Regular expression to match against")
    parser.add_argument('filepath', action="store", type=str, help="Path to file to search in. Supports wildcard globs")
    parser.add_argument('-i', action="store_true", default=False, dest="ignorecase", help="Ignore case")
    parser.add_argument('-v', action="store_true", default=False, dest="invert", help="Show lines that don't match")
    args = parser.parse_args()

    file_list = glob.glob(args.filepath)
    for f in file_list:
        if len(file_list) > 1:
            eprint("\nResults for file: %s" % f)
            eprint("-"*(len(f)+18))
        grep(args.expression, f, ignorecase=args.ignorecase, invert=args.invert)

if __name__ == '__main__':
    main()
| cc0-1.0 | -776,733,871,353,268,600 | 30.744681 | 117 | 0.680962 | false | 3.367615 | false | false | false |
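
Illustrative only: example invocations of the grep clone above. The -i and -v flags come from the script itself; the file names are made up.

# python lab4.py -i error "logs/*.txt"    -> case-insensitive literal search across matching files
# python lab4.py -v TODO notes.txt        -> print only lines that do not match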
bcarr092/pyCovertAudio | src/pyCovertAudio/BFSKModulator.py | 1 | 2146 |
from pyCovertAudio_lib import *
from BaseModulator import BaseModulator
from SignalFunctions import SignalFunctions


class BFSKModulator(BaseModulator):

    def __init__(
        self, bitsPerSymbol, sampleRate, samplesPerSymbol,
        symbolExpansionFactor, separationIntervals, configuration
    ):
        BaseModulator.__init__(
            self,
            bitsPerSymbol,
            sampleRate,
            samplesPerSymbol,
            symbolExpansionFactor,
            separationIntervals,
            configuration
        )

        (
            self.symbol0Frequency,
            self.symbol1Frequency,
            self.deltaFrequency,
            self.bandwidth
        ) = \
            python_BFSK_determine_frequencies(
                self.samplesPerSymbol,
                self.sampleRate,
                self.carrierFrequency,
                self.separationIntervals
            )

    def modulate(self, symbolSequence, signal, sentinel=None):
        symbolSignalLength = self.samplesPerSymbol * self.symbolExpansionFactor

        for symbol in symbolSequence:
            symbolFrequency = self.carrierFrequency

            if(symbol == 1):
                symbolFrequency += self.symbol1Frequency
            else:
                symbolFrequency += self.symbol0Frequency

            x = \
                SignalFunctions.modulateFSK(
                    symbolSignalLength, self.sampleRate, [symbolFrequency]
                )

            signal.extend(x[: self.samplesPerSymbol])

            signal.extend(
                [0.0 for i in range(
                    (self.symbolExpansionFactor - 1) * self.samplesPerSymbol)]
            )

    def toString(self):
        return (
            "Modulator:\n\tAlgorithm:\t\t\tBFSK\n\tSymbol 0 frequency:\t\t"
            "%.02f\n\tSymbol 1 frequency:\t\t%.02f\n\tMin frequency"
            " separation:\t%.02f\n\tBandwidth:\t\t\t%.02f\n%s"
            % (
                self.symbol0Frequency,
                self.symbol1Frequency,
                self.deltaFrequency,
                self.bandwidth,
                BaseModulator.toString(self)
            )
        )
| apache-2.0 | -7,536,725,190,212,510,000 | 29.225352 | 79 | 0.55685 | false | 4.615054 | false | false | false |
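
Illustrative only: a rough sketch of driving the modulator above. The constructor values and the empty configuration dict are assumptions; in practice they come from pyCovertAudio's own configuration handling (which also supplies the carrier frequency).

# Hypothetical usage (not part of the original file):
modulator = BFSKModulator(
    bitsPerSymbol=1, sampleRate=44100, samplesPerSymbol=4410,
    symbolExpansionFactor=2, separationIntervals=1, configuration={})
signal = []
modulator.modulate([1, 0, 1, 1], signal)  # appends the FSK samples for each bit
print(modulator.toString())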
dhhagan/PAM | Python/PAM.py | 1 | 5037 |
#PAM.py
import re
import glob, os, time
from numpy import *
from pylab import *
def analyzeFile(fileName,delim):
cols = {}
indexToName = {}
lineNum = 0
goodLines = 0
shortLines = 0
FILE = open(fileName,'r')
for line in FILE:
line = line.strip()
if lineNum < 1:
lineNum += 1
continue
elif lineNum == 1:
headings = line.split(delim)
i = 0
for heading in headings:
heading = heading.strip()
cols[heading] = []
indexToName[i] = heading
i += 1
lineNum += 1
lineLength = len(cols)
else:
data = line.split(delim)
if len(data) == lineLength:
goodLines += 1
i = 0
for point in data:
point = point.strip()
cols[indexToName[i]] += [point]
i += 1
lineNum += 1
else:
shortLines += 1
lineNum += 1
continue
FILE.close()
return cols, indexToName, lineNum, shortLines
def numericalSort(value):
numbers = re.compile(r'(\d+)')
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
def popDate(fileName):
run = fileName.split('.')[0]
runNo = run.split('_')[-1]
return runNo
def getFile(date,regex):#Works
files = []
files = sorted((glob.glob('*'+regex+'*')),key=numericalSort,reverse=False)
if date.lower() == 'last':
files = files.pop()
else:
files = [item for item in files if re.search(date,item)]
return files
def plotConc(data,ozone,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
#time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
legend1 = []
legend2 = []
fig = plt.figure('Gas Concentration Readings for East St.Louis')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key, value in ozone.items():
ax2.plot_date(x,ozone[key],'-.',xdate=True)
legend2.append(key)
title('Gas Concentrations for East St. Louis', fontsize = 12)
ax1.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
ax2.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
xlabel(r"$Time \, Stamp$", fontsize = 12)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
grid(True)
return
def plotBankRelays(data,relays,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
#x1 = [date.strftime("%m-%d %H:%M:%S") for date in time]
legend1 = []
legend2 = []
#plt.locator_params(axis='x', nbins=4)
fig = plt.figure('VAPS Thermocouple Readings: Chart 2')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key,value in relays.items():
ax2.plot_date(x,relays[key],'--',xdate=True)
legend2.append(key)
title('VAPS Temperatures: Chart 2', fontsize = 12)
ax1.set_ylabel(r'$Temperature(^oC)$', fontsize = 12)
ax2.set_ylabel(r'$Relay \, States$', fontsize = 12)
ax1.set_xlabel(r"$Time \, Stamp$", fontsize = 12)
#print [num2date(item) for item in ax1.get_xticks()]
#ax1.set_xticks(x)
#ax1.set_xticklabels([date.strftime("%m-%d %H:%M %p") for date in time])
#ax1.legend(bbox_to_anchor=(0.,1.02,1.,.102),loc=3,ncol=2,mode="expand",borderaxespad=0.)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
#ax1.xaxis.set_major_formatter(FormatStrFormatter(date.strftime("%m-%d %H:%M:%S")))
plt.subplots_adjust(bottom=0.15)
grid(True)
return
def goodFiles(files,goodHeaders,delim): # Good
irregFiles = 0
goodFiles = []
for file in files:
lineNo = 0
falseCount = 0
FILE = open(file,'r')
for line in FILE:
line = line.strip()
if lineNo == 5:
# Check all the headings to make sure the file is good
head = line.split(delim)
for item in head:
if item in goodHeaders:
continue
else:
falseCount += 1
if falseCount == 0:
goodFiles.append(file)
else:
irregFiles += 1
lineNo += 1
else:
lineNo += 1
continue
FILE.close()
return goodFiles, irregFiles
| mit | -279,067,604,541,142,340 | 27.297753 | 97 | 0.561842 | false | 3.237147 | false | false | false |
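
Illustrative only: a small sketch of calling analyzeFile() above on a comma-separated data file. The file name and delimiter are assumptions.

# Hypothetical usage (not part of the original file):
cols, indexToName, lineNum, shortLines = analyzeFile('run_01.csv', ',')
print(indexToName)                        # column order as read from the heading row
print(len(cols[indexToName[0]]))          # number of parsed rows in the first column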
ypid/series60-remote | pc/lib/log.py | 1 | 1490 |
# -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2009 Lukas Hetzenecker <[email protected]>

from PyQt4.QtCore import *
from PyQt4.QtGui import *

import logging


class QtStreamHandler(logging.Handler):
    def __init__(self, parent, main):
        logging.Handler.__init__(self)
        self.parent = parent
        self.main = main

        self.textWidget = parent
        self.formater = logging.Formatter("%(message)s")

    def setFormatter(self, format):
        self.formater = format

    def createLock(self):
        self.mutex = QMutex()

    def acquire(self):
        self.mutex.lock()

    def release(self):
        self.mutex.unlock()

    def emit(self, record):
        self.textWidget.appendPlainText(self.formater.format(record))
        self.textWidget.moveCursor(QTextCursor.StartOfLine)
        self.textWidget.ensureCursorVisible()


class QtOutput(object):
    def __init__(self, parent, out=None, color=None):
        self.textWidget = parent
        self.out = out
        self.color = color

    def write(self, m):
        self.textWidget.moveCursor(QTextCursor.End)
        if self.color:
            tc = self.textWidget.textColor()
            self.textWidget.setTextColor(self.color)

        self.textWidget.insertPlainText( m )

        if self.color:
            self.textWidget.setTextColor(tc)

        if self.out:
            if isinstance(m, unicode):
                self.out.write(m.encode("utf8"))
            else:
                self.out.write(m)
| gpl-2.0 | 9,074,983,590,830,688,000 | 25.140351 | 69 | 0.606711 | false | 3.706468 | false | false | false |
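
Illustrative only: wiring the Qt handler above into the standard logging module. The QPlainTextEdit parent and window objects are assumptions.

# Hypothetical usage (not part of the original file):
# text_edit = QPlainTextEdit(parent_window)
# handler = QtStreamHandler(text_edit, main_window)
# handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
# logging.getLogger().addHandler(handler)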
virt-who/virt-who | virtwho/manager/subscriptionmanager/subscriptionmanager.py | 1 | 16260 |
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
Module for communication with subscription-manager, part of virt-who
Copyright (C) 2011 Radek Novacek <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import os
import json
from six.moves.http_client import BadStatusLine
from six import string_types
import rhsm.connection as rhsm_connection
import rhsm.certificate as rhsm_certificate
import rhsm.config as rhsm_config
from virtwho.config import NotSetSentinel
from virtwho.manager import Manager, ManagerError, ManagerFatalError, ManagerThrottleError
from virtwho.virt import AbstractVirtReport
from virtwho.util import generate_correlation_id
class SubscriptionManagerError(ManagerError):
pass
class SubscriptionManagerUnregisteredError(ManagerFatalError):
pass
# Mapping between strings returned from getJob and report statuses
STATE_MAPPING = {
'FINISHED': AbstractVirtReport.STATE_FINISHED,
'CANCELED': AbstractVirtReport.STATE_CANCELED,
'FAILED': AbstractVirtReport.STATE_FAILED,
'RUNNING': AbstractVirtReport.STATE_PROCESSING,
'WAITING': AbstractVirtReport.STATE_PROCESSING,
'CREATED': AbstractVirtReport.STATE_PROCESSING,
}
class NamedOptions(object):
"""
Object used for compatibility with RHSM
"""
pass
class SubscriptionManager(Manager):
sm_type = "sam"
""" Class for interacting subscription-manager. """
def __init__(self, logger, options):
self.logger = logger
self.options = options
self.cert_uuid = None
self.rhsm_config = None
self.cert_file = None
self.key_file = None
self.readConfig()
self.connection = None
self.correlation_id = generate_correlation_id()
def readConfig(self):
""" Parse rhsm.conf in order to obtain consumer
certificate and key paths. """
self.rhsm_config = rhsm_config.initConfig(
rhsm_config.DEFAULT_CONFIG_PATH)
consumer_cert_dir = self.rhsm_config.get("rhsm", "consumerCertDir")
cert = 'cert.pem'
key = 'key.pem'
self.cert_file = os.path.join(consumer_cert_dir, cert)
self.key_file = os.path.join(consumer_cert_dir, key)
def _check_owner_lib(self, kwargs, config):
"""
Try to check values of env and owner. These values has to be
equal to values obtained from Satellite server.
:param kwargs: dictionary possibly containing valid username and
password used for connection to rhsm
:param config: Configuration of virt-who
:return: None
"""
if config is None:
return
# Check 'owner' and 'env' only in situation, when these values
# are set and rhsm_username and rhsm_password are not set
if 'username' not in kwargs and 'password' not in kwargs and \
'owner' in config.keys() and 'env' in config.keys():
pass
else:
return
uuid = self.uuid()
consumer = self.connection.getConsumer(uuid)
if 'environment' in consumer:
environment = consumer['environment']
else:
return
if environment:
environment_name = environment['name']
owner = self.connection.getOwner(uuid)
owner_id = owner['key']
if config['owner'] != owner_id:
raise ManagerError(
"Cannot send data to: %s, because owner from configuration: %s is different" %
(owner_id, config['owner'])
)
if config['env'] != environment_name:
raise ManagerError(
"Cannot send data to: %s, because Satellite env: %s differs from configuration: %s" %
(owner_id, environment_name, config['env'])
)
def _connect(self, config=None):
""" Connect to the subscription-manager. """
kwargs = {
'host': self.rhsm_config.get('server', 'hostname'),
'ssl_port': int(self.rhsm_config.get('server', 'port')),
'handler': self.rhsm_config.get('server', 'prefix'),
'proxy_hostname': self.rhsm_config.get('server', 'proxy_hostname'),
'proxy_port': self.rhsm_config.get('server', 'proxy_port'),
'proxy_user': self.rhsm_config.get('server', 'proxy_user'),
'proxy_password': self.rhsm_config.get('server', 'proxy_password'),
'insecure': self.rhsm_config.get('server', 'insecure')
}
kwargs_to_config = {
'host': 'rhsm_hostname',
'ssl_port': 'rhsm_port',
'handler': 'rhsm_prefix',
'proxy_hostname': 'rhsm_proxy_hostname',
'proxy_port': 'rhsm_proxy_port',
'proxy_user': 'rhsm_proxy_user',
'proxy_password': 'rhsm_proxy_password',
'insecure': 'rhsm_insecure'
}
rhsm_username = None
rhsm_password = None
if config:
try:
rhsm_username = config['rhsm_username']
rhsm_password = config['rhsm_password']
except KeyError:
pass
if rhsm_username == NotSetSentinel:
rhsm_username = None
if rhsm_password == NotSetSentinel:
rhsm_password = None
# Testing for None is necessary, it might be an empty string
for key, value in kwargs.items():
try:
from_config = config[kwargs_to_config[key]]
if from_config is not NotSetSentinel and from_config is \
not None:
if key == 'ssl_port':
from_config = int(from_config)
kwargs[key] = from_config
except KeyError:
continue
if rhsm_username and rhsm_password:
self.logger.debug("Authenticating with RHSM username %s", rhsm_username)
kwargs['username'] = rhsm_username
kwargs['password'] = rhsm_password
else:
self.logger.debug("Authenticating with certificate: %s", self.cert_file)
if not os.access(self.cert_file, os.R_OK):
raise SubscriptionManagerUnregisteredError(
"Unable to read certificate, system is not registered or you are not root")
kwargs['cert_file'] = self.cert_file
kwargs['key_file'] = self.key_file
self.logger.info("X-Correlation-ID: %s", self.correlation_id)
if self.correlation_id:
kwargs['correlation_id'] = self.correlation_id
self.connection = rhsm_connection.UEPConnection(**kwargs)
try:
if not self.connection.ping()['result']:
raise SubscriptionManagerError(
"Unable to obtain status from server, UEPConnection is likely not usable."
)
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
self._check_owner_lib(kwargs, config)
return self.connection
def sendVirtGuests(self, report, options=None):
"""
Update consumer facts with info about virtual guests.
`guests` is a list of `Guest` instances (or it children).
"""
guests = report.guests
self._connect()
# Sort the list
guests.sort(key=lambda item: item.uuid)
serialized_guests = [guest.toDict() for guest in guests]
self.logger.info('Sending update in guests lists for config '
'"%s": %d guests found',
report.config.name, len(guests))
self.logger.debug("Domain info: %s", json.dumps(serialized_guests, indent=4))
# Send list of guest uuids to the server
try:
self.connection.updateConsumer(self.uuid(), guest_uuids=serialized_guests, hypervisor_id=report.hypervisor_id)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
report.state = AbstractVirtReport.STATE_FINISHED
def hypervisorCheckIn(self, report, options=None):
""" Send hosts to guests mapping to subscription manager. """
connection = self._connect(report.config)
is_async = self._is_rhsm_server_async(report, connection)
serialized_mapping = self._hypervisor_mapping(report, is_async, connection)
self.logger.debug("Host-to-guest mapping being sent to '{owner}': {mapping}".format(
owner=report.config['owner'],
mapping=json.dumps(serialized_mapping, indent=4)))
# All subclasses of ConfigSection use dictionary like notation,
# but RHSM uses attribute like notation
if options:
named_options = NamedOptions()
for key, value in options['global'].items():
setattr(named_options, key, value)
else:
named_options = None
try:
try:
result = self.connection.hypervisorCheckIn(
report.config['owner'],
report.config['env'],
serialized_mapping,
options=named_options) # pylint:disable=unexpected-keyword-arg
except TypeError:
# This is temporary workaround until the options parameter gets implemented
# in python-rhsm
self.logger.debug(
"hypervisorCheckIn method in python-rhsm doesn't understand options parameter, ignoring"
)
result = self.connection.hypervisorCheckIn(report.config['owner'], report.config['env'], serialized_mapping)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
if is_async is True:
report.state = AbstractVirtReport.STATE_CREATED
report.job_id = result['id']
else:
report.state = AbstractVirtReport.STATE_FINISHED
return result
def _is_rhsm_server_async(self, report, connection=None):
"""
Check if server has capability 'hypervisor_async'.
"""
if connection is None:
self._connect(report.config)
self.logger.debug("Checking if server has capability 'hypervisor_async'")
is_async = hasattr(self.connection, 'has_capability') and self.connection.has_capability('hypervisors_async')
if is_async:
self.logger.debug("Server has capability 'hypervisors_async'")
else:
self.logger.debug("Server does not have 'hypervisors_async' capability")
return is_async
def _hypervisor_mapping(self, report, is_async, connection=None):
"""
Return mapping of hypervisor
"""
if connection is None:
self._connect(report.config)
mapping = report.association
serialized_mapping = {}
ids_seen = []
if is_async:
hosts = []
# Transform the mapping into the async version
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
hosts.append(hypervisor.toDict())
ids_seen.append(hypervisor.hypervisorId)
serialized_mapping = {'hypervisors': hosts}
else:
# Reformat the data from the mapping to make it fit with
# the old api.
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
guests = [g.toDict() for g in hypervisor.guestIds]
serialized_mapping[hypervisor.hypervisorId] = guests
ids_seen.append(hypervisor.hypervisorId)
return serialized_mapping
def check_report_state(self, report):
# BZ 1554228
job_id = str(report.job_id)
self._connect(report.config)
self.logger.debug('Checking status of job %s', job_id)
try:
result = self.connection.getJob(job_id)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
state = STATE_MAPPING.get(result['state'], AbstractVirtReport.STATE_FAILED)
report.state = state
if state not in (AbstractVirtReport.STATE_FINISHED,
AbstractVirtReport.STATE_CANCELED,
AbstractVirtReport.STATE_FAILED):
self.logger.debug('Job %s not finished', job_id)
else:
# log completed job status
result_data = result.get('resultData', {})
if not result_data:
self.logger.warning("Job status report without resultData: %s", result)
return
if isinstance(result_data, string_types):
self.logger.warning("Job status report encountered the following error: %s", result_data)
return
for fail in result_data.get('failedUpdate', []):
self.logger.error("Error during update list of guests: %s", str(fail))
self.logger.debug("Number of mappings unchanged: %d", len(result_data.get('unchanged', [])))
self.logger.info("Mapping for config \"%s\" updated", report.config.name)
def uuid(self):
""" Read consumer certificate and get consumer UUID from it. """
if not self.cert_uuid:
try:
certificate = rhsm_certificate.create_from_file(self.cert_file)
self.cert_uuid = certificate.subject["CN"]
except Exception as e:
raise SubscriptionManagerError("Unable to open certificate %s (%s):" % (self.cert_file, str(e)))
return self.cert_uuid
| gpl-2.0 | 2,120,471,579,209,106,700 | 40.692308 | 124 | 0.610701 | false | 4.460905 | true | false | false |
kata198/usrsvc | usrsvcmod/Monitoring/ActivityFile.py | 1 | 3670 |
'''
    Copyright (c) 2016 Tim Savannah All Rights Reserved.

    This software is licensed under the terms of the GPLv3.
    This may change at my discretion, retroactively, and without notice.

    You should have received a copy of this with the source distribution as a file titled, LICENSE.

    The most current license can be found at:
    https://github.com/kata198/usrsvc/LICENSE

    This location may need to be changed at some point in the future, in which case
    you may email Tim Savannah <kata198 at gmail dot com>, or find them on the
    current website intended for distribution of usrsvc.

    ActivityFileMonitor - Asserts that a specific file or directory should be modified within a certain threshold
'''
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :

import os
import time

from func_timeout import FunctionTimedOut

from . import MonitoringBase

from ..logging import logMsg, logErr

# TODO: We need to implement the check here as launching and joining on a thread, so that we don't lock up all monitoring if someone
#  uses an NFS file on a disconnected device or anything else that will result in an indefinite uninterruptible ("D") state.


class ActivityFileMonitor(MonitoringBase):
    '''
        ActivityFileMonitor - Class for doing activity file monitoring
    '''

    def __init__(self, programName, activityFile, activityFileLimit):
        MonitoringBase.__init__(self)

        self.programName = programName
        self.activityFile = activityFile
        self.activityFileLimit = activityFileLimit

    @classmethod
    def createFromConfig(cls, programConfig):
        if not programConfig.Monitoring.activityfile:
            return None
        return cls(programConfig.name, programConfig.Monitoring.activityfile, programConfig.Monitoring.activityfile_limit)

    def shouldRestart(self, program=None):
        '''
            Returns True if the activity file has not been modified within the threshold specified by activityfile_limit (should restart), otherwise False.

            @param program - unused.
        '''
        activityFile = self.activityFile
        activityFileLimit = self.activityFileLimit
        programName = self.programName

        if not activityFile:
            # Yes this is checked twice if created through createFromConfig, but it may be called otherwise so better safe.
            return False

        try:
            # If the activity file is not present, this is a fail and we restart.
            if not os.path.exists(activityFile):
                self.setReason('Restarting %s because activity file ( %s ) does not exist\n' %(programName, activityFile,))
                return True

            # Gather the mtime and see if we are past the threshold
            lastModified = os.stat(activityFile).st_mtime

            now = time.time()

            threshold = float(now - self.activityFileLimit)

            if lastModified < threshold:
                self.setReason('Restarting %s because it has not modified activity file ( %s ) in %.4f seconds. Limit is %d seconds.\n' %(programName, activityFile, float(now - lastModified), activityFileLimit) )
                return True
        except FunctionTimedOut:
            logErr('MONITOR: ActivityFile timed out on %s\n' %(programName,))
            raise
        except Exception as e:
            # If we got an exception, just log and try again next round.
            logErr('Got an exception in activity file monitoring. Not restarting program. Program="%s" activityfile="%s"\nlocals: %s\n' %(programName, activityFile, str(locals())))

        return False

# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
| lgpl-2.1 | 5,772,787,185,983,751,000 | 40.704545 | 212 | 0.687193 | false | 4.453883 | true | false | false |
stefanoteso/musm-adt17 | musm/pc.py | 1 | 4018 | import numpy as np
import gurobipy as gurobi
from .problem import Problem
class PC(Problem):
_ATTRIBUTES = [
('cpu', 37),
('hd', 10),
('manufacturer', 8),
('ram', 10),
('monitor', 8),
('pctype', 3),
]
_ATTR_TO_COSTS = {
'pctype': [50, 0, 80],
'manufacturer': [100, 0, 100, 50, 0, 0, 50, 50],
'cpu' : [
1.4*100, 1.4*130, 1.1*70, 1.1*90, 1.2*80, 1.2*50, 1.2*60, 1.2*80,
1.2*90, 1.2*100, 1.2*110, 1.2*120, 1.2*130, 1.2*140, 1.2*170,
1.5*50, 1.5*60, 1.5*80, 1.5*90, 1.5*100, 1.5*110, 1.5*130, 1.5*150,
1.5*160, 1.5*170, 1.5*180, 1.5*220, 1.4*27, 1.4*30, 1.4*40, 1.4*45,
1.4*50, 1.4*55, 1.4*60, 1.4*70, 1.6*70, 1.6*73,
],
'monitor': [
0.6*100, 0.6*104, 0.6*120, 0.6*133, 0.6*140, 0.6*150, 0.6*170,
0.6*210
],
'ram': [
0.8*64, 0.8*128, 0.8*160, 0.8*192, 0.8*256, 0.8*320, 0.8*384,
0.8*512, 0.8*1024, 0.8*2048
],
'hd': [
4*8, 4*10, 4*12, 4*15, 4*20, 4*30, 4*40, 4*60, 4*80, 4*120
],
}
def __init__(self, **kwargs):
super().__init__(sum(attr[1] for attr in self._ATTRIBUTES))
self.cost_matrix = np.hstack([
np.array(self._ATTR_TO_COSTS[attr], dtype=float)
for attr, _ in self._ATTRIBUTES
]).reshape((1, -1)) / 2754.4
def _add_constraints(self, model, x):
base, offs = 0, {}
for attr, size in self._ATTRIBUTES:
offs[attr] = base
x_attr = [x[z] for z in range(base, base + size)]
model.addConstr(gurobi.quicksum(x_attr) == 1)
base += size
def implies(head, body):
# NOTE here we subtract 1 from head and body bits because the bit
# numbers in the constraints were computed starting from one, to
# work in MiniZinc, while Gurobi expects them to start from zero
head = 1 - x[head - 1]
body = gurobi.quicksum([x[i - 1] for i in body])
return model.addConstr(head + body >= 1)
# Manufacturer -> Type
implies(offs['manufacturer'] + 2, [offs['pctype'] + i for i in [1, 2]])
implies(offs['manufacturer'] + 4, [offs['pctype'] + 1])
implies(offs['manufacturer'] + 6, [offs['pctype'] + 2])
implies(offs['manufacturer'] + 7, [offs['pctype'] + i for i in [1, 3]])
# Manufacturer -> CPU
implies(offs['manufacturer'] + 1, [offs['cpu'] + i for i in range(28, 37+1)])
implies(offs['manufacturer'] + 2, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 7, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 4, [offs['cpu'] + i for i in range(5, 27+1)])
implies(offs['manufacturer'] + 3, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 5, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 8, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 6, [offs['cpu'] + i for i in range(16, 27+1)])
# Type -> RAM
implies(offs['pctype'] + 1, [offs['ram'] + i for i in range(1, 9+1)])
implies(offs['pctype'] + 2, [offs['ram'] + i for i in [2, 5, 8, 9]])
implies(offs['pctype'] + 3, [offs['ram'] + i for i in [5, 8, 9, 10]])
# Type -> HD
implies(offs['pctype'] + 1, [offs['hd'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['hd'] + i for i in range(5, 10+1)])
implies(offs['pctype'] + 3, [offs['hd'] + i for i in range(5, 10+1)])
# Type -> Monitor
implies(offs['pctype'] + 1, [offs['monitor'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['monitor'] + i for i in range(6, 8+1)])
implies(offs['pctype'] + 3, [offs['monitor'] + i for i in range(6, 8+1)])
| mit | 4,937,620,061,646,593,000 | 43.153846 | 112 | 0.498507 | false | 2.735194 | false | false | false |
hikelee/launcher | launcher/templatetags/helpers.py | 1 | 6201 | """
sentry.templatetags.sentry_helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
import os.path
from collections import namedtuple
from datetime import timedelta
import pytz
import six
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils import timezone
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from six.moves import range
from six.moves.urllib.parse import quote
from launcher.utils.strings import soft_break as _soft_break,soft_hyphenate,to_unicode,truncatechars
SentryVersion=namedtuple('SentryVersion',[
'current',
'latest',
'update_available',
'build',
])
register=template.Library()
truncatechars=register.filter(stringfilter(truncatechars))
truncatechars.is_safe=True
@register.filter
def multiply(x,y):
def coerce(value):
if isinstance(value,(six.integer_types,float)):
return value
try:
return int(value)
except ValueError:
return float(value)
return coerce(x)*coerce(y)
@register.filter
def pprint(value,break_after=10):
"""
break_after is used to define how often a <span> is
inserted (for soft wrapping).
"""
value=to_unicode(value)
return mark_safe(
u'<span></span>'.
join([escape(value[i:(i+break_after)]) for i in range(0,len(value),break_after)])
)
@register.filter
def is_url(value):
if not isinstance(value,six.string_types):
return False
if not value.startswith(('http://','https://')):
return False
if ' ' in value:
return False
return True
# seriously Django?
@register.filter
def subtract(value,amount):
return int(value)-int(amount)
@register.filter
def absolute_value(value):
return abs(int(value) if isinstance(value,six.integer_types) else float(value))
@register.filter
def has_charts(group):
from launcher.utils.db import has_charts
if hasattr(group,'_state'):
db=group._state.db or 'default'
else:
db='default'
return has_charts(db)
@register.filter
def as_sorted(value):
return sorted(value)
@register.filter
def small_count(v,precision=1):
if not v:
return 0
z=[
(1000000000,_('b')),
(1000000,_('m')),
(1000,_('k')),
]
v=int(v)
for x,y in z:
o,p=divmod(v,x)
if o:
if len(six.text_type(o))>2 or not p:
return '%d%s'%(o,y)
return ('%.{}f%s'.format(precision))%(v/float(x),y)
return v
@register.filter
def num_digits(value):
return len(six.text_type(value))
@register.filter
def to_str(data):
return six.text_type(data)
@register.filter
def is_none(value):
return value is None
@register.filter
def timesince(value,now=None):
from django.template.defaultfilters import timesince
if now is None:
now=timezone.now()
if not value:
return _('never')
if value<(now-timedelta(days=5)):
return value.date()
value=(' '.join(timesince(value,now).split(' ')[0:2])).strip(',')
if value==_('0 minutes'):
return _('just now')
if value==_('1 day'):
return _('yesterday')
return value+_(' ago')
@register.filter
def duration(value):
if not value:
return '0s'
# value is assumed to be in ms
value=value/1000.0
hours,minutes,seconds=0,0,0
if value>3600:
hours=value/3600
value=value%3600
if value>60:
minutes=value/60
value=value%60
seconds=value
output=[]
if hours:
output.append('%dh'%hours)
if minutes:
output.append('%dm'%minutes)
if seconds>1:
output.append('%0.2fs'%seconds)
elif seconds:
output.append('%dms'%(seconds*1000))
return ''.join(output)
@register.filter
def date(dt,arg=None):
from django.template.defaultfilters import date
if not timezone.is_aware(dt):
dt=dt.replace(tzinfo=timezone.utc)
return date(dt,arg)
@register.filter
def trim_schema(value):
return value.split('//',1)[-1]
@register.filter
def with_metadata(group_list,request):
group_list=list(group_list)
if request.user.is_authenticated() and group_list:
project=group_list[0].project
bookmarks=set(
project.bookmark_set.filter(
user=request.user,
group__in=group_list,
).values_list('group_id',flat=True)
)
else:
bookmarks=set()
# TODO(dcramer): this is obsolete and needs to pull from the tsdb backend
historical_data={}
for g in group_list:
yield g,{
'is_bookmarked':g.pk in bookmarks,
'historical_data':','.join(six.text_type(x[1]) for x in historical_data.get(g.id,[])),
}
@register.simple_tag
def percent(value,total,format=None):
if not (value and total):
result=0
else:
result=int(value)/float(total)*100
if format is None:
return int(result)
else:
return ('%%%s'%format)%result
@register.filter
def titlize(value):
return value.replace('_',' ').title()
@register.filter
def split(value,delim=''):
return value.split(delim)
@register.inclusion_tag('sentry/partial/github_button.html')
def github_button(user,repo):
return {
'user':user,
'repo':repo,
}
@register.filter
def urlquote(value,safe=''):
return quote(value.encode('utf8'),safe)
@register.filter
def basename(value):
return os.path.basename(value)
@register.filter
def user_display_name(user):
return user.name or user.username
@register.simple_tag(takes_context=True)
def localized_datetime(context,dt,format='DATETIME_FORMAT'):
request=context['request']
timezone=getattr(request,'timezone',None)
if not timezone:
timezone=pytz.timezone(settings.SENTRY_DEFAULT_TIME_ZONE)
dt=dt.astimezone(timezone)
return date(dt,format)
@register.filter
def format_userinfo(user):
parts=user.username.split('@')
if len(parts)==1:
username=user.username
else:
username=parts[0].lower()
return mark_safe('<span title="%s">%s</span>'%(escape(user.username),escape(username),))
@register.filter
def soft_break(value,length):
return _soft_break(
value,
length,
functools.partial(soft_hyphenate,length=max(length//10,10)),
)
| mit | 335,523,952,679,000,960 | 22.13806 | 100 | 0.687631 | false | 3.33029 | false | false | false |
vigneshkarthi/satireguru | satire-bot.py | 1 | 3178 | import twitter
import yaml
import time
import pickle
import re
global match, api, msg, oldID
import random
msg = ''
#RegEx for parsing twitter handle from retrieved tweets
keyword = '';
#UTF_CHARS = ur'a-z0-9_\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u00ff'
#TAG_EXP = ur'(^|[^0-9A-Z&/]+)(#|\uff03)([0-9A-Z_]*[A-Z_]+[%s]*)' % UTF_CHARS
#TAG_REGEX = re.compile(TAG_EXP, re.UNICODE | re.IGNORECASE)
#Performs OAuth authentication, place all the neccessary keys in access.yaml
def authenticate():
global api
data = yaml.load(open("access.yaml"))
api = twitter.Api(consumer_key=data['consumer-key'],consumer_secret=data['consumer-secret'],access_token_key=data['access-key'],access_token_secret=data['access-secret'])
#Parses response.yaml to search and reply with relevant messages according to twitterhandles, fill your responses in response.yaml
def choose_reply():
global match, msg
comments = yaml.load(open("response.yaml"))
for name in comments['name']:
if(name['keyword']==match):
msg = random.choice(name['response'])
#Module which checks for mentions and replies to the mentioner and the person mentioned
#current version supports only one mentioned person
def get_and_post_replies(old):
cache_msg_to_post = ' '
global match, api
while(1):
try:
i = 0
repl = api.GetMentions()
total = len(repl)
newID = int(repl[i].id)
while(newID != old):
print repl[i].text+", by @"+repl[i].user.screen_name
if "pm" in repl[i].text.lower():
match = 'PM'
print "Match is", match
choose_reply()
msg_to_post = "@"+repl[i].user.screen_name+" "+msg
if(msg_to_post == cache_msg_to_post):
                        msg_to_post = msg_to_post + str(random.randint(0,1000))
cache_msg_to_post = msg_to_post
try:
api.PostUpdate(msg_to_post, in_reply_to_status_id=repl[i].id)
print "Msg posted is", msg_to_post
i = i+1
if (total == i):
break
newID = int(repl[i].id)
except twitter.TwitterError:
print "Something happend.. Saving ID's to file.. Not to Worry"
fileObj = open("idstore",'r+')
old = repl[0].id
fileObj.seek(0)
fileObj.write(str(old))
fileObj.close()
return
else:
i = i + 1
if (total == i):
break
                    newID = int(repl[i].id)
old = int(repl[0].id)
print "No New Tweets !!"
print "Gonna sleep for a minute :)"
time.sleep(60)
except KeyboardInterrupt:
fileObj = open("idstore", 'r+')
fileObj.seek(0)
fileObj.write(str(old))
print "Saving ID's to file.. Exiting!!"
return
authenticate()
fileObj = open("idstore",'r+')
old = fileObj.read()
old = int(old)
get_and_post_replies(old)
| gpl-2.0 | -6,554,156,474,092,308,000 | 35.113636 | 174 | 0.538704 | false | 3.64868 | false | false | false |
shadowk29/cusumtools | legacy/minimal_psd.py | 1 | 12009 | ## COPYRIGHT
## Copyright (C) 2015 Kyle Briggs (kbrig035<at>uottawa.ca)
##
## This file is part of cusumtools.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import tkinter.filedialog
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import scipy.io as sio
from scipy.signal import bessel, filtfilt, welch
from scikits.samplerate import resample
import pylab as pl
import glob
import os
import time
import pandas as pd
from pandasql import sqldf
import re
def make_format(current, other):
# current and other are axes
def format_coord(x, y):
# x, y are data coordinates
# convert to display coords
display_coord = current.transData.transform((x,y))
inv = other.transData.inverted()
# convert back to data coords with respect to ax
ax_coord = inv.transform(display_coord)
coords = [ax_coord, (x, y)]
return ('Left: {:<40} Right: {:<}'
.format(*['({:.3f}, {:.3f})'.format(x, y) for x,y in coords]))
return format_coord
class App(tk.Frame):
def __init__(self, parent,file_path):
tk.Frame.__init__(self, parent)
parent.deiconify()
self.events_flag = False
self.baseline_flag = False
self.file_path = file_path
##### Trace plotting widgets #####
self.trace_frame = tk.LabelFrame(parent,text='Current Trace')
self.trace_fig = Figure(figsize=(7,5), dpi=100)
self.trace_canvas = FigureCanvasTkAgg(self.trace_fig, master=self.trace_frame)
self.trace_toolbar_frame = tk.Frame(self.trace_frame)
self.trace_toolbar = NavigationToolbar2TkAgg(self.trace_canvas, self.trace_toolbar_frame)
self.trace_toolbar.update()
self.trace_frame.grid(row=0,column=0,columnspan=6,sticky=tk.N+tk.S)
self.trace_toolbar_frame.grid(row=1,column=0,columnspan=6)
self.trace_canvas.get_tk_widget().grid(row=0,column=0,columnspan=6)
##### PSD plotting widgets #####
self.psd_frame = tk.LabelFrame(parent,text='Power Spectrum')
self.psd_fig = Figure(figsize=(7,5), dpi=100)
self.psd_canvas = FigureCanvasTkAgg(self.psd_fig, master=self.psd_frame)
self.psd_toolbar_frame = tk.Frame(self.psd_frame)
self.psd_toolbar = NavigationToolbar2TkAgg(self.psd_canvas, self.psd_toolbar_frame)
self.psd_toolbar.update()
self.psd_frame.grid(row=0,column=6,columnspan=6,sticky=tk.N+tk.S)
self.psd_toolbar_frame.grid(row=1,column=6,columnspan=6)
self.psd_canvas.get_tk_widget().grid(row=0,column=6,columnspan=6)
##### Control widgets #####
self.control_frame = tk.LabelFrame(parent, text='Controls')
self.control_frame.grid(row=2,column=0,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.start_entry = tk.Entry(self.control_frame)
self.start_entry.insert(0,'0')
self.start_label = tk.Label(self.control_frame, text='Start Time (s)')
self.start_label.grid(row=0,column=0,sticky=tk.E+tk.W)
self.start_entry.grid(row=0,column=1,sticky=tk.E+tk.W)
self.end_entry = tk.Entry(self.control_frame)
self.end_entry.insert(0,'10')
self.end_label = tk.Label(self.control_frame, text='End Time (s)')
self.end_label.grid(row=0,column=2,sticky=tk.E+tk.W)
self.end_entry.grid(row=0,column=3,sticky=tk.E+tk.W)
self.cutoff_entry = tk.Entry(self.control_frame)
self.cutoff_entry.insert(0,'')
self.cutoff_label = tk.Label(self.control_frame, text='Cutoff (Hz)')
self.cutoff_label.grid(row=1,column=0,sticky=tk.E+tk.W)
self.cutoff_entry.grid(row=1,column=1,sticky=tk.E+tk.W)
self.order_entry = tk.Entry(self.control_frame)
self.order_entry.insert(0,'')
self.order_label = tk.Label(self.control_frame, text='Filter Order')
self.order_label.grid(row=1,column=2,sticky=tk.E+tk.W)
self.order_entry.grid(row=1,column=3,sticky=tk.E+tk.W)
self.samplerate_entry = tk.Entry(self.control_frame)
self.samplerate_entry.insert(0,'250000')
self.samplerate_label = tk.Label(self.control_frame, text='Sampling Frequency (Hz)')
self.samplerate_label.grid(row=1,column=4,sticky=tk.E+tk.W)
self.samplerate_entry.grid(row=1,column=5,sticky=tk.E+tk.W)
self.savegain_entry = tk.Entry(self.control_frame)
self.savegain_entry.insert(0,'1')
        self.savegain_label = tk.Label(self.control_frame, text='Save Gain')
self.savegain_label.grid(row=0,column=4,sticky=tk.E+tk.W)
self.savegain_entry.grid(row=0,column=5,sticky=tk.E+tk.W)
self.plot_trace = tk.Button(self.control_frame, text='Update Trace', command=self.update_trace)
self.plot_trace.grid(row=2,column=0,columnspan=2,sticky=tk.E+tk.W)
self.normalize = tk.IntVar()
self.normalize.set(0)
self.normalize_check = tk.Checkbutton(self.control_frame, text='Normalize', variable = self.normalize)
self.normalize_check.grid(row=2,column=2,sticky=tk.E+tk.W)
self.plot_psd = tk.Button(self.control_frame, text='Update PSD', command=self.update_psd)
self.plot_psd.grid(row=2,column=3,sticky=tk.E+tk.W)
##### Feedback Widgets #####
self.feedback_frame = tk.LabelFrame(parent, text='Status')
self.feedback_frame.grid(row=2,column=6,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.export_psd = tk.Button(self.feedback_frame, text='Export PSD',command=self.export_psd)
self.export_psd.grid(row=1,column=0,columnspan=6,sticky=tk.E+tk.W)
self.export_trace = tk.Button(self.feedback_frame, text='Export Trace',command=self.export_trace)
self.export_trace.grid(row=2,column=0,columnspan=6,sticky=tk.E+tk.W)
self.load_memmap()
self.initialize_samplerate()
def export_psd(self):
try:
data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir='G:\PSDs for Sam')
np.savetxt(data_path,np.c_[self.f, self.Pxx, self.rms],delimiter=',')
except AttributeError:
self.wildcard.set('Plot the PSD first')
def export_trace(self):
try:
data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir='G:\Analysis\Pores\NPN\PSDs')
np.savetxt(data_path,self.plot_data,delimiter=',')
except AttributeError:
self.wildcard.set('Plot the trace first')
def load_mapped_data(self):
self.total_samples = len(self.map)
self.samplerate = int(self.samplerate_entry.get())
if self.start_entry.get()!='':
self.start_time = float(self.start_entry.get())
start_index = int((float(self.start_entry.get())*self.samplerate))
else:
self.start_time = 0
start_index = 0
if self.end_entry.get()!='':
self.end_time = float(self.end_entry.get())
end_index = int((float(self.end_entry.get())*self.samplerate))
if end_index > self.total_samples:
end_index = self.total_samples
self.data = self.map[start_index:end_index]
self.data = float(self.savegain_entry.get()) * self.data
def load_memmap(self):
columntypes = np.dtype([('current', '>i2'), ('voltage', '>i2')])
self.map = np.memmap(self.file_path, dtype=columntypes, mode='r')['current']
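    # (Added note) integrate_noise() computes the cumulative RMS noise curve by
    # integrating the one-sided PSD over frequency: sqrt(cumsum(Pxx * df)).
    # It feeds the right-hand (twinx) axis of the PSD plot drawn in update_psd().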
def integrate_noise(self, f, Pxx):
df = f[1]-f[0]
return np.sqrt(np.cumsum(Pxx * df))
def filter_data(self):
cutoff = float(self.cutoff_entry.get())
order = int(self.order_entry.get())
Wn = 2.0 * cutoff/float(self.samplerate)
b, a = bessel(order,Wn,'low')
padding = 1000
padded = np.pad(self.data, pad_width=padding, mode='median')
self.filtered_data = filtfilt(b, a, padded, padtype=None)[padding:-padding]
def initialize_samplerate(self):
self.samplerate = float(self.samplerate_entry.get())
##### Plot Updating functions #####
def update_trace(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
self.trace_fig.clf()
a = self.trace_fig.add_subplot(111)
time = np.linspace(1.0/self.samplerate,len(self.plot_data)/float(self.samplerate),len(self.plot_data))+self.start_time
a.set_xlabel(r'Time ($\mu s$)')
a.set_ylabel('Current (pA)')
self.trace_fig.subplots_adjust(bottom=0.14,left=0.21)
a.plot(time*1e6,self.plot_data,'.',markersize=1)
self.trace_canvas.show()
def update_psd(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
maxf = 2*float(self.cutoff_entry.get())
else:
maxf = 2*float(self.samplerate_entry.get())
length = np.minimum(2**18,len(self.filtered_data))
end_index = int(np.floor(len(self.filtered_data)/length)*length)
current = np.average(self.filtered_data[:end_index])
f, Pxx = welch(self.filtered_data, plot_samplerate,nperseg=length)
self.rms = self.integrate_noise(f, Pxx)
if self.normalize.get():
Pxx /= current**2
Pxx *= maxf/2.0
self.rms /= np.absolute(current)
self.f = f
self.Pxx = Pxx
minf = 1
BW_index = np.searchsorted(f, maxf/2)
logPxx = np.log10(Pxx[1:BW_index])
minP = 10**np.floor(np.amin(logPxx))
maxP = 10**np.ceil(np.amax(logPxx))
self.psd_fig.clf()
a = self.psd_fig.add_subplot(111)
a.set_xlabel('Frequency (Hz)')
a.set_ylabel(r'Spectral Power ($\mathrm{pA}^2/\mathrm{Hz}$)')
a.set_xlim(minf, maxf)
a.set_ylim(minP, maxP)
self.psd_fig.subplots_adjust(bottom=0.14,left=0.21)
a.loglog(f[1:],Pxx[1:],'b-')
for tick in a.get_yticklabels():
tick.set_color('b')
a2 = a.twinx()
a2.semilogx(f, self.rms, 'r-')
a2.set_ylabel('RMS Noise (pA)')
a2.set_xlim(minf, maxf)
for tick in a2.get_yticklabels():
tick.set_color('r')
a2.format_coord = make_format(a2, a)
self.psd_canvas.show()
def main():
root=tk.Tk()
root.withdraw()
file_path = tkinter.filedialog.askopenfilename(initialdir='C:/Data/')
App(root,file_path).grid(row=0,column=0)
root.mainloop()
if __name__=="__main__":
main()
| gpl-3.0 | 2,097,180,032,333,189,600 | 38.503289 | 126 | 0.615955 | false | 3.252709 | false | false | false |
syhpoon/xyzcmd | libxyz/vfs/vfsobj.py | 1 | 8497 | #-*- coding: utf8 -*-
#
# Max E. Kuznecov ~syhpoon <[email protected]> 2008
#
# This file is part of XYZCommander.
# XYZCommander is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XYZCommander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
# You should have received a copy of the GNU Lesser Public License
# along with XYZCommander. If not, see <http://www.gnu.org/licenses/>.
import os
from libxyz.core.utils import bstring, ustring
from libxyz.vfs import types, util
class VFSObject(object):
"""
Abstract interface for VFS objects
"""
def __init__(self, xyz, path, full_path, ext_path, driver, parent,
enc=None, **kwargs):
self.xyz = xyz
self.enc = enc or xyzenc
# Internal VFS path
self.path = bstring(path, self.enc)
# Full VFS path
self.full_path = bstring(full_path, self.enc)
# External VFS path
self.ext_path = bstring(ext_path, self.enc)
self.parent = parent
self.driver = driver
self.kwargs = kwargs
self.fileobj = None
# File name
self.name = os.path.basename(self.path)
# File type
self.ftype = None
# Access time
self.atime = None
# Modified time
self.mtime = None
# Changed time
self.ctime = None
# Size in bytes
self.size = None
# Owner UID
self.uid = None
# Group
self.gid = None
# Mode
self.mode = None
# Inode
self.inode = None
# Visual file type
self.vtype = None
# Visual file representation
self.visual = None
# File info
self.info = None
# Any type-specific data
self.data = None
# List of significant attributes
self.attributes = ()
self.__ni_msg = _(u"Feature not implemented")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_file(self):
"""
Return True if instance is representing regular file
"""
return isinstance(self.ftype, types.VFSTypeFile)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir(self):
"""
Return True if instance is representing directory
"""
return isinstance(self.ftype, types.VFSTypeDir)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir_empty(self):
"""
Return True if instance is representing directory and it is empty
"""
if not self.is_dir():
return False
_, _, objs = self.walk()
return len(objs) == 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_link(self):
"""
Return True if instance is representing soft link
"""
return isinstance(self.ftype, types.VFSTypeLink)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_char(self):
"""
Return True if instance is representing soft char device
"""
return isinstance(self.ftype, types.VFSTypeChar)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_block(self):
"""
Return True if instance is representing block device
"""
return isinstance(self.ftype, types.VFSTypeBlock)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_fifo(self):
"""
Return True if instance is representing FIFO
"""
return isinstance(self.ftype, types.VFSTypeFifo)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_socket(self):
"""
Return True if instance is representing socket
"""
return isinstance(self.ftype, types.VFSTypeSocket)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def copy(self, path, existcb=None, errorcb=None,
save_attrs=True, follow_links=False, cancel=None):
"""
Copy file to specified location
@param path: Local path to copy file to
@param existcb: Callback function to be called if there exists
an object in target directory with the same name.
Callback function receives VFSObject instance as an
argument and must return one of:
'override' - to override this very object
'override all' - to override any future collisions
'skip' - to skip the object
'skip all' - to skip all future collisions
'abort' - to abort the process.
If no existscb provided 'abort' is used as default
@param errorcb: Callback function to be called in case an error occured
during copying. Function receives VFSObject instance
and error string as arguments and must return one of:
'skip' - to continue the process
'skip all' - to skip all future errors
'abort' - to abort the process.
If no errorcb provided 'abort' is used as default
@param save_attrs: Whether to save object attributes
@param follow_links: Whether to follow symlinks
@param cancel: a threading.Event instance, if it is found set - abort
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def move(self, path, existcb=None, errorcb=None, save_attrs=True,
follow_links=False, cancel=None):
"""
Move object
Arguments are the same as for copy()
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def mkdir(self, newdir):
"""
Create new dir inside object (only valid for directory object types)
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def remove(self, recursive=True):
"""
[Recursively] remove object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def walk(self):
"""
Directory tree walker
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def open(self, mode='r'):
"""
Open self object in provided mode
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def close(self):
"""
Close self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def read(self, bytes=None):
"""
Read bytes from self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def tell(self):
"""
Tell file position
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def seek(self, offset, whence=None):
"""
Perform seek() on object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def in_dir(self, d, e):
"""
Filter only those archive entries which exist in the same
directory level
"""
if e.startswith(d.lstrip(os.sep)) and \
len(util.split_path(e)) == (len(util.split_path(d)) + 1):
return True
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __repr__(self):
return self.__str__()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __unicode__(self):
return ustring(self.__str__())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __del__(self):
if self.fileobj:
try:
self.close()
except Exception:
pass
| gpl-3.0 | -442,618,524,861,205,300 | 26.146965 | 79 | 0.485348 | false | 5.012979 | false | false | false |
317070/kaggle-heart | ira/configurations/gauss_roi10_maxout.py | 1 | 9185 | from collections import namedtuple
import lasagne as nn
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer
import data_iterators
import numpy as np
import theano.tensor as T
from functools import partial
import utils_heart
import nn_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import utils
import data
caching = None
restart_from_save = None
rng = np.random.RandomState(42)
patch_size = (64, 64)
train_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1 / 1.5, 1.5),
'do_flip': True,
'sequence_shift': False
}
valid_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
}
test_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1., 1.),
'do_flip': True,
'sequence_shift': False
}
data_prep_fun = data.transform_norm_rescale_after
batch_size = 32
nbatches_chunk = 16
chunk_size = batch_size * nbatches_chunk
train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)
train_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=train_transformation_params,
patient_ids=train_valid_ids['train'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=True, random=True, infinite=True,
data_prep_fun=data_prep_fun)
valid_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=valid_transformation_params,
patient_ids=train_valid_ids['valid'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
test_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_VALIDATE_DATA_PATH,
batch_size=chunk_size,
transform_params=test_transformation_params,
slice2roi_path='pkl_validate_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 150
learning_rate_schedule = {
0: 0.0002,
int(max_nchunks * 0.1): 0.0001,
int(max_nchunks * 0.3): 0.000075,
int(max_nchunks * 0.6): 0.00005,
int(max_nchunks * 0.9): 0.00001
}
validate_every = 2 * nchunks_per_epoch
save_every = 2 * nchunks_per_epoch
conv3 = partial(Conv2DDNNLayer,
stride=(1, 1),
pad="same",
filter_size=(3, 3),
nonlinearity=nn.nonlinearities.very_leaky_rectify,
b=nn.init.Constant(0.1),
W=nn.init.Orthogonal("relu"))
max_pool = partial(MaxPool2DDNNLayer,
pool_size=(2, 2),
stride=(2, 2))
def build_model(l_in=None):
l_in = nn.layers.InputLayer((None, 30) + patch_size) if not l_in else l_in
l = conv3(l_in, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l_d01 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d01 = nn.layers.FeaturePoolLayer(l_d01, pool_size=2)
l_d02 = nn.layers.DenseLayer(nn.layers.dropout(l_d01), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d02 = nn.layers.FeaturePoolLayer(l_d02, pool_size=2)
mu0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(50), nonlinearity=nn_heart.lb_softplus())
sigma0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf0 = nn_heart.NormalCDFLayer(mu0, sigma0, sigma_logscale=False, mu_logscale=False)
# ---------------------------------------------------------------
l_d11 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d11 = nn.layers.FeaturePoolLayer(l_d11, pool_size=2)
l_d12 = nn.layers.DenseLayer(nn.layers.dropout(l_d11), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d12 = nn.layers.FeaturePoolLayer(l_d12, pool_size=2)
mu1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(100), nonlinearity=nn_heart.lb_softplus())
sigma1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf1 = nn_heart.NormalCDFLayer(mu1, sigma1, sigma_logscale=False, mu_logscale=False)
l_outs = [l_cdf0, l_cdf1]
l_top = nn.layers.MergeLayer(l_outs)
l_target_mu0 = nn.layers.InputLayer((None, 1))
l_target_mu1 = nn.layers.InputLayer((None, 1))
l_targets = [l_target_mu0, l_target_mu1]
dense_layers = [l_d01, l_d02, l_d11, l_d12, mu0, sigma0, mu0, mu1]
mu_layers = [mu0, mu1]
sigma_layers = [sigma0, sigma1]
return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'dense_layers', 'mu_layers', 'sigma_layers'])(
[l_in], l_outs, l_targets,
l_top, dense_layers, mu_layers, sigma_layers)
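# (Added note) build_objective() implements a CRPS-style loss: the mean squared
# difference between each predicted volume CDF and the Heaviside step function of
# the corresponding target, averaged over the two volume targets.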
def build_objective(model, deterministic=False):
p0 = nn.layers.get_output(model.l_outs[0], deterministic=deterministic)
t0 = nn.layers.get_output(model.l_targets[0])
t0_heaviside = nn_heart.heaviside(t0)
crps0 = T.mean((p0 - t0_heaviside) ** 2)
p1 = nn.layers.get_output(model.l_outs[1], deterministic=deterministic)
t1 = nn.layers.get_output(model.l_targets[1])
t1_heaviside = nn_heart.heaviside(t1)
crps1 = T.mean((p1 - t1_heaviside) ** 2)
return 0.5 * (crps0 + crps1)
def build_updates(train_loss, model, learning_rate):
updates = nn.updates.adam(train_loss, nn.layers.get_all_params(model.l_top), learning_rate)
return updates
def get_mean_validation_loss(batch_predictions, batch_targets):
return [0, 0]
def get_mean_crps_loss(batch_predictions, batch_targets, batch_ids):
nbatches = len(batch_predictions)
npredictions = len(batch_predictions[0])
crpss = []
for i in xrange(npredictions):
p, t = [], []
for j in xrange(nbatches):
p.append(batch_predictions[j][i])
t.append(batch_targets[j][i])
p, t = np.vstack(p), np.vstack(t)
target_cdf = utils_heart.heaviside_function(t)
crpss.append(np.mean((p - target_cdf) ** 2))
return crpss
def get_avg_patient_predictions(batch_predictions, batch_patient_ids, mean):
return utils_heart.get_patient_average_cdf_predictions(batch_predictions, batch_patient_ids, mean)
| mit | 3,756,646,750,156,495,400 | 39.10917 | 119 | 0.552314 | false | 3.31948 | false | false | false |
SafeW3rd/Ciphers | primeSieve.py | 1 | 1139 | # Prime Number Sieve
# http://inventwithpython.com/hacking (BSD Licensed)
import math
def isPrime(num):
# Returns True if num is a prime number, otherwise False.
# Note: Generally, isPrime() is slower than primeSieve().
# all numbers less than 2 are not prime
if num < 2:
return False
# see if num is divisible by any number up to the square root of num
for i in range(2, int(math.sqrt(num)) + 1):
if num % i == 0:
return False
return True
def primeSieve(sieveSize):
# Returns a list of prime numbers calculated using
# the Sieve of Eratosthenes algorithm.
sieve = [True] * sieveSize
sieve[0] = False # zero and one are not prime numbers
sieve[1] = False
# create the sieve
for i in range(2, int(math.sqrt(sieveSize)) + 1):
pointer = i * 2
while pointer < sieveSize:
sieve[pointer] = False
pointer += i
# compile the list of primes
primes = []
for i in range(sieveSize):
if sieve[i] == True:
primes.append(i)
return primes
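# Example usage (added for illustration; not part of the original script):
if __name__ == '__main__':
    print(primeSieve(50))   # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
    print(isPrime(47))      # True
    print(isPrime(49))      # False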
| mit | -7,295,585,561,268,958,000 | 23.886364 | 72 | 0.587357 | false | 3.559375 | false | false | false |
iamaris/CMUAnalysis | Common/generateObjectTree.py | 1 | 11728 | import re
import os
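# (Added overview) This generator script parses the <Object>Vars class definitions in
# ObjectVars.h for the objects listed below, then writes ObjectTree.h and ObjectTree.cc,
# and regenerates ObjectVars.cc while preserving the user-defined implementation block
# between the START/END marker comments.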
objects = ['Photon', 'Electron', 'Muon', 'Jet', 'Vertex']
susyObjects = {'Photon': 'Photon', 'Electron': 'Electron', 'Muon': 'Muon', 'Jet': 'PFJet', 'Vertex': 'Vertex'}
objectVars = file('ObjectVars.h')
classPat = re.compile('^[ ]*class[ ]+([a-zA-Z0-9]+)Vars[ ]*{')
cTorPat = re.compile('^[ ]*[a-zA-Z0-9]+Vars\([^,]+(,[ ]+Event.*|)\);')
varPat = re.compile('^[ ]*((?:unsigned[ ]|)(?:bool|char|short|int|unsigned|long|float|double))[ ]+([a-zA-Z_][a-zA-Z0-9_]*);')
useEvent = dict()
varList = dict()
obj = ''
for line in objectVars:
if '};' in line:
obj = ''
if obj:
cTorMatch = cTorPat.match(line)
if cTorMatch:
useEvent[obj] = len(cTorMatch.group(1)) != 0
varMatch = varPat.match(line)
if varMatch:
varList[obj].append((varMatch.group(1), varMatch.group(2)))
lineMatch = classPat.match(line)
if lineMatch and lineMatch.group(1) in objects:
obj = lineMatch.group(1)
varList[obj] = []
objectVars.close()
# GENERATE HEADER
headerContent = '''/* Auto-generated header file */
#ifndef ObjectTree_h
#define ObjectTree_h
#include "ObjectVars.h"
#include "TTree.h"
#include "TString.h"
namespace susy {
unsigned const NMAX(512);
'''
for obj in objects:
headerContent += '''
class ''' + obj + '''VarsArray {
public:
''' + obj + '''VarsArray() {}
~''' + obj + '''VarsArray() {}
void setBranches(TTree&);
void setAddress(TTree&);
void push_back(''' + obj + '''Vars const&);
void clear() { size = 0; }
''' + obj + '''Vars at(unsigned) const;
unsigned size;
'''
for (type, name) in varList[obj]:
headerContent += '''
''' + type + ' ' + name + '[NMAX];'
headerContent += '''
};
'''
headerContent += '''
class ObjectTree {
public:
ObjectTree();
~ObjectTree();
void setOutput(TString const&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void setOutput(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
static void setBranchStatus(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void initEvent(Event const&);
void fill() { output_->Fill(); }'''
for obj in objects:
lowerName = obj.lower()
headerContent += '''
void save(''' + obj + 'Vars const& _vars) { ' + lowerName + 'Array_.push_back(_vars); }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
unsigned get''' + obj + 'Size() const { return ' + lowerName + 'Array_.size; }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + 'VarsArray const& get' + obj + 'Array() const { return ' + lowerName + 'Array_; }'
headerContent += '''
private:
void setBranches_('''
for i in range(len(objects)):
headerContent += 'bool'
if i != len(objects) - 1:
headerContent += ', '
else:
headerContent += ');'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + '''VarsArray ''' + lowerName + '''Array_;'''
headerContent += '''
unsigned runNumber_;
unsigned lumiNumber_;
unsigned eventNumber_;
TTree* output_;
bool ownOutput_;
};
}
#endif
'''
headerFile = file('ObjectTree.h', 'w')
headerFile.write(headerContent)
headerFile.close()
# GENERATE SRC
cTors = dict()
setBranches = dict()
setAddress = dict()
pushBack = dict()
at = dict()
for obj in objects:
lowerName = obj.lower()
cTorText = '''
''' + obj + 'Vars::' + obj + '''Vars() :'''
initList = ''
for (type, name) in varList[obj]:
initList += '''
''' + name + '('
if type == 'float' or type == 'double':
initList += '0.'
elif type == 'bool':
initList += 'false'
else:
initList += '0'
initList += '),'
initList = initList.rstrip(',')
cTorText += initList
cTorText += '''
{
}
'''
cTors[obj] = cTorText
setBranchText = '''
void
''' + obj + '''VarsArray::setBranches(TTree& _tree)
{
_tree.Branch("''' + lowerName + '.size", &size, "' + lowerName + '.size/i");'
for (type, name) in varList[obj]:
branch = '''
_tree.Branch("''' + lowerName + '.' + name + '", ' + name + ', "' + name + '[' + lowerName + '.size]/'
if type == 'char':
branch += 'B'
elif type == 'unsigned char':
branch += 'b'
elif type == 'short':
branch += 'S'
elif type == 'unsigned short':
branch += 's'
elif type == 'int':
branch += 'I'
elif type == 'unsigned' or type == 'unsigned int':
branch += 'i'
elif type == 'long':
branch += 'L'
elif type == 'unsigned long':
branch += 'l'
elif type == 'float':
branch += 'F'
elif type == 'double':
branch += 'D'
elif type == 'bool':
branch += 'O'
branch += '");'
setBranchText += branch
setBranchText += '''
}
'''
setBranches[obj] = setBranchText
setAddressText = '''
void
''' + obj + '''VarsArray::setAddress(TTree& _tree)
{
std::vector<TString> notFound;
_tree.SetBranchAddress("''' + lowerName + '.size", &size);'
for (type, name) in varList[obj]:
bName = lowerName + '.' + name
setAddressText += '''
if(_tree.GetBranch("''' + bName + '")) _tree.SetBranchAddress("' + bName + '", ' + name + ''');
else notFound.push_back("''' + bName + '");'
setAddressText += '''
for(unsigned iN(0); iN != notFound.size(); ++iN)
std::cerr << "Branch " << notFound[iN] << " not found in input" << std::endl;
}
'''
setAddress[obj] = setAddressText
pushBackText = '''
void
''' + obj + 'VarsArray::push_back(' + obj + '''Vars const& _vars)
{
if(size == NMAX - 1)
throw std::runtime_error("Too many ''' + obj + '''s");
'''
for (type, name) in varList[obj]:
pushBackText += '''
''' + name + '[size] = _vars.' + name + ';'
pushBackText += '''
++size;
}
'''
pushBack[obj] = pushBackText
atText = '''
''' + obj + '''Vars
''' + obj + '''VarsArray::at(unsigned _pos) const
{
if(_pos >= size)
throw std::runtime_error("''' + obj + '''Vars out-of-bounds");
''' + obj + '''Vars vars;
'''
for (type, name) in varList[obj]:
atText += '''
vars.''' + name + ' = ' + name + '[_pos];'
atText += '''
return vars;
}
'''
at[obj] = atText
preamble = '#include "ObjectVars.h"\n'
try:
originalSrc = file('ObjectVars.cc', 'r')
userDef = ''
copy = False
namespace = False
for line in originalSrc:
if 'namespace susy' in line:
namespace = True
if not namespace and 'ObjectVars.h' not in line and not re.match('^[ ]*/\*.*\*/[ ]*$', line):
preamble += line
if '/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = True
if copy:
userDef += line
if '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = False
originalSrc.close()
except:
userDef = '\n/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
for obj in objects:
userDef += '''
void
''' + obj + '''Vars::set(''' + susyObjects[obj] + ' const&'
if useEvent[obj]:
userDef += ', Event const&'
userDef += ''')
{
}
/*static*/
''' + obj + '''Vars::setBranchStatus(TTree&)
{
}
'''
userDef += '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
# ObjectTree.cc
objTreeContent = '''/* Auto-generated source file */
#include "ObjectTree.h"
#include "TFile.h"
#include <stdexcept>
#include <iostream>
namespace susy {
'''
for obj in objects:
objTreeContent += setBranches[obj]
objTreeContent += setAddress[obj]
objTreeContent += pushBack[obj]
objTreeContent += at[obj]
objTreeContent += '''
ObjectTree::ObjectTree() :'''
for obj in objects:
lowerName = obj.lower()
objTreeContent += '''
''' + lowerName + '''Array_(),'''
objTreeContent += '''
runNumber_(0),
lumiNumber_(0),
eventNumber_(0),
output_(0),
ownOutput_(false)
{
}
ObjectTree::~ObjectTree()
{
if(ownOutput_ && output_){
TFile* outFile(output_->GetCurrentFile());
outFile->cd();
output_->Write();
delete outFile;
}
}
void
ObjectTree::setOutput(TString const& _fileName'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
ownOutput_ = true;
TFile::Open(_fileName, "recreate");
output_ = new TTree("objectVars", "Object ID variables");
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
void
ObjectTree::setOutput(TTree& _tree'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
output_ = &_tree;
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
/*static*/
void
ObjectTree::setBranchStatus(TTree& _input'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
_input.SetBranchStatus("runNumber", 1);
_input.SetBranchStatus("luminosityBlockNumber", 1);
_input.SetBranchStatus("eventNumber", 1);
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj + 'Vars::setBranchStatus(_input);'
objTreeContent += '''
}
#ifdef STANDALONE
void
ObjectTree::initEvent(Event const&)
{
runNumber_ = 0;
lumiNumber_ = 0;
eventNumber_ = 0;
#else
void
ObjectTree::initEvent(Event const& _event)
{
runNumber_ = _event.runNumber;
lumiNumber_ = _event.luminosityBlockNumber;
eventNumber_ = _event.eventNumber;
#endif'''
for obj in objects:
objTreeContent += '''
''' + obj.lower() + 'Array_.clear();'
objTreeContent += '''
}
void
ObjectTree::setBranches_('''
for obj in objects:
objTreeContent += 'bool _set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ') + ')'
objTreeContent += '''
{
output_->Branch("runNumber", &runNumber_, "runNumber/i");
output_->Branch("lumiNumber", &lumiNumber_, "lumiNumber/i");
output_->Branch("eventNumber", &eventNumber_, "eventNumber/i");
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj.lower() + 'Array_.setBranches(*output_);'
objTreeContent += '''
}
'''
objTreeContent += '}\n'
objTreeFile = file('ObjectTree.cc', 'w')
objTreeFile.write(objTreeContent)
objTreeFile.close()
# ObjectVars.cc
objVarsContent = '''/* Partially auto-generated source file - edit where indicated */
/* Add necessary inclusions below */
''' + preamble + '''
namespace susy {
'''
for obj in objects:
objVarsContent += cTors[obj]
objVarsContent += '\n'
objVarsContent += userDef
objVarsContent += '''
}
'''
objVarsFile = file('ObjectVars.cc', 'w')
objVarsFile.write(objVarsContent)
objVarsFile.close()
| apache-2.0 | 6,525,613,012,333,786,000 | 21.339048 | 125 | 0.548857 | false | 3.303662 | false | false | false |
dsimic/taxsims | ss.py | 1 | 1112 | import pandas as pd
import numpy as np
def ss_calc(
contrib_yearly, inv_gwth_rt, num_years, safe_withdrw_rate, start_age=28
):
"""
    inv_gwth_rt is inflation adjusted.
    contrib_yearly is in first year's dollars
"""
tot_years = max(0, 62 - start_age - num_years) + num_years
df = pd.DataFrame({
'contrib_yearly': [contrib_yearly] * num_years + [0.] *
max(0, (62 - num_years - start_age)),
'inv_value': [0] * tot_years,
}, index=range(tot_years))
for year in range(0, tot_years):
print year
multiplier = np.array([
(1. + inv_gwth_rt) ** (year - y_) for y_ in range(year + 1)])
print multiplier
df['inv_value'][year] = np.sum(
np.array(df['contrib_yearly'][0: year + 1]) * multiplier)
df['monthly_inv_income'] = safe_withdrw_rate * df['inv_value'] / 12.
df['monthly_inv_income_w_spouse'] = df['monthly_inv_income'] * 1.5
return df
if __name__ == "__main__":
df = ss_calc(15e3, .03, 10, .03)
ss_benefit_monthly = 939.00
ss_benefit_w_spouse_monthly = 1.5 * ss_benefit_monthly
| gpl-2.0 | -1,677,530,639,553,851,100 | 31.705882 | 75 | 0.57554 | false | 2.949602 | false | false | false |
pacoqueen/bbinn | PyChart-1.39/demos/linestyles.py | 1 | 1258 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
#
# Copyright (C) 2000-2005 by Yasushi Saito ([email protected])
#
# Pychart is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Pychart is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from pychart import *
import pychart.doc_support
import chartdemo
import re
can = canvas.default_canvas()
x = 100
y = 500
def drawLine(style):
global x, y
name = pychart.doc_support.stringify_value(style)
name = re.sub("line_style\\.", "", name)
name = pychart.doc_support.break_string(name)
can.line(style, x, y, x+40, y)
#print "name=", name
height = font.text_height(name)[0] + 5
tb = text_box.T(text=name, loc=(x, y-height), line_style=None)
x = x + 60
tb.draw()
for style in line_style.standards.list():
drawLine(style)
if x >= chartdemo.MaxWidth:
x=100
y=y-40
| gpl-2.0 | 5,223,164,027,098,408,000 | 26.347826 | 72 | 0.68283 | false | 3.201018 | false | false | false |
Psycojoker/wanawana | wanawana/settings.py | 1 | 2687 | """
Django settings for wanawana project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w2=4yi@cyc@vsio@$tvz$%&_po6si@533=cwh5kr2dk#pd69)v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'django_extensions',
'debug_toolbar',
'django_pdb',
'wanawana',
'users',
'events',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware',
)
ROOT_URLCONF = 'wanawana.urls'
TEMPLATE_LOADERS = (
'hamlpy.template.loaders.HamlPyFilesystemLoader',
'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
WSGI_APPLICATION = 'wanawana.wsgi.application'
# Email configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
try:
from settings_local import *
except ImportError:
pass
| gpl-3.0 | 4,815,851,771,319,772,000 | 23.87963 | 71 | 0.723483 | false | 3.305043 | false | false | false |
jasper-meyer/Platformer | platformer.py | 1 | 3751 | """
platformer.py
Author: Jasper Meyer
Credit: You, the internet, Brendan
Assignment:
Write and submit a program that implements the sandbox platformer game:
https://github.com/HHS-IntroProgramming/Platformer
"""
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
SCREEN_WIDTH = 1080
SCREEN_HEIGHT = 720
myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
black = Color(0, 1)
backcol = Color(0xd9ffcc, 1.0)
purp = Color(0x9900cc, 1.0)
blue = Color(0x3399ff,1.0)
noline = LineStyle(0, black)
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, backcol)
bg = Sprite(bg_asset, (0,0))
thinline = LineStyle(1, black)
sq = RectangleAsset (75,75, noline, black)
wub=0
pup=0
mousex=0
mousey=0
mousexround=0
mouseyround=0
play = RectangleAsset (25,50, noline, purp)
spr = RectangleAsset (20,10, noline, blue)
vy=0
player=0
acc = 0
ti = 0
rupx=0
lupx=0
vx=0
up=0
upup=0
stop = 0
shutup=0
spring = 0
sub = 0
springlist = []
def wup(event):
global wub
global mousexround
global mouseyround
wub = 1
if wub == 1:
mousexround=mousex-((mousex)%75)
mouseyround=mousey-((mousey)%75)
block = Sprite (sq, (mousexround, mouseyround))
def mousemo(event):
global mousex
global mousey
mousex=event.x
mousey=event.y
def spri(event):
global spring
global mousex
global mousey
global mouseyround
global sub
global springlist
sub =1
if sub == 1:
mouseyround=mousey-((mousey)%75)+65
springlist.append (Sprite (spr, (mousex, mouseyround)))
def pup(event):
global pub
global mousex
global mouseyround
global player
pub = 1
if pub == 1:
mouseyround=mousey-((mousey)%75)+25
if player == 0:
player = Sprite (play, (mousex, mouseyround))
def rup(event):
global rupx
rupx=1
def lup(event):
global lupx
lupx=1
def uup(event):
global up
up=1
def step():
if player != 0:
global vy
global acc
global ti
global rupx
global vx
global lupx
global up
global upup
global stop
global shutup
global springlist
global player
acc = 0.02
for s in springlist:
if player.collidingWith(s):
vy=-50+vy
vx=-vx
if stop == 0:
ti=ti+.5
if upup==4.5:
vy = (0.2*ti)-upup
else:
vy = (0.2*ti)
player.y=player.y+vy
player.x=player.x+vx
if rupx == 1:
vx=vx+1.5
lupx=0
rupx=0
if lupx == 1:
vx=vx-1.5
rupx=0
lupx=0
if vx > 3:
vx = 3
if vx < -3:
vx =-3
if up == 1:
upup = 4.5
up=0
if up == 0:
upup =4.5
col = player.collidingWithSprites(Sprite)
if len(col) > 1 and col[1].y<player.y+500:
stop=1
player.y=player.y-0.2
else:
stop=0
if stop == 1:
vy=0
ti=0
if len(col) > 1:
if col[1].y<player.y+50:
vx=-0.5*vx
if player.y > 2000:
player = 0
ti=0
myapp.listenKeyEvent('keyup', 's', spri)
myapp.listenKeyEvent('keydown', 'up arrow', uup)
myapp.listenKeyEvent('keydown', 'left arrow', lup)
myapp.listenKeyEvent('keydown', 'right arrow', rup)
myapp.listenKeyEvent('keyup', 'p', pup)
myapp.listenKeyEvent('keyup', 'w', wup)
myapp.listenMouseEvent('mousemove', mousemo)
myapp.run(step) | mit | 2,481,779,371,456,941,600 | 17.76 | 82 | 0.546254 | false | 3.084704 | false | false | false |
polaris-gslb/polaris-core | tests/test-polaris-pdns.py | 2 | 1937 | #!/usr/bin/env python3
import subprocess
import sys
import time
import json
POLARIS_PDNS_FILE = '/opt/polaris/bin/polaris-pdns'
def pretty_json(s):
d = json.loads(s)
return json.dumps(d, indent=4, separators=(',', ': '))
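# The test drives polaris-pdns as a child process: each request is written to its
# stdin as a single JSON line and each response is read back as one JSON line from
# its stdout, in the style of the PowerDNS remote-backend pipe interface.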
class TestPolarisPDNS:
def __init__(self, polaris_pdns_file):
self.proc = subprocess.Popen([ polaris_pdns_file ],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
def execute_query(self, query):
query += '\n'
self.proc.stdin.write(query.encode())
self.proc.stdin.flush()
output = self.proc.stdout.readline().decode()
return pretty_json(output)
def prepare_query(self, method, params):
q = {
'method': method,
'parameters': {
'qtype': params['qtype'],
'qname': params['qname'],
'remote': params['remote'],
'local': params['local'],
'real-remote': params['real-remote'],
'zone-id': params['zone-id']
}
}
return json.dumps(q)
if __name__ == '__main__':
t = TestPolarisPDNS(POLARIS_PDNS_FILE)
method = 'lookup'
params = {
'qtype': 'A',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
method = 'lookup'
params = {
'qtype': 'SOA',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
| bsd-3-clause | 3,387,221,317,398,084,600 | 24.486842 | 62 | 0.497161 | false | 3.398246 | false | false | false |
daniel20162016/my-first | read_xml_all/calcul_matrix_compare_je_good_192matrix.py | 1 | 6357 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
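# Overview: for each of the five occurrences of the word "je" in the recording, the
# script takes 8 overlapping windows (1920 samples long, step 1440), computes 24
# normalised FFT-band values per window via matrix_24_2/max_matrix_norm, and packs
# them into one 192-element vector; the five vectors are saved to
# je_compare_192_matrix.npz.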
# open a wave file
filename = 'francois_filon_pure_3.wav'
filename_1 ='francois_filon_pure_3.xml'
word ='je'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
#print 'word_start_point=',word_start_point
#print 'word_length_point=',word_length_point
#print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calculate the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other columns are the fft of the remaining windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calculate the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other columns are the fft of the remaining windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calculate the third matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other columns are the fft of the remaining windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calculate the fourth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other columns are the fft of the remaining windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#print 'matrix_all_step_4=',matrix_all_step_4
#==============================================================================
# this part is to calculate the fifth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other columns are the fft of the remaining windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
#print 'matrix_all_step_5=',matrix_all_step_5
np.savez('je_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
| mit | 1,603,875,107,597,510,700 | 38.484472 | 147 | 0.46028 | false | 2.768728 | false | false | false |
wholland/env | env.py | 1 | 6119 | #!/usr/bin/python
import argparse
import json
import shutil
import os
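# Dotfile/config deployment helper. A JSON definitions file lists entries with a
# name, group, type ("f" = file, "d" = directory), source and target path. "push"
# copies sources into the environment (backing up the old targets unless --unsafe),
# "pull" copies targets back into the source tree, and "revert" restores targets
# from the backups. --wimp only prints what would be done.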
def copy_file(src, dest, backup):
success = True
if not backup is None:
(backup_folder, backup_file) = os.path.split(backup)
print("Creating backup file for " + dest + " at " + backup)
try:
if not os.path.exists(backup_folder):
os.makedirs(backup_folder)
shutil.copyfile(dest, backup)
except Exception as e:
print("Backup failed: " + str(e))
success = False
if success:
(dest_folder, dest_file) = os.path.split(dest)
print("Copy file " + src + " to " + dest)
try:
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
shutil.copyfile(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
def copy_dir(src, dest, backup):
success = True
if not backup is None:
try:
print("Creating backup file for " + dest + " at " + backup)
            shutil.rmtree(backup, ignore_errors=True)
shutil.copytree(dest, backup)
except IOError as e:
print("Backup failed: " + str(e))
success = False
if success:
try:
print("Copy directory " + src + " to " + dest)
shutil.copytree(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
def push(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pushing " + definition["name"]);
src = os.path.expanduser(os.path.join(args.source, definition["source"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
backup = os.path.expanduser(os.path.join(args.backup, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if args.unsafe:
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_file(src, dest, backup)
else:
print("Would copy file. Src:" + src + " Dest:" + dest + " Backup:" + backup);
elif definition["type"].lower() == "d":
# Copy a directory
if args.verbose:
print(definition["name"] + ": Pushing directory from " + src + " to " + dest)
if args.unsafe:
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_dir(src, dest, backup)
else:
print("Would copy dir. Src:" + src + " Dest:" + dest + " Backup:" + backup);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def pull(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pulling " + definition["name"]);
src = os.path.expanduser(os.path.join(args.target, definition["target"]))
dest = os.path.expanduser(os.path.join(args.source, definition["source"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def revert(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
src = os.path.expanduser(os.path.join(args.backup, definition["target"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def main():
default_defs = "~/env/env.def"
default_source = "~/env/"
default_target = "~/"
default_backup = "~/.backup/env/"
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", help="Increase Verbosity")
parser.add_argument("-f", "--file", default=default_defs, help="Definition File to use")
parser.add_argument("-s", "--source", default=default_source, help="Override source root")
parser.add_argument("-t", "--target", default=default_target, help="Override target root")
parser.add_argument("-w", "--wimp", action="store_true", help="Don't actually make any changes (implies -v)")
subparsers = parser.add_subparsers()
parser_push = subparsers.add_parser("push", help="Push configs into environment")
parser_push.add_argument("-u", "--unsafe", action="store_true", help="No backups Created")
parser_push.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_push.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_push.add_argument("categories", nargs=argparse.REMAINDER)
parser_push.set_defaults(func=push)
parser_pull = subparsers.add_parser("pull", help="Pull configs from environment")
parser_pull.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_pull.add_argument("categories", nargs=argparse.REMAINDER)
parser_pull.set_defaults(func=pull)
parser_revert = subparsers.add_parser("revert", help="Revert configs from backups")
parser_revert.add_argument("-c", "--cleanup", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_revert.add_argument("categories", nargs=argparse.REMAINDER)
parser_revert.set_defaults(func=revert)
args = parser.parse_args()
if args.wimp:
args.verbose = True
args.func(args)
if __name__ == "__main__":
main();
| mit | 4,431,598,936,096,710,000 | 34.575581 | 110 | 0.648962 | false | 3.113995 | false | false | false |
jailuthra/misc | python/quicksort.py | 1 | 1066 | import sys
import random
comparisons = 0
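# Usage: quicksort.py <input-file>, where the file holds whitespace-separated
# integers. The script sorts them with a randomised quicksort and prints the
# number of comparisons performed (r - l added for every partition step).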
def main():
global comparisons
with open(sys.argv[1], 'r') as f:
arr = [int(x) for x in f.read().split()]
quicksort(arr, 0, len(arr)-1)
# print(arr)
print(comparisons)
def getPivot(arr, l, r):
first = arr[l]
mid = arr[(l+r)//2]
last = arr[r]
if first <= mid <= last or last <= mid <= first:
return (l+r)//2
elif mid <= first <= last or last <= first <= mid:
return l
else:
return r
def partition(arr, l, r):
k = getPivot(arr, l, r)
k = random.randint(l, r)
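    # note: the random pivot chosen here overrides the median-of-three pivot
    # returned by getPivot() on the previous line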
pivot = arr[k]
arr[k], arr[l] = arr[l], arr[k]
i = l+1
for j in range(l+1, r+1):
if arr[j] < pivot:
arr[j], arr[i] = arr[i], arr[j]
i += 1
arr[l], arr[i-1] = arr[i-1], arr[l]
return i-1
def quicksort(arr, l, r):
if r - l < 0:
return
global comparisons
comparisons += r - l
p = partition(arr, l, r)
quicksort(arr, l, p-1)
quicksort(arr, p+1, r)
if __name__ == '__main__':
main()
| mit | 7,158,383,021,174,650,000 | 21.208333 | 54 | 0.5 | false | 2.812665 | false | false | false |
mistercrunch/panoramix | superset/views/base_api.py | 2 | 21953 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import logging
from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type, Union
from apispec import APISpec
from apispec.exceptions import DuplicateComponentNameError
from flask import Blueprint, g, Response
from flask_appbuilder import AppBuilder, Model, ModelRestApi
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.filters import BaseFilter, Filters
from flask_appbuilder.models.sqla.filters import FilterStartsWith
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from marshmallow import fields, Schema
from sqlalchemy import and_, distinct, func
from sqlalchemy.orm.query import Query
from superset.extensions import db, event_logger, security_manager
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.schemas import error_payload_content
from superset.sql_lab import Query as SqllabQuery
from superset.stats_logger import BaseStatsLogger
from superset.typing import FlaskResponse
from superset.utils.core import time_function
logger = logging.getLogger(__name__)
get_related_schema = {
"type": "object",
"properties": {
"page_size": {"type": "integer"},
"page": {"type": "integer"},
"include_ids": {"type": "array", "items": {"type": "integer"}},
"filter": {"type": "string"},
},
}
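# For illustration: clients pass this schema rison-encoded in the "q" query
# parameter, e.g. GET /api/v1/chart/related/owners?q=(filter:ja,page:0,page_size:25)
# (the exact URL depends on which endpoint mounts this API).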
class RelatedResultResponseSchema(Schema):
value = fields.Integer(description="The related item identifier")
text = fields.String(description="The related item string representation")
class RelatedResponseSchema(Schema):
count = fields.Integer(description="The total number of related values")
result = fields.List(fields.Nested(RelatedResultResponseSchema))
class DistinctResultResponseSchema(Schema):
text = fields.String(description="The distinct item")
class DistincResponseSchema(Schema):
count = fields.Integer(description="The total number of distinct values")
result = fields.List(fields.Nested(DistinctResultResponseSchema))
def statsd_metrics(f: Callable[..., Any]) -> Callable[..., Any]:
"""
Handle sending all statsd metrics from the REST API
"""
def wraps(self: "BaseSupersetModelRestApi", *args: Any, **kwargs: Any) -> Response:
try:
duration, response = time_function(f, self, *args, **kwargs)
except Exception as ex:
self.incr_stats("error", f.__name__)
raise ex
self.send_stats_metrics(response, f.__name__, duration)
return response
return functools.update_wrapper(wraps, f)
class RelatedFieldFilter:
# data class to specify what filter to use on a /related endpoint
# pylint: disable=too-few-public-methods
def __init__(self, field_name: str, filter_class: Type[BaseFilter]):
self.field_name = field_name
self.filter_class = filter_class
class BaseFavoriteFilter(BaseFilter): # pylint: disable=too-few-public-methods
"""
    Base custom filter for the GET list that filters dashboards and slices
    by whether the current user has favorited them
"""
name = _("Is favorite")
arg_name = ""
class_name = ""
""" The FavStar class_name to user """
model: Type[Union[Dashboard, Slice, SqllabQuery]] = Dashboard
""" The SQLAlchemy model """
def apply(self, query: Query, value: Any) -> Query:
# If anonymous user filter nothing
if security_manager.current_user is None:
return query
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(
FavStar.user_id == g.user.get_id(),
FavStar.class_name == self.class_name,
)
)
if value:
return query.filter(and_(self.model.id.in_(users_favorite_query)))
return query.filter(and_(~self.model.id.in_(users_favorite_query)))
class BaseSupersetModelRestApi(ModelRestApi):
"""
Extends FAB's ModelResApi to implement specific superset generic functionality
"""
csrf_exempt = False
method_permission_name = {
"bulk_delete": "delete",
"data": "list",
"data_from_cache": "list",
"delete": "delete",
"distinct": "list",
"export": "mulexport",
"import_": "add",
"get": "show",
"get_list": "list",
"info": "list",
"post": "add",
"put": "edit",
"refresh": "edit",
"related": "list",
"related_objects": "list",
"schemas": "list",
"select_star": "list",
"table_metadata": "list",
"test_connection": "post",
"thumbnail": "list",
"viz_types": "list",
}
order_rel_fields: Dict[str, Tuple[str, str]] = {}
"""
Impose ordering on related fields query::
order_rel_fields = {
"<RELATED_FIELD>": ("<RELATED_FIELD_FIELD>", "<asc|desc>"),
...
}
""" # pylint: disable=pointless-string-statement
related_field_filters: Dict[str, Union[RelatedFieldFilter, str]] = {}
"""
Declare the filters for related fields::
related_fields = {
"<RELATED_FIELD>": <RelatedFieldFilter>)
}
""" # pylint: disable=pointless-string-statement
filter_rel_fields: Dict[str, BaseFilter] = {}
"""
Declare the related field base filter::
filter_rel_fields_field = {
"<RELATED_FIELD>": "<FILTER>")
}
""" # pylint: disable=pointless-string-statement
allowed_rel_fields: Set[str] = set()
"""
Declare a set of allowed related fields that the `related` endpoint supports
""" # pylint: disable=pointless-string-statement
text_field_rel_fields: Dict[str, str] = {}
"""
Declare an alternative for the human readable representation of the Model object::
text_field_rel_fields = {
"<RELATED_FIELD>": "<RELATED_OBJECT_FIELD>"
}
""" # pylint: disable=pointless-string-statement
allowed_distinct_fields: Set[str] = set()
openapi_spec_component_schemas: Tuple[Type[Schema], ...] = tuple()
"""
Add extra schemas to the OpenAPI component schemas section
""" # pylint: disable=pointless-string-statement
add_columns: List[str]
edit_columns: List[str]
list_columns: List[str]
show_columns: List[str]
responses = {
"400": {"description": "Bad request", "content": error_payload_content},
"401": {"description": "Unauthorized", "content": error_payload_content},
"403": {"description": "Forbidden", "content": error_payload_content},
"404": {"description": "Not found", "content": error_payload_content},
"422": {
"description": "Could not process entity",
"content": error_payload_content,
},
"500": {"description": "Fatal error", "content": error_payload_content},
}
def __init__(self) -> None:
# Setup statsd
self.stats_logger = BaseStatsLogger()
# Add base API spec base query parameter schemas
if self.apispec_parameter_schemas is None: # type: ignore
self.apispec_parameter_schemas = {}
self.apispec_parameter_schemas["get_related_schema"] = get_related_schema
if self.openapi_spec_component_schemas is None:
self.openapi_spec_component_schemas = ()
self.openapi_spec_component_schemas = self.openapi_spec_component_schemas + (
RelatedResponseSchema,
DistincResponseSchema,
)
super().__init__()
def add_apispec_components(self, api_spec: APISpec) -> None:
"""
        Adds extra OpenAPI schema spec components; these are declared
        on the `openapi_spec_component_schemas` class property
"""
for schema in self.openapi_spec_component_schemas:
try:
api_spec.components.schema(
schema.__name__, schema=schema,
)
except DuplicateComponentNameError:
pass
super().add_apispec_components(api_spec)
def create_blueprint(
self, appbuilder: AppBuilder, *args: Any, **kwargs: Any
) -> Blueprint:
self.stats_logger = self.appbuilder.get_app.config["STATS_LOGGER"]
return super().create_blueprint(appbuilder, *args, **kwargs)
def _init_properties(self) -> None:
model_id = self.datamodel.get_pk_name()
if self.list_columns is None and not self.list_model_schema:
self.list_columns = [model_id]
if self.show_columns is None and not self.show_model_schema:
self.show_columns = [model_id]
if self.edit_columns is None and not self.edit_model_schema:
self.edit_columns = [model_id]
if self.add_columns is None and not self.add_model_schema:
self.add_columns = [model_id]
super()._init_properties()
def _get_related_filter(
self, datamodel: SQLAInterface, column_name: str, value: str
) -> Filters:
filter_field = self.related_field_filters.get(column_name)
if isinstance(filter_field, str):
filter_field = RelatedFieldFilter(cast(str, filter_field), FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = datamodel.get_filters(search_columns)
base_filters = self.filter_rel_fields.get(column_name)
if base_filters:
filters.add_filter_list(base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_distinct_filter(self, column_name: str, value: str) -> Filters:
filter_field = RelatedFieldFilter(column_name, FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = self.datamodel.get_filters(search_columns)
filters.add_filter_list(self.base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_text_for_model(self, model: Model, column_name: str) -> str:
if column_name in self.text_field_rel_fields:
model_column_name = self.text_field_rel_fields.get(column_name)
if model_column_name:
return getattr(model, model_column_name)
return str(model)
def _get_result_from_rows(
self, datamodel: SQLAInterface, rows: List[Model], column_name: str
) -> List[Dict[str, Any]]:
return [
{
"value": datamodel.get_pk_value(row),
"text": self._get_text_for_model(row, column_name),
}
for row in rows
]
def _add_extra_ids_to_result(
self,
datamodel: SQLAInterface,
column_name: str,
ids: List[int],
result: List[Dict[str, Any]],
) -> None:
if ids:
# Filter out already present values on the result
values = [row["value"] for row in result]
ids = [id_ for id_ in ids if id_ not in values]
pk_col = datamodel.get_pk()
# Fetch requested values from ids
extra_rows = db.session.query(datamodel.obj).filter(pk_col.in_(ids)).all()
result += self._get_result_from_rows(datamodel, extra_rows, column_name)
def incr_stats(self, action: str, func_name: str) -> None:
"""
        Proxy function for statsd.incr to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
"""
self.stats_logger.incr(f"{self.__class__.__name__}.{func_name}.{action}")
def timing_stats(self, action: str, func_name: str, value: float) -> None:
"""
        Proxy function for statsd.timing to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
:param value: A float with the time it took for the endpoint to execute
"""
self.stats_logger.timing(
f"{self.__class__.__name__}.{func_name}.{action}", value
)
def send_stats_metrics(
self, response: Response, key: str, time_delta: Optional[float] = None
) -> None:
"""
Helper function to handle sending statsd metrics
:param response: flask response object, will evaluate if it was an error
:param key: The function name
:param time_delta: Optional time it took for the endpoint to execute
"""
if 200 <= response.status_code < 400:
self.incr_stats("success", key)
else:
self.incr_stats("error", key)
if time_delta:
self.timing_stats("time", key, time_delta)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.info",
object_ref=False,
log_to_statsd=False,
)
def info_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB _info endpoint
"""
duration, response = time_function(super().info_headless, **kwargs)
self.send_stats_metrics(response, self.info.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get",
object_ref=False,
log_to_statsd=False,
)
def get_headless(self, pk: int, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET endpoint
"""
duration, response = time_function(super().get_headless, pk, **kwargs)
self.send_stats_metrics(response, self.get.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_list",
object_ref=False,
log_to_statsd=False,
)
def get_list_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET list endpoint
"""
duration, response = time_function(super().get_list_headless, **kwargs)
self.send_stats_metrics(response, self.get_list.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
object_ref=False,
log_to_statsd=False,
)
def post_headless(self) -> Response:
"""
Add statsd metrics to builtin FAB POST endpoint
"""
duration, response = time_function(super().post_headless)
self.send_stats_metrics(response, self.post.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
object_ref=False,
log_to_statsd=False,
)
def put_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB PUT endpoint
"""
duration, response = time_function(super().put_headless, pk)
self.send_stats_metrics(response, self.put.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
object_ref=False,
log_to_statsd=False,
)
def delete_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB DELETE endpoint
"""
duration, response = time_function(super().delete_headless, pk)
self.send_stats_metrics(response, self.delete.__name__, duration)
return response
@expose("/related/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def related(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get related fields data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Related column data
content:
application/json:
                  schema:
                    $ref: "#/components/schemas/RelatedResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_rel_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._handle_page_args(args)
try:
datamodel = self.datamodel.get_related_interface(column_name)
except KeyError:
return self.response_404()
page, page_size = self._sanitize_page_args(page, page_size)
# handle ordering
order_field = self.order_rel_fields.get(column_name)
if order_field:
order_column, order_direction = order_field
else:
order_column, order_direction = "", ""
# handle filters
filters = self._get_related_filter(datamodel, column_name, args.get("filter"))
# Make the query
_, rows = datamodel.query(
filters, order_column, order_direction, page=page, page_size=page_size
)
# produce response
result = self._get_result_from_rows(datamodel, rows, column_name)
# If ids are specified make sure we fetch and include them on the response
ids = args.get("include_ids")
self._add_extra_ids_to_result(datamodel, column_name, ids, result)
return self.response(200, count=len(result), result=result)
@expose("/distinct/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def distinct(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get distinct values from field data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Distinct field data
content:
application/json:
                  schema:
                    $ref: "#/components/schemas/DistincResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_distinct_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._sanitize_page_args(*self._handle_page_args(args))
# Create generic base filters with added request filter
filters = self._get_distinct_filter(column_name, args.get("filter"))
# Make the query
query_count = self.appbuilder.get_session.query(
func.count(distinct(getattr(self.datamodel.obj, column_name)))
)
count = self.datamodel.apply_filters(query_count, filters).scalar()
if count == 0:
return self.response(200, count=count, result=[])
query = self.appbuilder.get_session.query(
distinct(getattr(self.datamodel.obj, column_name))
)
# Apply generic base filters with added request filter
query = self.datamodel.apply_filters(query, filters)
# Apply sort
query = self.datamodel.apply_order_by(query, column_name, "asc")
# Apply pagination
result = self.datamodel.apply_pagination(query, page, page_size).all()
# produce response
result = [
{"text": item[0], "value": item[0]}
for item in result
if item[0] is not None
]
return self.response(200, count=count, result=result)
| apache-2.0 | 3,086,351,560,622,024,000 | 36.398637 | 88 | 0.605475 | false | 4.016282 | false | false | false |
sippy/voiptests | test_cases/reinv_brkn2.py | 1 | 2000 | # Copyright (c) 2016 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from test_cases.reinv_fail import a_test_reinv_fail
from test_cases.reinvite import b_test_reinvite
class a_test_reinv_brkn2(a_test_reinv_fail):
cld = 'bob_reinv_brkn2'
cli = 'alice_reinv_brkn2'
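    # Simulates a broken re-INVITE: the "c=" (connection) lines are stripped from a
    # copy of the local SDP before re-inviting, and the original SDP is restored
    # afterwards.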
def reinvite(self, ua):
if not self.connect_done or self.disconnect_done:
return
sdp_body_bak = ua.lSDP
ua.lSDP = sdp_body_bak.getCopy()
for sect in ua.lSDP.content.sections:
sect.c_header = None
rval = a_test_reinv_fail.reinvite(self, ua)
ua.lSDP = sdp_body_bak
return rval
class b_test_reinv_brkn2(b_test_reinvite):
cli = 'bob_reinv_brkn2'
| bsd-2-clause | 1,664,678,858,462,998,500 | 43.444444 | 82 | 0.74 | false | 3.831418 | true | false | false |
csmart/jockey-yum | setup.py | 1 | 1204 | #!/usr/bin/env python
# (c) 2007 Canonical Ltd.
# Author: Martin Pitt <[email protected]>
# This script needs python-distutils-extra, an extension to the standard
# distutils which provides i18n, icon support, etc.
# https://launchpad.net/python-distutils-extra
from glob import glob
from distutils.version import StrictVersion
try:
import DistUtilsExtra.auto
except ImportError:
import sys
print >> sys.stderr, 'To build Jockey you need https://launchpad.net/python-distutils-extra'
sys.exit(1)
assert StrictVersion(DistUtilsExtra.auto.__version__) >= '2.4', 'needs DistUtilsExtra.auto >= 2.4'
DistUtilsExtra.auto.setup(
name='jockey',
version='0.9.3',
description='UI for managing third-party and non-free drivers',
url='https://launchpad.net/jockey',
license='GPL v2 or later',
author='Martin Pitt',
author_email='[email protected]',
data_files = [
('share/jockey', ['backend/jockey-backend']),
('share/jockey', ['gtk/jockey-gtk.ui']), # bug in DistUtilsExtra.auto 2.2
('share/jockey', glob('kde/*.ui')), # don't use pykdeuic4
],
scripts = ['gtk/jockey-gtk', 'kde/jockey-kde', 'text/jockey-text'],
)
| gpl-2.0 | 4,649,148,657,068,648,000 | 31.540541 | 98 | 0.680233 | false | 3.143603 | false | false | false |
gersolar/stations | stations_configuration/settings.py | 1 | 5198 | # Only Celery settings for stations project.
#import djcelery
#djcelery.setup_loader()
#BROKER_TRANSPORT = 'amqplib'
#BROKER_URL = 'django://'
##CELERY_RESULT_BACKEND = 'database'
#CELERY_DEFAULT_QUEUE = "default"
#CELERY_QUEUES = {
# "default": {
# "binding_key": "task.#",
# },
# "mailer": {
# "binding_key": "task.#",
# },
#}
#CELERY_ROUTES = {'downloader.tasks.check_email_schedule': {'queue': 'mailer'}}
#CELERY_TIMEZONE = 'UTC'
#CELERY_CONCURRENCY = 7
# Django settings for stations project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'stations.sqlite3',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC' # 'America/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fax%_3d9oshwed$!3s)jdn876jpj#5u&50m$6naau#&=zpyn%0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stations_configuration.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'stations_configuration.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'stations',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_DIRS = ('templates',)
| mit | 4,052,354,373,346,144,000 | 28.039106 | 85 | 0.729127 | false | 3.263026 | false | false | false |
lizardsystem/lizard-measure | lizard_measure/migrations/0010_auto__del_score__del_measuringrod__del_field_measurestatusmoment_is_pl.py | 1 | 23606 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Score'
db.delete_table('lizard_measure_score')
# Deleting model 'MeasuringRod'
db.delete_table('lizard_measure_measuringrod')
# Deleting field 'MeasureStatusMoment.is_planning'
db.delete_column('lizard_measure_measurestatusmoment', 'is_planning')
# Deleting field 'MeasureStatusMoment.date'
db.delete_column('lizard_measure_measurestatusmoment', 'date')
# Adding field 'MeasureStatusMoment.planning_date'
db.add_column('lizard_measure_measurestatusmoment', 'planning_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'MeasureStatusMoment.realisation_date'
db.add_column('lizard_measure_measurestatusmoment', 'realisation_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.total_costs'
db.delete_column('lizard_measure_measure', 'total_costs')
# Adding field 'Measure.valid'
db.add_column('lizard_measure_measure', 'valid', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True), keep_default=False)
# Adding field 'Measure.geom'
db.add_column('lizard_measure_measure', 'geom', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding model 'Score'
db.create_table('lizard_measure_score', (
('gep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('area_ident', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('ascending', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('mep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_measure.MeasuringRod'])),
('limit_bad_insufficient', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_area.Area'], null=True, blank=True)),
('target_2027', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('limit_insufficient_moderate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('target_2015', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['Score'])
# Adding model 'MeasuringRod'
db.create_table('lizard_measure_measuringrod', (
('group', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sign', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sub_measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('unit', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['MeasuringRod'])
# Adding field 'MeasureStatusMoment.is_planning'
db.add_column('lizard_measure_measurestatusmoment', 'is_planning', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'MeasureStatusMoment.date'
db.add_column('lizard_measure_measurestatusmoment', 'date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'MeasureStatusMoment.planning_date'
db.delete_column('lizard_measure_measurestatusmoment', 'planning_date')
# Deleting field 'MeasureStatusMoment.realisation_date'
db.delete_column('lizard_measure_measurestatusmoment', 'realisation_date')
# Adding field 'Measure.total_costs'
db.add_column('lizard_measure_measure', 'total_costs', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.valid'
db.delete_column('lizard_measure_measure', 'valid')
# Deleting field 'Measure.geom'
db.delete_column('lizard_measure_measure', 'geom')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.fundingorganization': {
'Meta': {'object_name': 'FundingOrganization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Organization']"}),
'percentage': ('django.db.models.fields.FloatField', [], {})
},
'lizard_measure.krwstatus': {
'Meta': {'object_name': 'KRWStatus'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.krwwatertype': {
'Meta': {'object_name': 'KRWWatertype'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measure': {
'Meta': {'ordering': "('id',)", 'object_name': 'Measure'},
'aggregation_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'area_measure_set'", 'blank': 'True', 'to': "orm['lizard_area.Area']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureCategory']", 'symmetrical': 'False', 'blank': 'True'}),
'datetime_in_source': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'executive': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'executive_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'exploitation_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'funding_organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Organization']", 'through': "orm['lizard_measure.FundingOrganization']", 'symmetrical': 'False'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
'geometry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObject']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'import_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'import_source': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'initiator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initiator_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'investment_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_KRW_measure': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_indicator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'measure_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureType']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']", 'null': 'True', 'blank': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasurePeriod']", 'null': 'True', 'blank': 'True'}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'responsible_department': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'status_moments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureStatus']", 'through': "orm['lizard_measure.MeasureStatusMoment']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Unit']"}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {}),
'waterbodies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.WaterBody']", 'symmetrical': 'False', 'blank': 'True'})
},
'lizard_measure.measurecategory': {
'Meta': {'object_name': 'MeasureCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measureperiod': {
'Meta': {'ordering': "('start_date', 'end_date')", 'object_name': 'MeasurePeriod'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measurestatus': {
'Meta': {'ordering': "('-value',)", 'object_name': 'MeasureStatus'},
'color': ('lizard_map.models.ColorField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'lizard_measure.measurestatusmoment': {
'Meta': {'ordering': "('measure__id', 'status__value')", 'object_name': 'MeasureStatusMoment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'exploitation_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investment_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'planning_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'realisation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureStatus']"})
},
'lizard_measure.measuretype': {
'Meta': {'ordering': "('code',)", 'object_name': 'MeasureType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'combined_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'harmonisation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'klass': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'subcategory': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Unit']", 'symmetrical': 'False', 'blank': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.organization': {
'Meta': {'ordering': "('description',)", 'unique_together': "(('source', 'code'),)", 'object_name': 'Organization'},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.steeringparameter': {
'Meta': {'object_name': 'SteeringParameter'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']"}),
'fews_parameter': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target_maximum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'target_minimum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.unit': {
'Meta': {'object_name': 'Unit'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'conversion_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.waterbody': {
'Meta': {'object_name': 'WaterBody'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'}),
'area_ident': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'krw_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWStatus']", 'null': 'True', 'blank': 'True'}),
'krw_watertype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWWatertype']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_measure']
| gpl-3.0 | 1,789,020,140,202,263,800 | 77.949833 | 219 | 0.57604 | false | 3.614454 | false | false | false |
konrad/kufpybio | kufpybiotools/generate_igr_gff.py | 1 | 1881 | #!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <[email protected]>"
__copyright__ = "2013 by Konrad Foerstner <[email protected]>"
__license__ = "ISC license"
__email__ = "[email protected]"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
parser.add_argument("--margin", type=int, default=0)
parser.add_argument("--plus_only", default=False, action="store_true")
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
strands = ["+", "-"]
if args.plus_only is True:
strands = ["+"]
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
start = start + args.margin
end = end - args.margin
if end <= start:
continue
for strand in strands:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
| isc | -7,492,041,156,369,239,000 | 30.35 | 70 | 0.61563 | false | 3.053571 | false | false | false |
jakobmoss/tsa | utils/makeweights.py | 1 | 2350 | # -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time Series Analysis -- Generate statistical weights from scatter
#
# Author: Jakob Rørsted Mosumgaard
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###############################################################################
# Modules
###############################################################################
from __future__ import print_function, with_statement, division
import numpy as np
import bottleneck as bn
###############################################################################
# Functions
###############################################################################
def genweight(datname, dpath, wpath):
"""
Combine time series with statistical weights calculated from scatter
Arguments:
- `datname`: Identifier of data file
- `dpath` : Path to data file (time series).
- `wpath` : Path to scatter file (with same time points!)
"""
# Pretty print
print('Generating weights for {0} !'.format(dpath))
# Load data and weights
t, d = np.loadtxt(dpath, unpack=True)
tt, sig = np.loadtxt(wpath, unpack=True)
# Check that times are indeed the same
tdif = t - tt
if tdif.any() != 0:
print('Error! Not the same time points! Quitting!')
exit()
# Moving variance (Hans: M = 50 - 100)
M = 70
movstd = bn.move_std(sig, M, min_count=1)
movvar = np.square(movstd)
# Remove first point
x = 1
t = t[x:]
d = d[x:]
movvar = movvar[x:]
# Calculate weights from scatter (1 / variance)
w = np.divide(1.0, movvar)
# Save
    outfile = datname + '_with-weights.txt'
np.savetxt(outfile, np.transpose([t, d, w]), fmt='%.15e', delimiter='\t')
# Done!
print('Done!\n')
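# Example call (a sketch only -- the file names are hypothetical; the data file
# and the scatter file must contain the same time points):
#
#   genweight('star01', 'data/star01.txt', 'star01-high.txt')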
###############################################################################
# Script
###############################################################################
if __name__ == "__main__":
# Definitions
datdir = '../../data/'
ext = '.txt'
append = '-high'
# Run for star 1
star = 'star01'
genweight(star, datdir + star + ext, star + append + ext)
# Run for star 2
star = 'star02'
genweight(star, datdir + star + ext, star + append + ext)
| mit | 56,344,320,398,218,080 | 28.3625 | 79 | 0.43593 | false | 4.302198 | false | false | false |
erstis-go-botting/sexy-bot | misc.py | 1 | 1888 | import os
# Checks whether settings.ini should be generated. If universe, username and password are not given, a settings.ini with the default account is written (a usage sketch is given at the bottom of this file).
# The settings_generator will only accept universe 82 if the flag argument is given as True (to make sure that universe 82 is really intended).
def settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if (os.path.isfile('settings/settings.ini')):
print("settings file found, stopping now.")
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
if not (os.path.isdir('settings')):
        os.makedirs('settings')
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
def force_settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if not (os.path.isfile('settings/settings.ini')):
settings_generator(universe, username, password, flag)
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
#settings_generator()
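# Usage sketch (hypothetical credentials; flag=True is needed when universe 82
# is really intended, otherwise the defaults are written):
#
#   settings_generator(82, 'myName', 'myPassword', flag=True)
#   force_settings_generator(45, 'myName', 'myPassword')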
| mit | 7,923,967,946,050,228,000 | 50.027027 | 146 | 0.678496 | false | 4.16777 | false | false | false |
ActiveState/code | recipes/Python/275366_Email_address_leech/recipe-275366.py | 1 | 1624 | import re
def test():
text = \
''' You can contact us at [email protected]
or at yourname AT server DOT site DOT com.
Also at o u r n a m e @ s e r v e r dot s i t e dot c o m
and t.h.e.i.r.n.a.m.e at server dot s/i/t/e DOT COM.
'''
for email in emailLeech(text): print email
DOMAINS = ["com","edu","net","org","gov","us"] #.. and so on
FLAGS = re.IGNORECASE | re.VERBOSE
AT = r'(?: @ | \b A \s* T \b)'
ADDRESSPART = r'\b (?: \w+ | \w (?:(?:\s+|\W) \w)*) \b'
DOMAIN = r'(?:%s)' % '|'.join(["(?:\s*|\W)".join(domain) for domain in DOMAINS])
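# e.g. for "com" the joined pattern becomes r'c(?:\s*|\W)o(?:\s*|\W)m', so
# spaced-out or punctuated spellings such as "c o m" still match.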
NONWORD = re.compile(r'\W+')
DOT_REGEX = re.compile(r'(?: \. | \b D \s* O \s* T \b)', FLAGS)
EMAIL_REGEX = re.compile(
(r'(?P<name>%s) \W* %s \W*' % (ADDRESSPART,AT)) +
r'(?P<site>(?: %s \W* %s \W*)+)' % (ADDRESSPART, DOT_REGEX.pattern) +
r'(?P<domain>%s)' % DOMAIN, FLAGS)
def emailLeech(text):
''' An iterator over recognized email addresses within text'''
while (True):
match = EMAIL_REGEX.search(text)
if not match: break
parts = [match.group("name")] + \
DOT_REGEX.split(match.group("site")) + \
[match.group("domain")]
# discard non word chars
parts = [NONWORD.sub('',part) for part in parts]
# discard all empty parts and make lowercase
parts = [part.lower() for part in parts if len(part)>0]
# join the parts
yield "%s@%s.%s" % (parts[0], '.'.join(parts[1:-1]), parts[-1])
text = text[match.end():]
if __name__ == '__main__': test()
| mit | 6,945,036,452,348,633,000 | 35.088889 | 80 | 0.513547 | false | 3.069943 | false | false | false |
mosdef-hub/foyer | foyer/tests/test_forcefield_parameters.py | 1 | 10029 | import numpy as np
import pytest
from foyer import Forcefield, forcefields
from foyer.exceptions import MissingForceError, MissingParametersError
from foyer.forcefield import get_available_forcefield_loaders
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn
@pytest.mark.skipif(
condition="load_GAFF"
not in map(lambda func: func.__name__, get_available_forcefield_loaders()),
reason="GAFF Plugin is not installed",
)
class TestForcefieldParameters(BaseTest):
@pytest.fixture(scope="session")
def gaff(self):
return forcefields.load_GAFF()
def test_gaff_missing_group(self, gaff):
with pytest.raises(ValueError):
gaff.get_parameters("missing", key=[])
def test_gaff_non_string_keys(self, gaff):
with pytest.raises(TypeError):
gaff.get_parameters("atoms", key=1)
def test_gaff_bond_parameters_gaff(self, gaff):
bond_params = gaff.get_parameters("harmonic_bonds", ["br", "ca"])
assert np.isclose(bond_params["length"], 0.19079)
assert np.isclose(bond_params["k"], 219827.36)
def test_gaff_bond_params_reversed(self, gaff):
assert gaff.get_parameters(
"harmonic_bonds", ["ca", "br"]
) == gaff.get_parameters("harmonic_bonds", ["ca", "br"])
def test_gaff_missing_bond_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_bonds", ["str1", "str2"])
def test_gaff_angle_parameters(self, gaff):
angle_params = gaff.get_parameters("harmonic_angles", ["f", "c1", "f"])
assert np.allclose(
[angle_params["theta"], angle_params["k"]],
[3.141592653589793, 487.0176],
)
def test_gaff_angle_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"harmonic_angles", ["f", "c2", "ha"]
).values()
),
list(
gaff.get_parameters(
"harmonic_angles", ["ha", "c2", "f"]
).values()
),
)
def test_gaff_missing_angle_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_angles", ["1", "2", "3"])
def test_gaff_periodic_proper_parameters(self, gaff):
periodic_proper_params = gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
)
assert np.allclose(periodic_proper_params["periodicity"], [2.0, 1.0])
assert np.allclose(
periodic_proper_params["k"], [9.414, 5.4392000000000005]
)
assert np.allclose(
periodic_proper_params["phase"],
[3.141592653589793, 3.141592653589793],
)
def test_gaff_periodic_proper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
).values()
),
list(
gaff.get_parameters(
"periodic_propers", ["hs", "sh", "c", "c3"]
).values()
),
)
def test_gaff_periodic_improper_parameters(self, gaff):
periodic_improper_params = gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
)
assert np.allclose(periodic_improper_params["periodicity"], [2.0])
assert np.allclose(periodic_improper_params["k"], [4.6024])
assert np.allclose(
periodic_improper_params["phase"], [3.141592653589793]
)
def test_gaff_periodic_improper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
).values()
),
list(
gaff.get_parameters(
"periodic_impropers", ["c", "o", "", "o"]
).values()
),
)
def test_gaff_proper_params_missing(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("periodic_impropers", ["a", "b", "c", "d"])
def test_gaff_scaling_factors(self, gaff):
assert gaff.lj14scale == 0.5
assert np.isclose(gaff.coulomb14scale, 0.833333333)
def test_opls_get_parameters_atoms(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", "opls_145")
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atoms_list(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", ["opls_145"])
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atom_class(self, oplsaa):
atom_params = oplsaa.get_parameters(
"atoms", "CA", keys_are_atom_classes=True
)
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_bonds(self, oplsaa):
bond_params = oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
)
assert bond_params["length"] == 0.146
assert bond_params["k"] == 334720.0
def test_opls_get_parameters_bonds_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_145", "opls_760"]
).values()
),
)
def test_opls_get_parameters_bonds_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["C_2", "O_2"], True
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["O_2", "C_2"], True
).values()
),
)
def test_opls_get_parameters_angle(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.0943950239, 585.76]
)
def test_opls_get_parameters_angle_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_167", "opls_772", "opls_166"]
).values()
),
)
def test_opls_get_parameters_angle_atom_classes(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["CA", "C_2", "CA"], keys_are_atom_classes=True
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.09439510239, 711.28]
)
def test_opls_get_parameters_angle_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles",
["CA", "C", "O"],
keys_are_atom_classes=True,
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles",
["O", "C", "CA"],
keys_are_atom_classes=True,
).values()
),
)
def test_opls_get_parameters_rb_proper(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["opls_215", "opls_215", "opls_235", "opls_269"]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[2.28446, 0.0, -2.28446, 0.0, 0.0, 0.0],
)
def test_get_parameters_rb_proper_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"rb_propers",
["opls_215", "opls_215", "opls_235", "opls_269"],
).values()
),
list(
oplsaa.get_parameters(
"rb_propers",
["opls_269", "opls_235", "opls_215", "opls_215"],
).values()
),
)
def test_opls_get_parameters_wildcard(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["", "opls_235", "opls_544", ""]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[30.334, 0.0, -30.334, 0.0, 0.0, 0.0],
)
def test_opls_missing_force(self, oplsaa):
with pytest.raises(MissingForceError):
oplsaa.get_parameters("periodic_propers", key=["a", "b", "c", "d"])
def test_opls_scaling_factors(self, oplsaa):
assert oplsaa.lj14scale == 0.5
assert oplsaa.coulomb14scale == 0.5
def test_missing_scaling_factors(self):
ff = Forcefield(forcefield_files=(get_fn("validate_customtypes.xml")))
with pytest.raises(AttributeError):
assert ff.lj14scale
with pytest.raises(AttributeError):
assert ff.coulomb14scale
| mit | 3,671,707,264,193,672,000 | 33.582759 | 79 | 0.5172 | false | 3.650892 | true | false | false |
ecell/ecell3 | ecell/pyecell/ecell/analysis/PathwayProxy.py | 1 | 13263 | #!/usr/bin/env python
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
"""
A program for handling and defining a pathway.
This program is the extension package for E-Cell System Version 3.
"""
__program__ = 'PathwayProxy'
__version__ = '1.0'
__author__ = 'Kazunari Kaizu <[email protected]>'
__copyright__ = ''
__license__ = ''
import ecell.eml
from ecell.ecssupport import *
from ecell.analysis.util import createVariableReferenceFullID
import copy
import numpy
class PathwayProxy:
def __init__( self, anEmlSupport, processList=None ):
'''
anEmlSupport: Eml support object
processList: (list) a list of process full path
'''
self.theEmlSupport = anEmlSupport
if processList:
self.setProcessList( processList )
else:
self.setProcessList( [] )
# end of __init__
def setProcessList( self, processList ):
'''
set and detect a pathway
processList: (list) a list of process full ID
'''
# check the existence of processes,
# and create relatedVariableList
self.__processList = []
self.__variableList = []
for processFullID in processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
self.__processList.append( processFullID )
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__processList.sort()
self.__variableList.sort()
# end of setProcessList
def getProcessList( self ):
'''
return processList
'''
return copy.copy( self.__processList )
# end of getProcessList
def addProcess( self, processFullID ):
'''
add a process to the pathway
processFullID: (str) a process full ID
'''
if not self.__processList.count( processFullID ) == 0:
return
# elif not ecell.eml.Eml.isEntityExist( processFullID ):
# return
# add process
self.__processList.append( processFullID )
self.__processList.sort()
# update the related variable list
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
return
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__variableList.sort()
# end of addProcess
def removeProcess( self, processIndexList ):
'''
remove processes from the pathway
processIndexList: (list) a list of indices of processes
'''
indexList = copy.copy( processIndexList )
indexList.sort()
indexList.reverse()
removedProcessList = []
for i in indexList:
if len( self.__processList ) > i:
removedProcessList.append( self.__processList.pop( i ) )
removedVariableList = []
for processFullID in removedProcessList:
# if not ecell.eml.Eml.isEntityExist( self.theEmlSupport, processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if removedVariableList.count( fullIDString ) == 0:
removedVariableList.append( fullIDString )
for processFullID in self.__processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if not removedVariableList.count( fullIDString ) == 0:
removedVariableList.remove( fullIDString )
for variableFullID in removedVariableList:
self.__variableList.remove( variableFullID )
# end of removeProcess
def take( self, processIndexList ):
'''
create and return a sub-pathway
processIndexList: (list) a list of indices of processes
return PathwayProxy
'''
processList = []
for i in processIndexList:
if len( self.__processList ) > i:
processList.append( self.__processList[ i ] )
subPathway = PathwayProxy( self.theEmlSupport, processList )
return subPathway
    # end of take
def getVariableList( self ):
'''
return relatedVariableList
'''
return copy.copy( self.__variableList )
# end of getVariableList
def removeVariable( self, variableIndexList ):
'''
remove variables from the pathway
variableIndexList: (list) a list of indices of variables
'''
indexList = copy.copy( variableIndexList )
indexList.sort()
indexList.reverse()
for i in indexList:
if len( self.__variableList ) > i:
self.__variableList.pop( i )
# end of removeVariable
def addVariable( self, variableFullID ):
'''
recover a removed variable to the pathway
variableFullID: (str) a variable full ID
'''
if not self.__variableList.count( variableFullID ) == 0:
return 1
# elif not ecell.eml.Eml.isEntityExist( variableFullID ):
# return 0
for processFullID in self.__processList:
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
                fullIDString = ecell.ecssupport.createFullIDString( fullID )
if fullIDString == variableFullID:
self.__variableList.append( variableFullID )
self.__variableList.sort()
return 1
return 0
    # end of addVariable
def getIncidentMatrix( self, mode=0 ):
'''
create the incident matrix (array)
mode: (0 or 1) 0 means that only the \'write\' variables are checked. 0 is set as default.
return incidentMatrix
'''
incidentMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ) )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
# should some warning message be showed?
continue
if mode:
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
incidentMatrix[ i ][ j ] = 1
else:
incidentMatrix[ i ][ j ] = 1
return incidentMatrix
# end of getIncidentMatrix
def getStoichiometryMatrix( self ):
'''
create the stoichiometry matrix (array)
return stoichiometryMatrix
'''
stoichiometryMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ), float )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
# should some warning message be showed?
continue
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
stoichiometryMatrix[ i ][ j ] += coeff
return stoichiometryMatrix
# end of getStoichiometryMatrix
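    # Note: getIncidentMatrix() and getStoichiometryMatrix() are both indexed as
    # [variable][process]; rows follow getVariableList() order, columns follow
    # getProcessList() order, and coefficients come from each process's
    # VariableReferenceList.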
def getReversibilityList( self ):
'''
check and return the reversibilities (isReversible) for processes
default value is 0, irreversible
return reversibilityList
'''
reversibilityList = []
for processFullID in self.__processList:
propertyList = self.theEmlSupport.getEntityPropertyList( processFullID )
if propertyList.count( 'isReversible' ) != 0:
# isReversible is handled as float
isReversible = float( self.theEmlSupport.getEntityProperty( processFullID + ':isReversible' )[ 0 ] )
reversibilityList.append( int( isReversible ) )
else:
# default value, irreversible
reversibilityList.append( 0 )
return reversibilityList
# end of getReversibilityList
# end of PathwayProxy
if __name__ == '__main__':
from emlsupport import EmlSupport
import sys
import os
def main( filename ):
anEmlSupport = EmlSupport( filename )
pathwayProxy = anEmlSupport.createPathwayProxy()
print 'process list ='
print pathwayProxy.getProcessList()
print 'related variable list ='
print pathwayProxy.getVariableList()
print 'incident matrix ='
print pathwayProxy.getIncidentMatrix()
print 'stoichiometry matrix ='
print pathwayProxy.getStoichiometryMatrix()
print 'reversibility list ='
print pathwayProxy.getReversibilityList()
# end of main
if len( sys.argv ) > 1:
main( sys.argv[ 1 ] )
else:
filename = '../../../../doc/samples/Heinrich/Heinrich.eml'
main( os.path.abspath( filename ) )
| lgpl-3.0 | 1,427,795,092,905,206,500 | 30.133803 | 121 | 0.586594 | false | 4.485289 | false | false | false |
Syralist/pixels_clock | clock.py | 1 | 3227 | # -*- coding: utf-8 -*-
import pygame, led, sys, os, random, csv
import smbus
from pygame.locals import *
from led.PixelEventHandler import *
from time import gmtime, strftime
""" A very simple arcade shooter demo :)
"""
random.seed()
BLACK = pygame.Color(0,0,0)
WHITE = pygame.Color(255, 255, 255)
RED = pygame.Color(255, 0, 0)
GREEN = pygame.Color(0, 255, 0)
address = 0x48
LM75 = smbus.SMBus(1)
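# LM75 temperature sensor on I2C bus 1; 0x48 is the usual default address.
# read_byte() below returns the temperature in whole degrees Celsius.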
# detect if a serial/USB port is given as argument
hasSerialPortParameter = ( sys.argv.__len__() > 1 )
# use 90 x 20 matrix when no usb port for real display provided
fallbackSize = ( 90, 20 )
if hasSerialPortParameter:
serialPort = sys.argv[1]
print "INITIALIZING WITH USB-PORT: " + serialPort
ledDisplay = led.teensy.TeensyDisplay(serialPort, fallbackSize)
else:
print "INITIALIZING WITH SERVER DISPLAY AND SIMULATOR."
ledDisplay = led.dsclient.DisplayServerClientDisplay('localhost', 8123, fallbackSize)
# use same size for sim and real LED panel
size = ledDisplay.size()
simDisplay = led.sim.SimDisplay(size)
screen = pygame.Surface(size)
gamestate = 0 #1=alive; 0=dead
def main():
pygame.init()
pygame.font.init()
clock = pygame.time.Clock()
pygame.joystick.init()
gameover = False
# Initialize first joystick
if pygame.joystick.get_count() > 0:
stick = pygame.joystick.Joystick(0)
stick.init()
global gamestate
scored = False
# Clear event list before starting the game
pygame.event.clear()
while not gameover:
# Process event queue
for pgevent in pygame.event.get():
if pgevent.type == QUIT:
pygame.quit()
sys.exit()
event = process_event(pgevent)
# End the game
if event.button == EXIT:
gameover = True
# Keypresses on keyboard and joystick axis motions / button presses
elif event.type == PUSH:
# Movements
if event.button == UP:
pass
elif event.button == DOWN:
pass
elif event.button == RIGHT:
pass
elif event.button == LEFT:
pass
# Tower selection
elif event.button == B2:
pass
# Tower placement
elif event.button == P1:
gameover = True
# Only on Keyboard
elif pgevent.type == KEYDOWN and pgevent.key == K_ESCAPE:
gameover = True
screen.fill(BLACK)
font = pygame.font.SysFont("Arial", 12)
text1 = font.render(strftime("%H:%M:%S"), 0, RED)
text1pos = text1.get_rect()
text1pos.midtop = (screen.get_rect().centerx, -1)
screen.blit(text1,text1pos)
try:
            temp = LM75.read_byte(address)
except:
temp = -1
text2 = font.render("T: "+str(temp)+"'C", 0, GREEN)
text2pos = text2.get_rect()
text2pos.midbottom = (screen.get_rect().centerx, 23)
screen.blit(text2,text2pos)
simDisplay.update(screen)
ledDisplay.update(screen)
clock.tick(10)
main()
| gpl-3.0 | 2,032,024,441,789,479,700 | 26.117647 | 89 | 0.578866 | false | 3.700688 | false | false | false |
tylerclair/py3canvas | py3canvas/apis/modules.py | 1 | 54047 | """Modules API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class ModulesAPI(BaseCanvasAPI):
"""Modules API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for ModulesAPI."""
super(ModulesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.ModulesAPI")
def list_modules(self, course_id, include=None, search_term=None, student_id=None):
"""
List modules.
List the modules in a course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial name of the modules (and module items, if include['items'] is
specified) to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, all_pages=True)
def show_module(self, id, course_id, include=None, student_id=None):
"""
Show module.
Get information about a single module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def create_module(self, course_id, module_name, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Create a module.
Create and return a new module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - module[name]
"""The name of the module"""
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of this module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked.
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
self.logger.debug("POST /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, single_item=True)
def update_module(self, id, course_id, module_name=None, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_published=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Update a module.
Update and return an existing module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module[name]
"""The name of the module"""
if module_name is not None:
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of the module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
# OPTIONAL - module[published]
"""Whether the module is published and visible to students"""
if module_published is not None:
data["module[published]"] = module_published
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def delete_module(self, id, course_id):
"""
Delete module.
Delete a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def re_lock_module_progressions(self, id, course_id):
"""
Re-lock module progressions.
Resets module progressions to their default locked state and
recalculates them based on the current requirements.
Adding progression requirements to an active course will not lock students
out of modules they have already unlocked unless this action is called.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id}/relock with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}/relock".format(**path), data=data, params=params, single_item=True)
def list_module_items(self, course_id, module_id, include=None, search_term=None, student_id=None):
"""
List module items.
List the items in a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with each item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial title of the items to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, all_pages=True)
def show_module_item(self, id, course_id, module_id, include=None, student_id=None):
"""
Show module item.
Get information about a single module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with this item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def create_module_item(self, course_id, module_id, module_item_type, module_item_content_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_new_tab=None, module_item_page_url=None, module_item_position=None, module_item_title=None):
"""
Create a module item.
Create and return a new module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - module_item[title]
"""The name of the module item and associated content"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# REQUIRED - module_item[type]
"""The type of content linked to the item"""
self._validate_enum(module_item_type, ["File", "Page", "Discussion", "Assignment", "Quiz", "SubHeader", "ExternalUrl", "ExternalTool"])
data["module_item[type]"] = module_item_type
# REQUIRED - module_item[content_id]
"""The id of the content to link to the module item. Required, except for
'ExternalUrl', 'Page', and 'SubHeader' types."""
data["module_item[content_id]"] = module_item_content_id
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)."""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[page_url]
"""Suffix for the linked wiki page (e.g. 'front-page'). Required for 'Page'
type."""
if module_item_page_url is not None:
data["module_item[page_url]"] = module_item_page_url
# OPTIONAL - module_item[external_url]
"""External url that the item points to. [Required for 'ExternalUrl' and
'ExternalTool' types."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete. Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, single_item=True)
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
"""
Update a module item.
Update and return an existing module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module_item[title]
"""The name of the module item"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)"""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[external_url]
"""External url that the item points to. Only applies to 'ExternalUrl' type."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete, Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
# OPTIONAL - module_item[published]
"""Whether the module item is published and visible to students."""
if module_item_published is not None:
data["module_item[published]"] = module_item_published
# OPTIONAL - module_item[module_id]
"""Move this item to another module by specifying the target module id here.
The target module must be in the same course."""
if module_item_module_id is not None:
data["module_item[module_id]"] = module_item_module_id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def select_mastery_path(self, id, course_id, module_id, assignment_set_id=None, student_id=None):
"""
Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - assignment_set_id
"""Assignment set chosen, as specified in the mastery_paths portion of the
context module item response"""
if assignment_set_id is not None:
data["assignment_set_id"] = assignment_set_id
# OPTIONAL - student_id
"""Which student the selection applies to. If not specified, current user is
implied."""
if student_id is not None:
data["student_id"] = student_id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path".format(**path), data=data, params=params, no_data=True)
def delete_module_item(self, id, course_id, module_id):
"""
Delete module item.
Delete a module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def mark_module_item_as_done_not_done(self, id, course_id, module_id):
"""
Mark module item as done/not done.
Mark a module item as done/not done. Use HTTP method PUT to mark as done,
and DELETE to mark as not done.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done".format(**path), data=data, params=params, no_data=True)
def get_module_item_sequence(self, course_id, asset_id=None, asset_type=None):
"""
Get module item sequence.
Given an asset in a course, find the ModuleItem it belongs to, and also the previous and next Module Items
in the course sequence.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - asset_type
"""The type of asset to find module sequence information for. Use the ModuleItem if it is known
(e.g., the user navigated from a module item), since this will avoid ambiguity if the asset
appears more than once in the module sequence."""
if asset_type is not None:
self._validate_enum(asset_type, ["ModuleItem", "File", "Page", "Discussion", "Assignment", "Quiz", "ExternalTool"])
params["asset_type"] = asset_type
# OPTIONAL - asset_id
"""The id of the asset (or the url in the case of a Page)"""
if asset_id is not None:
params["asset_id"] = asset_id
self.logger.debug("GET /api/v1/courses/{course_id}/module_item_sequence with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/module_item_sequence".format(**path), data=data, params=params, single_item=True)
def mark_module_item_read(self, id, course_id, module_id):
"""
Mark module item read.
Fulfills "must view" requirement for a module item. It is generally not necessary to do this explicitly,
but it is provided for applications that need to access external content directly (bypassing the html_url
redirect that normally allows Canvas to fulfill "must view" requirements).
This endpoint cannot be used to complete requirements on locked or unpublished module items.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read".format(**path), data=data, params=params, no_data=True)
class Contentdetails(BaseModel):
"""Contentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Contentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Contentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
class Moduleitemsequenceasset(BaseModel):
"""Moduleitemsequenceasset Model."""
def __init__(self, module_id=None, type=None, id=None, title=None):
"""Init method for Moduleitemsequenceasset class."""
self._module_id = module_id
self._type = type
self._id = id
self._title = title
self.logger = logging.getLogger('py3canvas.Moduleitemsequenceasset')
@property
def module_id(self):
"""module_id."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""id."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def title(self):
"""title."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
class Moduleitemcompletionrequirement(BaseModel):
"""Moduleitemcompletionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Moduleitemcompletionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Moduleitemcompletionrequirement')
@property
def min_score(self):
"""min_score."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""completed."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Module(BaseModel):
"""Module Model."""
def __init__(self, completed_at=None, items_count=None, unlock_at=None, workflow_state=None, items=None, prerequisite_module_ids=None, state=None, publish_final_grade=None, position=None, items_url=None, id=None, require_sequential_progress=None, name=None):
"""Init method for Module class."""
self._completed_at = completed_at
self._items_count = items_count
self._unlock_at = unlock_at
self._workflow_state = workflow_state
self._items = items
self._prerequisite_module_ids = prerequisite_module_ids
self._state = state
self._publish_final_grade = publish_final_grade
self._position = position
self._items_url = items_url
self._id = id
self._require_sequential_progress = require_sequential_progress
self._name = name
self.logger = logging.getLogger('py3canvas.Module')
@property
def completed_at(self):
"""the date the calling user completed the module (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed_at
@completed_at.setter
def completed_at(self, value):
"""Setter for completed_at property."""
self.logger.warn("Setting values on completed_at will NOT update the remote Canvas instance.")
self._completed_at = value
@property
def items_count(self):
"""The number of items in the module."""
return self._items_count
@items_count.setter
def items_count(self, value):
"""Setter for items_count property."""
self.logger.warn("Setting values on items_count will NOT update the remote Canvas instance.")
self._items_count = value
@property
def unlock_at(self):
"""(Optional) the date this module will unlock."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def workflow_state(self):
"""the state of the module: 'active', 'deleted'."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def items(self):
"""The contents of this module, as an array of Module Items. (Present only if requested via include[]=items AND the module is not deemed too large by Canvas.)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def prerequisite_module_ids(self):
"""IDs of Modules that must be completed before this one is unlocked."""
return self._prerequisite_module_ids
@prerequisite_module_ids.setter
def prerequisite_module_ids(self, value):
"""Setter for prerequisite_module_ids property."""
self.logger.warn("Setting values on prerequisite_module_ids will NOT update the remote Canvas instance.")
self._prerequisite_module_ids = value
@property
def state(self):
"""The state of this Module for the calling user one of 'locked', 'unlocked', 'started', 'completed' (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._state
@state.setter
def state(self, value):
"""Setter for state property."""
self.logger.warn("Setting values on state will NOT update the remote Canvas instance.")
self._state = value
@property
def publish_final_grade(self):
"""if the student's final grade for the course should be published to the SIS upon completion of this module."""
return self._publish_final_grade
@publish_final_grade.setter
def publish_final_grade(self, value):
"""Setter for publish_final_grade property."""
self.logger.warn("Setting values on publish_final_grade will NOT update the remote Canvas instance.")
self._publish_final_grade = value
@property
def position(self):
"""the position of this module in the course (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def items_url(self):
"""The API URL to retrive this module's items."""
return self._items_url
@items_url.setter
def items_url(self, value):
"""Setter for items_url property."""
self.logger.warn("Setting values on items_url will NOT update the remote Canvas instance.")
self._items_url = value
@property
def id(self):
"""the unique identifier for the module."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def require_sequential_progress(self):
"""Whether module items must be unlocked in order."""
return self._require_sequential_progress
@require_sequential_progress.setter
def require_sequential_progress(self, value):
"""Setter for require_sequential_progress property."""
self.logger.warn("Setting values on require_sequential_progress will NOT update the remote Canvas instance.")
self._require_sequential_progress = value
@property
def name(self):
"""the name of this module."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
class Moduleitemsequence(BaseModel):
"""Moduleitemsequence Model."""
def __init__(self, items=None, modules=None):
"""Init method for Moduleitemsequence class."""
self._items = items
self._modules = modules
self.logger = logging.getLogger('py3canvas.Moduleitemsequence')
@property
def items(self):
"""an array containing one hash for each appearence of the asset in the module sequence (up to 10 total)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def modules(self):
"""an array containing each Module referenced above."""
return self._modules
@modules.setter
def modules(self, value):
"""Setter for modules property."""
self.logger.warn("Setting values on modules will NOT update the remote Canvas instance.")
self._modules = value
class Completionrequirement(BaseModel):
"""Completionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Completionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Completionrequirement')
@property
def min_score(self):
"""minimum score required to complete (only present when type == 'min_score')."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""one of 'must_view', 'must_submit', 'must_contribute', 'min_score'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""whether the calling user has met this requirement (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Moduleitem(BaseModel):
"""Moduleitem Model."""
def __init__(self, indent=None, title=None, url=None, completion_requirement=None, html_url=None, content_details=None, new_tab=None, external_url=None, position=None, module_id=None, content_id=None, type=None, id=None, page_url=None):
"""Init method for Moduleitem class."""
self._indent = indent
self._title = title
self._url = url
self._completion_requirement = completion_requirement
self._html_url = html_url
self._content_details = content_details
self._new_tab = new_tab
self._external_url = external_url
self._position = position
self._module_id = module_id
self._content_id = content_id
self._type = type
self._id = id
self._page_url = page_url
self.logger = logging.getLogger('py3canvas.Moduleitem')
@property
def indent(self):
"""0-based indent level; module items may be indented to show a hierarchy."""
return self._indent
@indent.setter
def indent(self, value):
"""Setter for indent property."""
self.logger.warn("Setting values on indent will NOT update the remote Canvas instance.")
self._indent = value
@property
def title(self):
"""the title of this item."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
@property
def url(self):
"""(Optional) link to the Canvas API object, if applicable."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn("Setting values on url will NOT update the remote Canvas instance.")
self._url = value
@property
def completion_requirement(self):
"""Completion requirement for this module item."""
return self._completion_requirement
@completion_requirement.setter
def completion_requirement(self, value):
"""Setter for completion_requirement property."""
self.logger.warn("Setting values on completion_requirement will NOT update the remote Canvas instance.")
self._completion_requirement = value
@property
def html_url(self):
"""link to the item in Canvas."""
return self._html_url
@html_url.setter
def html_url(self, value):
"""Setter for html_url property."""
self.logger.warn("Setting values on html_url will NOT update the remote Canvas instance.")
self._html_url = value
@property
def content_details(self):
"""(Present only if requested through include[]=content_details) If applicable, returns additional details specific to the associated object."""
return self._content_details
@content_details.setter
def content_details(self, value):
"""Setter for content_details property."""
self.logger.warn("Setting values on content_details will NOT update the remote Canvas instance.")
self._content_details = value
@property
def new_tab(self):
"""(only for 'ExternalTool' type) whether the external tool opens in a new tab."""
return self._new_tab
@new_tab.setter
def new_tab(self, value):
"""Setter for new_tab property."""
self.logger.warn("Setting values on new_tab will NOT update the remote Canvas instance.")
self._new_tab = value
@property
def external_url(self):
"""(only for 'ExternalUrl' and 'ExternalTool' types) external url that the item points to."""
return self._external_url
@external_url.setter
def external_url(self, value):
"""Setter for external_url property."""
self.logger.warn("Setting values on external_url will NOT update the remote Canvas instance.")
self._external_url = value
@property
def position(self):
"""the position of this item in the module (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def module_id(self):
"""the id of the Module this item appears in."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def content_id(self):
"""the id of the object referred to applies to 'File', 'Discussion', 'Assignment', 'Quiz', 'ExternalTool' types."""
return self._content_id
@content_id.setter
def content_id(self, value):
"""Setter for content_id property."""
self.logger.warn("Setting values on content_id will NOT update the remote Canvas instance.")
self._content_id = value
@property
def type(self):
"""the type of object referred to one of 'File', 'Page', 'Discussion', 'Assignment', 'Quiz', 'SubHeader', 'ExternalUrl', 'ExternalTool'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""the unique identifier for the module item."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def page_url(self):
"""(only for 'Page' type) unique locator for the linked wiki page."""
return self._page_url
@page_url.setter
def page_url(self, value):
"""Setter for page_url property."""
self.logger.warn("Setting values on page_url will NOT update the remote Canvas instance.")
self._page_url = value
class Moduleitemsequencenode(BaseModel):
"""Moduleitemsequencenode Model."""
def __init__(self, current=None, prev=None, next=None):
"""Init method for Moduleitemsequencenode class."""
self._current = current
self._prev = prev
self._next = next
self.logger = logging.getLogger('py3canvas.Moduleitemsequencenode')
@property
def current(self):
"""current."""
return self._current
@current.setter
def current(self, value):
"""Setter for current property."""
self.logger.warn("Setting values on current will NOT update the remote Canvas instance.")
self._current = value
@property
def prev(self):
"""prev."""
return self._prev
@prev.setter
def prev(self, value):
"""Setter for prev property."""
self.logger.warn("Setting values on prev will NOT update the remote Canvas instance.")
self._prev = value
@property
def next(self):
"""next."""
return self._next
@next.setter
def next(self, value):
"""Setter for next property."""
self.logger.warn("Setting values on next will NOT update the remote Canvas instance.")
self._next = value
class Moduleitemcontentdetails(BaseModel):
"""Moduleitemcontentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Moduleitemcontentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Moduleitemcontentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
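# --- Illustrative usage sketch (editor addition; not part of the generated py3canvas file). ---
# The enclosing API class defined earlier in this module is assumed to be named
# ModulesAPI and to be constructed from an instance address and an access token;
# that name, the constructor arguments and every id below are illustrative assumptions.
# Only methods defined above are called.
def _example_modules_usage():
    api = ModulesAPI("canvas.example.edu", "<ACCESS_TOKEN>")
    # Fulfil the "must view" requirement for a single module item.
    api.mark_module_item_read(id=55, course_id=101, module_id=7)
    # Locate an assignment within the course's module sequence.
    return api.get_module_item_sequence(course_id=101, asset_id=42, asset_type="Assignment")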
| mit | 998,213,574,905,809,700 | 37.412935 | 354 | 0.624493 | false | 4.208285 | false | false | false |
kajgan/stbgui | lib/python/Components/Converter/ClientsStreaming.py | 1 | 3432 | from Converter import Converter
from Poll import Poll
from Components.Element import cached
from Components.Sources.StreamService import StreamServiceList
from enigma import eStreamServer
from ServiceReference import ServiceReference
import socket
class ClientsStreaming(Converter, Poll, object):
UNKNOWN = -1
REF = 0
IP = 1
NAME = 2
ENCODER = 3
NUMBER = 4
SHORT_ALL = 5
ALL = 6
INFO = 7
INFO_RESOLVE = 8
INFO_RESOLVE_SHORT = 9
EXTRA_INFO = 10
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.poll_interval = 30000
self.poll_enabled = True
if type == "REF":
self.type = self.REF
elif type == "IP":
self.type = self.IP
elif type == "NAME":
self.type = self.NAME
elif type == "ENCODER":
self.type = self.ENCODER
elif type == "NUMBER":
self.type = self.NUMBER
elif type == "SHORT_ALL":
self.type = self.SHORT_ALL
elif type == "ALL":
self.type = self.ALL
elif type == "INFO":
self.type = self.INFO
elif type == "INFO_RESOLVE":
self.type = self.INFO_RESOLVE
elif type == "INFO_RESOLVE_SHORT":
self.type = self.INFO_RESOLVE_SHORT
elif type == "EXTRA_INFO":
self.type = self.EXTRA_INFO
else:
self.type = self.UNKNOWN
self.streamServer = eStreamServer.getInstance()
@cached
def getText(self):
if self.streamServer is None:
return ""
clients = []
refs = []
ips = []
names = []
encoders = []
extrainfo = _("ClientIP") + "\t" + _("Transcode") + "\t" + _("Channel") + "\n"
info = ""
for x in self.streamServer.getConnectedClients():
refs.append((x[1]))
servicename = ServiceReference(x[1]).getServiceName() or "(unknown service)"
service_name = servicename
names.append((service_name))
ip = x[0]
ips.append((ip))
if int(x[2]) == 0:
strtype = "S"
encoder = _('NO')
else:
strtype = "T"
encoder = _('YES')
encoders.append((encoder))
if self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
try:
raw = socket.gethostbyaddr(ip)
ip = raw[0]
except:
pass
if self.type == self.INFO_RESOLVE_SHORT:
ip, sep, tail = ip.partition('.')
info += ("%s %-8s %s\n") % (strtype, ip, service_name)
clients.append((ip, service_name, encoder))
extrainfo += ("%-8s\t%s\t%s") % (ip, encoder, service_name) +"\n"
if self.type == self.REF:
return ' '.join(refs)
elif self.type == self.IP:
return ' '.join(ips)
elif self.type == self.NAME:
return ' '.join(names)
elif self.type == self.ENCODER:
return _("Transcoding: ") + ' '.join(encoders)
elif self.type == self.NUMBER:
return str(len(clients))
elif self.type == self.EXTRA_INFO:
return extrainfo
elif self.type == self.SHORT_ALL:
return _("Total clients streaming: %d (%s)") % (len(clients), ' '.join(names))
elif self.type == self.ALL:
return '\n'.join(' '.join(elems) for elems in clients)
elif self.type == self.INFO or self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
return info
else:
return "(unknown)"
return ""
text = property(getText)
@cached
def getBoolean(self):
if self.streamServer is None:
return False
return (self.streamServer.getConnectedClients() or StreamServiceList) and True or False
boolean = property(getBoolean)
def changed(self, what):
Converter.changed(self, (self.CHANGED_POLL,))
def doSuspend(self, suspended):
pass | gpl-2.0 | 5,481,053,068,278,078,000 | 23.347518 | 104 | 0.638986 | false | 2.925831 | false | false | false |
viaict/viaduct | app/forms/pimpy.py | 1 | 1268 | import datetime
from flask_babel import _
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, DateTimeField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import InputRequired, Optional
from app import constants
from app.service import group_service, pimpy_service
class AddTaskForm(FlaskForm):
name = StringField(_('Name'), validators=[InputRequired()])
content = TextAreaField(_('Content'), validators=[Optional()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
users = StringField(_('Users'))
status = SelectField(_('Status'), coerce=int,
choices=pimpy_service.get_task_status_choices())
class AddMinuteForm(FlaskForm):
content = TextAreaField(_('Minute content'), validators=[InputRequired()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
date = DateTimeField(_('Date'), format=constants.DATE_FORMAT,
default=datetime.date.today)
| mit | 2,035,201,200,967,457,500 | 36.294118 | 78 | 0.698738 | false | 4.171053 | false | false | false |
Cito/DBUtils | tests/mock_db.py | 1 | 3341 | """This module serves as a mock object for the DB-API 2 module"""
threadsafety = 2
class Error(Exception):
pass
class DatabaseError(Error):
pass
class OperationalError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
def connect(database=None, user=None):
return Connection(database, user)
class Connection:
has_ping = False
num_pings = 0
def __init__(self, database=None, user=None):
self.database = database
self.user = user
self.valid = False
if database == 'error':
raise OperationalError
self.open_cursors = 0
self.num_uses = 0
self.num_queries = 0
self.num_pings = 0
self.session = []
self.valid = True
def close(self):
if not self.valid:
raise InternalError
self.open_cursors = 0
self.num_uses = 0
self.num_queries = 0
self.session = []
self.valid = False
def commit(self):
if not self.valid:
raise InternalError
self.session.append('commit')
def rollback(self):
if not self.valid:
raise InternalError
self.session.append('rollback')
def ping(self):
cls = self.__class__
cls.num_pings += 1
if not cls.has_ping:
raise AttributeError
if not self.valid:
raise OperationalError
def cursor(self, name=None):
if not self.valid:
raise InternalError
return Cursor(self, name)
class Cursor:
def __init__(self, con, name=None):
self.con = con
self.valid = False
if name == 'error':
raise OperationalError
self.result = None
self.inputsizes = []
self.outputsizes = {}
con.open_cursors += 1
self.valid = True
def close(self):
if not self.valid:
raise InternalError
self.con.open_cursors -= 1
self.valid = False
def execute(self, operation):
if not self.valid or not self.con.valid:
raise InternalError
self.con.num_uses += 1
if operation.startswith('select '):
self.con.num_queries += 1
self.result = operation[7:]
elif operation.startswith('set '):
self.con.session.append(operation[4:])
self.result = None
elif operation == 'get sizes':
self.result = (self.inputsizes, self.outputsizes)
self.inputsizes = []
self.outputsizes = {}
else:
raise ProgrammingError
def fetchone(self):
if not self.valid:
raise InternalError
result = self.result
self.result = None
return result
def callproc(self, procname):
if not self.valid or not self.con.valid or not procname:
raise InternalError
self.con.num_uses += 1
def setinputsizes(self, sizes):
if not self.valid:
raise InternalError
self.inputsizes = sizes
def setoutputsize(self, size, column=None):
if not self.valid:
raise InternalError
self.outputsizes[column] = size
def __del__(self):
if self.valid:
self.close()
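# --- Illustrative usage sketch (editor addition). ---
# Demonstrates the mock's behaviour exactly as implemented above; the database name
# and the statements are arbitrary examples.
if __name__ == '__main__':
    con = connect(database='testdb', user='tester')
    cur = con.cursor()
    cur.execute('select something')   # any 'select ...' is echoed back by fetchone()
    assert cur.fetchone() == 'something'
    cur.execute('set autocommit')     # 'set ...' statements are recorded in the session
    assert con.session == ['autocommit']
    cur.close()
    con.close()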
| mit | -20,463,000,262,335,624 | 22.695035 | 65 | 0.567495 | false | 4.239848 | false | false | false |
bluestemscott/librarygadget | librarygadget/librarybot/migrations/0001_initial.py | 1 | 15532 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('librarybot_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('api_key', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('account_level', self.gf('django.db.models.fields.CharField')(default='free', max_length=10)),
('paid_last_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('paid_first_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
))
db.send_create_signal('librarybot', ['UserProfile'])
# Adding model 'Library'
db.create_table('librarybot_library', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
('catalogurl', self.gf('django.db.models.fields.URLField')(max_length=200)),
('librarysystem', self.gf('django.db.models.fields.CharField')(max_length=20)),
('renew_supported_code', self.gf('django.db.models.fields.CharField')(default='untested', max_length=10)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('lastmodified', self.gf('django.db.models.fields.DateField')(auto_now=True, blank=True)),
))
db.send_create_signal('librarybot', ['Library'])
# Adding model 'Patron'
db.create_table('librarybot_patron', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('library', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Library'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('patronid', self.gf('django.db.models.fields.CharField')(max_length=40)),
('pin', self.gf('django.db.models.fields.CharField')(max_length=75)),
('name', self.gf('django.db.models.fields.CharField')(max_length=150, null=True)),
('save_history', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('lastchecked', self.gf('django.db.models.fields.DateTimeField')()),
('batch_last_run', self.gf('django.db.models.fields.DateField')(null=True)),
))
db.send_create_signal('librarybot', ['Patron'])
# Adding model 'Item'
db.create_table('librarybot_item', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Patron'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('author', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('outDate', self.gf('django.db.models.fields.DateField')(null=True)),
('dueDate', self.gf('django.db.models.fields.DateField')(null=True)),
('timesRenewed', self.gf('django.db.models.fields.SmallIntegerField')(null=True)),
('isbn', self.gf('django.db.models.fields.CharField')(max_length=25, null=True)),
('asof', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal('librarybot', ['Item'])
# Adding model 'AccessLog'
db.create_table('librarybot_accesslog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Patron'])),
('library', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Library'])),
('viewfunc', self.gf('django.db.models.fields.CharField')(max_length=50)),
('error', self.gf('django.db.models.fields.CharField')(max_length=150)),
('error_stacktrace', self.gf('django.db.models.fields.CharField')(max_length=3000)),
('date', self.gf('django.db.models.fields.DateField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('librarybot', ['AccessLog'])
# Adding model 'LibraryRequest'
db.create_table('librarybot_libraryrequest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('libraryname', self.gf('django.db.models.fields.CharField')(max_length=100)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
('catalogurl', self.gf('django.db.models.fields.URLField')(max_length=200)),
('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('patronid', self.gf('django.db.models.fields.CharField')(max_length=40)),
('password', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal('librarybot', ['LibraryRequest'])
# Adding model 'RenewalResponse'
db.create_table('librarybot_renewalresponse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('token', self.gf('django.db.models.fields.CharField')(max_length=36)),
('response', self.gf('django.db.models.fields.TextField')()),
('cachedate', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('librarybot', ['RenewalResponse'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('librarybot_userprofile')
# Deleting model 'Library'
db.delete_table('librarybot_library')
# Deleting model 'Patron'
db.delete_table('librarybot_patron')
# Deleting model 'Item'
db.delete_table('librarybot_item')
# Deleting model 'AccessLog'
db.delete_table('librarybot_accesslog')
# Deleting model 'LibraryRequest'
db.delete_table('librarybot_libraryrequest')
# Deleting model 'RenewalResponse'
db.delete_table('librarybot_renewalresponse')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'librarybot.accesslog': {
'Meta': {'object_name': 'AccessLog'},
'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'error_stacktrace': ('django.db.models.fields.CharField', [], {'max_length': '3000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'library': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Library']"}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Patron']"}),
'viewfunc': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'librarybot.item': {
'Meta': {'object_name': 'Item'},
'asof': ('django.db.models.fields.DateField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'dueDate': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True'}),
'outDate': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Patron']"}),
'timesRenewed': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'librarybot.library': {
'Meta': {'object_name': 'Library'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'catalogurl': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastmodified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'librarysystem': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'renew_supported_code': ('django.db.models.fields.CharField', [], {'default': "'untested'", 'max_length': '10'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'librarybot.libraryrequest': {
'Meta': {'object_name': 'LibraryRequest'},
'catalogurl': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'libraryname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'patronid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'librarybot.patron': {
'Meta': {'object_name': 'Patron'},
'batch_last_run': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastchecked': ('django.db.models.fields.DateTimeField', [], {}),
'library': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Library']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True'}),
'patronid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'save_history': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'librarybot.renewalresponse': {
'Meta': {'object_name': 'RenewalResponse'},
'cachedate': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'librarybot.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'account_level': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '10'}),
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_first_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'paid_last_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['librarybot']
| mit | 7,919,373,460,767,869,000 | 64.660944 | 163 | 0.563675 | false | 3.716679 | false | false | false |
rcmachado/pysuru | pysuru/tests/test_http.py | 1 | 1256 | # coding: utf-8
try:
from unittest import mock
except ImportError:
import mock
from pysuru.http import HttpClient
def test_headers_attribute_should_always_have_authorization_header_with_token():
client = HttpClient('TARGET', 'TOKEN')
assert 'Authorization' in client.headers
assert client.headers['Authorization'] == 'bearer TOKEN'
def test_urlopen_should_build_full_url_using_target_and_path():
client = HttpClient('example.com/api', 'TOKEN')
client.conn.request = mock.MagicMock()
client.urlopen('GET', '/sample')
expected_url = 'http://example.com/api/sample'
assert client.conn.request.call_args_list == [
mock.call('GET', expected_url, headers=mock.ANY, fields=None)]
def test_urlopen_should_merge_headers_argument_with_headers_attribute():
my_headers = {
'X-Custom-Header': 'custom value'
}
expected_headers = {
'Authorization': 'bearer TOKEN',
'X-Custom-Header': 'custom value'
}
client = HttpClient('TARGET', 'TOKEN')
client.conn.request = mock.MagicMock()
client.urlopen('GET', '/sample', headers=my_headers)
assert client.conn.request.call_args_list == [
mock.call('GET', mock.ANY, headers=expected_headers, fields=None)]
| mit | 8,830,630,026,472,141,000 | 28.904762 | 80 | 0.676752 | false | 3.64058 | false | false | false |
JaneliaSciComp/Neuroptikon | Source/lib/CrossPlatform/networkx/generators/small.py | 1 | 12813 | """
Various small and named graphs, together with some compact generators.
"""
__author__ ="""Aric Hagberg ([email protected])\nPieter Swart ([email protected])"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['make_small_graph',
'LCF_graph',
'bull_graph',
'chvatal_graph',
'cubical_graph',
'desargues_graph',
'diamond_graph',
'dodecahedral_graph',
'frucht_graph',
'heawood_graph',
'house_graph',
'house_x_graph',
'icosahedral_graph',
'krackhardt_kite_graph',
'moebius_kantor_graph',
'octahedral_graph',
'pappus_graph',
'petersen_graph',
'sedgewick_maze_graph',
'tetrahedral_graph',
'truncated_cube_graph',
'truncated_tetrahedron_graph',
'tutte_graph']
from networkx.generators.classic import empty_graph, cycle_graph, path_graph, complete_graph
from networkx.exception import NetworkXError
#------------------------------------------------------------------------------
# Tools for creating small graphs
#------------------------------------------------------------------------------
def make_small_undirected_graph(graph_description, create_using=None):
"""
Return a small undirected graph described by graph_description.
See make_small_graph.
"""
if create_using is not None and create_using.is_directed():
raise NetworkXError("Directed Graph not supported")
return make_small_graph(graph_description, create_using)
def make_small_graph(graph_description, create_using=None):
"""
Return the small graph described by graph_description.
graph_description is a list of the form [ltype,name,n,xlist]
Here ltype is one of "adjacencylist" or "edgelist",
name is the name of the graph and n the number of nodes.
This constructs a graph of n nodes with integer labels 0,..,n-1.
If ltype="adjacencylist" then xlist is an adjacency list
with exactly n entries, in which the j'th entry (which can be empty)
specifies the nodes connected to vertex j.
e.g. the "square" graph C_4 can be obtained by
>>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[1,3],[2,4],[1,3]]])
or, since we do not need to add edges twice,
>>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[3],[4],[]]])
If ltype="edgelist" then xlist is an edge list
written as [[v1,w2],[v2,w2],...,[vk,wk]],
where vj and wj integers in the range 1,..,n
e.g. the "square" graph C_4 can be obtained by
>>> G=nx.make_small_graph(["edgelist","C_4",4,[[1,2],[3,4],[2,3],[4,1]]])
Use the create_using argument to choose the graph class/type.
"""
ltype=graph_description[0]
name=graph_description[1]
n=graph_description[2]
G=empty_graph(n, create_using)
nodes=G.nodes()
if ltype=="adjacencylist":
adjlist=graph_description[3]
if len(adjlist) != n:
raise NetworkXError,"invalid graph_description"
G.add_edges_from([(u-1,v) for v in nodes for u in adjlist[v]])
elif ltype=="edgelist":
edgelist=graph_description[3]
for e in edgelist:
v1=e[0]-1
v2=e[1]-1
if v1<0 or v1>n-1 or v2<0 or v2>n-1:
raise NetworkXError,"invalid graph_description"
else:
G.add_edge(v1,v2)
G.name=name
return G
def LCF_graph(n,shift_list,repeats,create_using=None):
"""
Return the cubic graph specified in LCF notation.
LCF notation (LCF = Lederberg-Coxeter-Frucht) is a compressed
notation used in the generation of various cubic Hamiltonian
graphs of high symmetry. See, for example, dodecahedral_graph,
desargues_graph, heawood_graph and pappus_graph below.
n (number of nodes)
The starting graph is the n-cycle with nodes 0,...,n-1.
(The null graph is returned if n < 0.)
shift_list = [s1,s2,..,sk], a list of integer shifts mod n,
repeats
integer specifying the number of times that shifts in shift_list
are successively applied to each v_current in the n-cycle
to generate an edge between v_current and v_current+shift mod n.
For v1 cycling through the n-cycle a total of k*repeats times,
with shift cycling through shift_list repeats times, connect
v1 with v1+shift mod n.
The utility graph K_{3,3}
>>> G=nx.LCF_graph(6,[3,-3],3)
The Heawood graph
>>> G=nx.LCF_graph(14,[5,-5],7)
See http://mathworld.wolfram.com/LCFNotation.html for a description
and references.
"""
if create_using is not None and create_using.is_directed():
raise NetworkXError("Directed Graph not supported")
if n <= 0:
return empty_graph(0, create_using)
# start with the n-cycle
G=cycle_graph(n, create_using)
G.name="LCF_graph"
nodes=G.nodes()
n_extra_edges=repeats*len(shift_list)
# edges are added n_extra_edges times
# (not all of these need be new)
if n_extra_edges < 1:
return G
for i in range(n_extra_edges):
shift=shift_list[i%len(shift_list)] #cycle through shift_list
v1=nodes[i%n] # cycle repeatedly through nodes
v2=nodes[(i + shift)%n]
G.add_edge(v1, v2)
return G
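# --- Illustrative check (editor addition). ---
# The named generators below are thin wrappers over LCF_graph/make_small_graph; for
# example the Heawood graph defined further down is just LCF_graph(14, [5, -5], 7).
def _lcf_example():
    G = LCF_graph(14, [5, -5], 7)
    assert G.number_of_nodes() == 14
    assert G.number_of_edges() == 21   # cubic graph on 14 nodes
    return G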
#-------------------------------------------------------------------------------
# Various small and named graphs
#-------------------------------------------------------------------------------
def bull_graph(create_using=None):
"""Return the Bull graph. """
description=[
"adjacencylist",
"Bull Graph",
5,
[[2,3],[1,3,4],[1,2,5],[2],[3]]
]
G=make_small_undirected_graph(description, create_using)
return G
def chvatal_graph(create_using=None):
"""Return the Chvatal graph."""
description=[
"adjacencylist",
"Chvatal Graph",
12,
[[2,5,7,10],[3,6,8],[4,7,9],[5,8,10],
[6,9],[11,12],[11,12],[9,12],
[11],[11,12],[],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def cubical_graph(create_using=None):
"""Return the 3-regular Platonic Cubical graph."""
description=[
"adjacencylist",
"Platonic Cubical Graph",
8,
[[2,4,5],[1,3,8],[2,4,7],[1,3,6],
[1,6,8],[4,5,7],[3,6,8],[2,5,7]]
]
G=make_small_undirected_graph(description, create_using)
return G
def desargues_graph(create_using=None):
""" Return the Desargues graph."""
G=LCF_graph(20, [5,-5,9,-9], 5, create_using)
G.name="Desargues Graph"
return G
def diamond_graph(create_using=None):
"""Return the Diamond graph. """
description=[
"adjacencylist",
"Diamond Graph",
4,
[[2,3],[1,3,4],[1,2,4],[2,3]]
]
G=make_small_undirected_graph(description, create_using)
return G
def dodecahedral_graph(create_using=None):
""" Return the Platonic Dodecahedral graph. """
G=LCF_graph(20, [10,7,4,-4,-7,10,-4,7,-7,4], 2, create_using)
G.name="Dodecahedral Graph"
return G
def frucht_graph(create_using=None):
"""Return the Frucht Graph.
The Frucht Graph is the smallest cubical graph whose
automorphism group consists only of the identity element.
"""
G=cycle_graph(7, create_using)
G.add_edges_from([[0,7],[1,7],[2,8],[3,9],[4,9],[5,10],[6,10],
[7,11],[8,11],[8,9],[10,11]])
G.name="Frucht Graph"
return G
def heawood_graph(create_using=None):
""" Return the Heawood graph, a (3,6) cage. """
G=LCF_graph(14, [5,-5], 7, create_using)
G.name="Heawood Graph"
return G
def house_graph(create_using=None):
"""Return the House graph (square with triangle on top)."""
description=[
"adjacencylist",
"House Graph",
5,
[[2,3],[1,4],[1,4,5],[2,3,5],[3,4]]
]
G=make_small_undirected_graph(description, create_using)
return G
def house_x_graph(create_using=None):
"""Return the House graph with a cross inside the house square."""
description=[
"adjacencylist",
"House-with-X-inside Graph",
5,
[[2,3,4],[1,3,4],[1,2,4,5],[1,2,3,5],[3,4]]
]
G=make_small_undirected_graph(description, create_using)
return G
def icosahedral_graph(create_using=None):
"""Return the Platonic Icosahedral graph."""
description=[
"adjacencylist",
"Platonic Icosahedral Graph",
12,
[[2,6,8,9,12],[3,6,7,9],[4,7,9,10],[5,7,10,11],
[6,7,11,12],[7,12],[],[9,10,11,12],
[10],[11],[12],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def krackhardt_kite_graph(create_using=None):
"""
Return the Krackhardt Kite Social Network.
A 10 actor social network introduced by David Krackhardt
to illustrate: degree, betweenness, centrality, closeness, etc.
The traditional labeling is:
Andre=1, Beverley=2, Carol=3, Diane=4,
Ed=5, Fernando=6, Garth=7, Heather=8, Ike=9, Jane=10.
"""
description=[
"adjacencylist",
"Krackhardt Kite Social Network",
10,
[[2,3,4,6],[1,4,5,7],[1,4,6],[1,2,3,5,6,7],[2,4,7],
[1,3,4,7,8],[2,4,5,6,8],[6,7,9],[8,10],[9]]
]
G=make_small_undirected_graph(description, create_using)
return G
def moebius_kantor_graph(create_using=None):
"""Return the Moebius-Kantor graph."""
G=LCF_graph(16, [5,-5], 8, create_using)
G.name="Moebius-Kantor Graph"
return G
def octahedral_graph(create_using=None):
"""Return the Platonic Octahedral graph."""
description=[
"adjacencylist",
"Platonic Octahedral Graph",
6,
[[2,3,4,5],[3,4,6],[5,6],[5,6],[6],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def pappus_graph():
""" Return the Pappus graph."""
G=LCF_graph(18,[5,7,-7,7,-7,-5],3)
G.name="Pappus Graph"
return G
def petersen_graph(create_using=None):
"""Return the Petersen graph."""
description=[
"adjacencylist",
"Petersen Graph",
10,
[[2,5,6],[1,3,7],[2,4,8],[3,5,9],[4,1,10],[1,8,9],[2,9,10],
[3,6,10],[4,6,7],[5,7,8]]
]
G=make_small_undirected_graph(description, create_using)
return G
def sedgewick_maze_graph(create_using=None):
"""
Return a small maze with a cycle.
This is the maze used in Sedgewick, 3rd Edition, Part 5, Graph
Algorithms, Chapter 18, e.g. Figure 18.2 and following.
Nodes are numbered 0,..,7
"""
G=empty_graph(0, create_using)
G.add_nodes_from(range(8))
G.add_edges_from([[0,2],[0,7],[0,5]])
G.add_edges_from([[1,7],[2,6]])
G.add_edges_from([[3,4],[3,5]])
G.add_edges_from([[4,5],[4,7],[4,6]])
G.name="Sedgewick Maze"
return G
def tetrahedral_graph(create_using=None):
""" Return the 3-regular Platonic Tetrahedral graph."""
G=complete_graph(4, create_using)
G.name="Platonic Tetrahedral graph"
return G
def truncated_cube_graph(create_using=None):
"""Return the skeleton of the truncated cube."""
description=[
"adjacencylist",
"Truncated Cube Graph",
24,
[[2,3,5],[12,15],[4,5],[7,9],
[6],[17,19],[8,9],[11,13],
[10],[18,21],[12,13],[15],
[14],[22,23],[16],[20,24],
[18,19],[21],[20],[24],
[22],[23],[24],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
def truncated_tetrahedron_graph(create_using=None):
"""Return the skeleton of the truncated Platonic tetrahedron."""
G=path_graph(12, create_using)
# G.add_edges_from([(1,3),(1,10),(2,7),(4,12),(5,12),(6,8),(9,11)])
G.add_edges_from([(0,2),(0,9),(1,6),(3,11),(4,11),(5,7),(8,10)])
G.name="Truncated Tetrahedron Graph"
return G
def tutte_graph(create_using=None):
"""Return the Tutte graph."""
description=[
"adjacencylist",
"Tutte's Graph",
46,
[[2,3,4],[5,27],[11,12],[19,20],[6,34],
[7,30],[8,28],[9,15],[10,39],[11,38],
[40],[13,40],[14,36],[15,16],[35],
[17,23],[18,45],[19,44],[46],[21,46],
[22,42],[23,24],[41],[25,28],[26,33],
[27,32],[34],[29],[30,33],[31],
[32,34],[33],[],[],[36,39],
[37],[38,40],[39],[],[],
[42,45],[43],[44,46],[45],[],[]]
]
G=make_small_undirected_graph(description, create_using)
return G
| bsd-3-clause | -5,615,257,374,291,116,000 | 30.25122 | 92 | 0.570124 | false | 3.109197 | false | false | false |
looopTools/sw9-source | .waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/extras/wurf/dependency.py | 1 | 2578 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import hashlib
import json
import collections
import pprint
class Dependency(object):
def __init__(self,**kwargs):
assert"sha1"not in kwargs
if'recurse'not in kwargs:
kwargs['recurse']=True
if'optional'not in kwargs:
kwargs['optional']=False
if'internal'not in kwargs:
kwargs['internal']=False
hash_attributes=kwargs.copy()
hash_attributes.pop('optional',None)
hash_attributes.pop('internal',None)
s=json.dumps(hash_attributes,sort_keys=True)
sha1=hashlib.sha1(s.encode('utf-8')).hexdigest()
object.__setattr__(self,'info',kwargs)
self.info['sha1']=sha1
self.info['hash']=None
object.__setattr__(self,'read_write',dict())
object.__setattr__(self,'audit',list())
self.error_messages=[]
def rewrite(self,attribute,value,reason):
if value==None:
self.__delete(attribute=attribute,reason=reason)
elif attribute not in self.info:
self.__create(attribute=attribute,value=value,reason=reason)
else:
self.__modify(attribute=attribute,value=value,reason=reason)
def __delete(self,attribute,reason):
if attribute not in self.info:
raise AttributeError("Cannot delete non existing attribute {}".format(attribute))
audit='Deleting "{}". Reason: {}'.format(attribute,reason)
del self.info[attribute]
self.audit.append(audit)
def __create(self,attribute,value,reason):
audit='Creating "{}" value "{}". Reason: {}'.format(attribute,value,reason)
self.audit.append(audit)
self.info[attribute]=value
def __modify(self,attribute,value,reason):
audit='Modifying "{}" from "{}" to "{}". Reason: {}'.format(attribute,self.info[attribute],value,reason)
self.audit.append(audit)
self.info[attribute]=value
def __getattr__(self,attribute):
if attribute in self.info:
return self.info[attribute]
elif attribute in self.read_write:
return self.read_write[attribute]
else:
return None
def __setattr__(self,attribute,value):
if attribute in self.info:
raise AttributeError("Attribute {} read-only.".format(attribute))
else:
self.read_write[attribute]=value
def __contains__(self,attribute):
return(attribute in self.info)or(attribute in self.read_write)
def __str__(self):
return"Dependency info:\n{}\nread_write: {}\naudit: {}".format(pprint.pformat(self.info,indent=2),pprint.pformat(self.read_write,indent=2),pprint.pformat(self.audit,indent=2))
def __hash__(self):
if not self.info['hash']:
self.info['hash']=hash(self.info['sha1'])
return self.info['hash']
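# Minimal usage sketch (illustrative only; the attribute names 'name',
# 'resolver' and 'checkout' are arbitrary examples, not a fixed schema):
#
#     dep = Dependency(name='foo', resolver='git')
#     dep.rewrite(attribute='checkout', value='1.0.0', reason='pin release')
#     print(dep.name, dep.sha1)
#     print(dep.audit)   # human-readable record of every rewrite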
| mit | 4,564,975,259,940,651,000 | 36.362319 | 177 | 0.713732 | false | 3.230576 | false | false | false |
Artanicus/python-cozify | util/device-fade-test.py | 1 | 1301 | #!/usr/bin/env python3
from cozify import hub
import numpy, time
from absl import flags, app
FLAGS = flags.FLAGS
flags.DEFINE_string('device', None, 'Device to operate on.')
flags.DEFINE_float('delay', 0.5, 'Step length in seconds.')
flags.DEFINE_float('steps', 20, 'Amount of steps to divide into.')
flags.DEFINE_bool('verify', False, 'Verify if value went through as-is.')
green = '\u001b[32m'
yellow = '\u001b[33m'
red = '\u001b[31m'
reset = '\u001b[0m'
def main(argv):
del argv
previous = None
for step in numpy.flipud(numpy.linspace(0.0, 1.0, num=FLAGS.steps)):
hub.light_brightness(FLAGS.device, step)
time.sleep(FLAGS.delay)
read = 'N/A'
result = '?'
if FLAGS.verify:
devs = hub.devices()
read = devs[FLAGS.device]['state']['brightness']
if step == read:
result = '✔'
color = green
else:
result = '✖'
if read == previous:
color = yellow
else:
color = red
previous = step
print('{3}[{2}] set: {0} vs. read: {1}{4}'.format(step, read, result, color, reset))
if __name__ == "__main__":
flags.mark_flag_as_required('device')
app.run(main)
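# Example invocation (illustrative; the device id is a placeholder -- use an
# id reported by hub.devices() on your own Cozify hub):
#
#     python device-fade-test.py --device 'abc123' --steps 10 --delay 0.2 --verify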
| mit | -6,221,275,703,317,243,000 | 27.822222 | 92 | 0.54973 | false | 3.458667 | false | false | false |
v1k45/django-notify-x | notify/models.py | 1 | 14033 | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.conf import settings
from django.db.models import QuerySet
from jsonfield.fields import JSONField
from six import python_2_unicode_compatible
from django.utils.html import escape
from django.utils.timesince import timesince
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from .utils import prefetch_relations
class NotificationQueryset(QuerySet):
"""
    Chain-able QuerySets using ``.as_manager()``.
"""
def prefetch(self):
"""
Marks the current queryset to prefetch all generic relations.
"""
qs = self.select_related()
qs._prefetch_relations = True
return qs
def _fetch_all(self):
if self._result_cache is None:
if hasattr(self, '_prefetch_relations'):
# removes the flag since prefetch_relations is recursive
del self._prefetch_relations
prefetch_relations(self)
self._prefetch_relations = True
return super(NotificationQueryset, self)._fetch_all()
def _clone(self, **kwargs):
clone = super(NotificationQueryset, self)._clone(**kwargs)
if hasattr(self, '_prefetch_relations'):
clone._prefetch_relations = True
return clone
def active(self):
"""
QuerySet filter() for retrieving both read and unread notifications
which are not soft-deleted.
:return: Non soft-deleted notifications.
"""
return self.filter(deleted=False)
def read(self):
"""
QuerySet filter() for retrieving read notifications.
:return: Read and active Notifications filter().
"""
return self.filter(deleted=False, read=True)
def unread(self):
"""
QuerySet filter() for retrieving unread notifications.
:return: Unread and active Notifications filter().
"""
return self.filter(deleted=False, read=False)
def unread_all(self, user=None):
"""
Marks all notifications as unread for a user (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as unread.
"""
qs = self.read()
if user:
qs = qs.filter(recipient=user)
qs.update(read=False)
def read_all(self, user=None):
"""
Marks all notifications as read for a user (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as read.
"""
qs = self.unread()
if user:
qs = qs.filter(recipient=user)
qs.update(read=True)
def delete_all(self, user=None):
"""
Method to soft-delete all notifications of a User (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as soft-deleted.
"""
qs = self.active()
if user:
qs = qs.filter(recipient=user)
soft_delete = getattr(settings, 'NOTIFY_SOFT_DELETE', True)
if soft_delete:
qs.update(deleted=True)
else:
qs.delete()
def active_all(self, user=None):
"""
Method to soft-delete all notifications of a User (if supplied)
:param user: Notification recipient.
:return: Updates QuerySet as soft-deleted.
"""
qs = self.deleted()
if user:
qs = qs.filter(recipient=user)
qs.update(deleted=False)
def deleted(self):
"""
QuerySet ``filter()`` for retrieving soft-deleted notifications.
:return: Soft deleted notification filter()
"""
return self.filter(deleted=True)
@python_2_unicode_compatible
class Notification(models.Model):
"""
**Notification Model for storing notifications. (Yeah, too obvious)**
This model is pretty-much a replica of ``django-notifications``'s
model. The newly added fields just adds a feature to allow anonymous
``actors``, ``targets`` and ``object``.
**Attributes**:
:recipient: The user who receives notification.
:verb: Action performed by actor (not necessarily).
:description: Option description for your notification.
:actor_text: Anonymous actor who is not in content-type.
:actor_url: Since the actor is not in content-type,
a custom URL for it.
*...Same for target and obj*.
    :nf_type: | Each notification is different; they must be formatted
| differently during HTML rendering. For this, each
| notification gets to carry it own *notification type*.
|
| This notification type will be used to search
| the special template for the notification located at
| ``notifications/includes/NF_TYPE.html`` of your
| template directory.
|
| The main reason to add this field is to save you
| from the pain of writing ``if...elif...else`` blocks
| in your template file just for handling how
| notifications will get rendered.
|
| With this, you can just save template for an individual
| notification type and call the *template-tag* to render
| all notifications for you without writing a single
| ``if...elif...else block``.
|
| You'll just need to do a
| ``{% render_notifications using NOTIFICATION_OBJ %}``
| and you'll get your notifications rendered.
|
| By default, every ``nf_type`` is set to ``default``.
:extra: **JSONField**, holds other optional data you want the
notification to carry in JSON format.
:deleted: Useful when you want to *soft delete* your notifications.
"""
recipient = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='notifications',
on_delete=models.CASCADE,
verbose_name=_('Notification receiver'))
# actor attributes.
actor_content_type = models.ForeignKey(
ContentType, null=True, blank=True,
related_name='notify_actor', on_delete=models.CASCADE,
verbose_name=_('Content type of actor object'))
actor_object_id = models.PositiveIntegerField(
null=True, blank=True,
verbose_name=_('ID of the actor object'))
actor_content_object = GenericForeignKey('actor_content_type',
'actor_object_id')
actor_text = models.CharField(
max_length=50, blank=True, null=True,
verbose_name=_('Anonymous text for actor'))
actor_url_text = models.CharField(
blank=True, null=True, max_length=200,
verbose_name=_('Anonymous URL for actor'))
# basic details.
verb = models.CharField(max_length=100,
verbose_name=_('Verb of the action'))
description = models.CharField(
max_length=255, blank=True, null=True,
verbose_name=_('Description of the notification'))
nf_type = models.CharField(max_length=20, default='default',
verbose_name=_('Type of notification'))
# TODO: Add a field to store notification cover images.
# target attributes.
target_content_type = models.ForeignKey(
ContentType, null=True, blank=True,
related_name='notify_target', on_delete=models.CASCADE,
verbose_name=_('Content type of target object'))
target_object_id = models.PositiveIntegerField(
null=True, blank=True,
verbose_name=_('ID of the target object'))
target_content_object = GenericForeignKey('target_content_type',
'target_object_id')
target_text = models.CharField(
max_length=50, blank=True, null=True,
verbose_name=_('Anonymous text for target'))
target_url_text = models.CharField(
blank=True, null=True, max_length=200,
verbose_name=_('Anonymous URL for target'))
# obj attributes.
obj_content_type = models.ForeignKey(
ContentType, null=True, blank=True,
related_name='notify_object', on_delete=models.CASCADE,
verbose_name=_('Content type of action object'))
obj_object_id = models.PositiveIntegerField(
null=True, blank=True,
verbose_name=_('ID of the target object'))
obj_content_object = GenericForeignKey('obj_content_type', 'obj_object_id')
obj_text = models.CharField(
max_length=50, blank=True, null=True,
verbose_name=_('Anonymous text for action object'))
obj_url_text = models.CharField(
blank=True, null=True, max_length=200,
verbose_name=_('Anonymous URL for action object'))
extra = JSONField(null=True, blank=True,
                      verbose_name=_('JSONField to store additional data'))
# Advanced details.
created = models.DateTimeField(auto_now=False, auto_now_add=True)
read = models.BooleanField(default=False,
verbose_name=_('Read status'))
deleted = models.BooleanField(default=False,
verbose_name=_('Soft delete status'))
objects = NotificationQueryset.as_manager()
class Meta(object):
ordering = ('-created', )
def __str__(self):
ctx = {
'actor': self.actor or self.actor_text,
'verb': self.verb,
'description': self.description,
'target': self.target or self.target_text,
'obj': self.obj or self.obj_text,
'at': timesince(self.created),
}
if ctx['actor']:
if not ctx['target']:
return _("{actor} {verb} {at} ago").format(**ctx)
elif not ctx['obj']:
return _("{actor} {verb} on {target} {at} ago").format(**ctx)
elif ctx['obj']:
return _(
"{actor} {verb} {obj} on {target} {at} ago").format(**ctx)
return _("{description} -- {at} ago").format(**ctx)
def mark_as_read(self):
"""
Marks notification as read
"""
self.read = True
self.save()
def mark_as_unread(self):
"""
Marks notification as unread.
"""
self.read = False
self.save()
@cached_property
def actor(self):
"""
Property to return actor object/text to keep things DRY.
:return: Actor object or Text or None.
"""
return self.actor_content_object or self.actor_text
@cached_property
def actor_url(self):
"""
Property to return permalink of the actor.
Uses ``get_absolute_url()``.
If ``get_absolute_url()`` method fails, it tries to grab URL
from ``actor_url_text``, if it fails again, returns a "#".
:return: URL for the actor.
"""
try:
url = self.actor_content_object.get_absolute_url()
except AttributeError:
url = self.actor_url_text or "#"
return url
@cached_property
def target(self):
"""
See ``actor`` property
:return: Target object or Text or None
"""
return self.target_content_object or self.target_text
@cached_property
def target_url(self):
"""
See ``actor_url`` property.
:return: URL for the target.
"""
try:
url = self.target_content_object.get_absolute_url()
except AttributeError:
url = self.target_url_text or "#"
return url
@cached_property
def obj(self):
"""
See ``actor`` property.
:return: Action Object or Text or None.
"""
return self.obj_content_object or self.obj_text
@cached_property
def obj_url(self):
"""
See ``actor_url`` property.
:return: URL for Action Object.
"""
try:
url = self.obj_content_object.get_absolute_url()
except AttributeError:
url = self.obj_url_text or "#"
return url
@staticmethod
def do_escape(obj):
"""
Method to HTML escape an object or set it to None conditionally.
performs ``force_text()`` on the argument so that a foreignkey gets
        serialized and spits out the ``__str__`` output instead of an Object.
:param obj: Object to escape.
:return: HTML escaped and JSON-friendly data.
"""
return escape(force_text(obj)) if obj else None
def as_json(self):
"""
Notification data in a Python dictionary to which later gets
supplied to JSONResponse so that it gets JSON serialized
the *django-way*
:return: Dictionary format of the QuerySet object.
"""
data = {
"id": self.id,
"actor": self.do_escape(self.actor),
"actor_url": self.do_escape(self.actor_url),
"verb": self.do_escape(self.verb),
"description": self.do_escape(self.description),
"read": self.read,
"nf_type": self.do_escape(self.nf_type),
"target": self.do_escape(self.target),
"target_url": self.do_escape(self.target_url),
"obj": self.do_escape(self.obj),
"obj_url": self.do_escape(self.obj_url),
"created": self.created,
"data": self.extra,
}
return data
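# Minimal usage sketch (illustrative only; assumes a Django project with this
# app installed and an existing ``user`` instance; the verb/nf_type values are
# made up for the example):
#
#     notification = Notification.objects.create(
#         recipient=user, actor_text='Admin', verb='mentioned you',
#         nf_type='mention')
#     unread = Notification.objects.unread().filter(recipient=user)
#     notification.mark_as_read()
#     payload = notification.as_json()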
| mit | 1,351,748,947,462,464,800 | 31.941315 | 79 | 0.575643 | false | 4.522398 | false | false | false |
GoogleCloudDataproc/cloud-dataproc | codelabs/spark-nlp/topic_model.py | 1 | 8715 | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code accompanies this codelab: https://codelabs.developers.google.com/codelabs/spark-nlp.
# In this example, we will build a topic model using spark-nlp and Spark ML.
# In order for this code to work properly, a bucket name must be provided.
# Python imports
import sys
# spark-nlp components. Each one is incorporated into our pipeline.
from sparknlp.annotator import Lemmatizer, Stemmer, Tokenizer, Normalizer
from sparknlp.base import DocumentAssembler, Finisher
# A Spark Session is how we interact with Spark SQL to create Dataframes
from pyspark.sql import SparkSession
# These allow us to create a schema for our data
from pyspark.sql.types import StructField, StructType, StringType, LongType
# Spark Pipelines allow us to sequentially add components such as transformers
from pyspark.ml import Pipeline
# These are components we will incorporate into our pipeline.
from pyspark.ml.feature import StopWordsRemover, CountVectorizer, IDF
# LDA is our model of choice for topic modeling
from pyspark.ml.clustering import LDA
# Some transformers require the usage of other Spark ML functions. We import them here
from pyspark.sql.functions import col, lit, concat, regexp_replace
# This will help catch some PySpark errors
from pyspark.sql.utils import AnalysisException
# Assign bucket where the data lives
try:
bucket = sys.argv[1]
except IndexError:
print("Please provide a bucket name")
sys.exit(1)
# Create a SparkSession under the name "reddit". Viewable via the Spark UI
spark = SparkSession.builder.appName("reddit topic model").getOrCreate()
# Create a three column schema consisting of two strings and a long integer
fields = [StructField("title", StringType(), True),
StructField("body", StringType(), True),
StructField("created_at", LongType(), True)]
schema = StructType(fields)
# We'll attempt to process every year / month combination below.
years = ['2016', '2017', '2018', '2019']
months = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12']
# This is the subreddit we're working with.
subreddit = "food"
# Create a base dataframe.
reddit_data = spark.createDataFrame([], schema)
# Keep a running list of all files that will be processed
files_read = []
for year in years:
for month in months:
        # Cloud Storage URI of the form gs://<bucket>/reddit_posts/<year>/<month>/<subreddit>.csv.gz
gs_uri = f"gs://{bucket}/reddit_posts/{year}/{month}/{subreddit}.csv.gz"
        # If the file doesn't exist we will simply continue and not
        # log it into our "files_read" list
try:
reddit_data = (
spark.read.format('csv')
.options(codec="org.apache.hadoop.io.compress.GzipCodec")
.load(gs_uri, schema=schema)
.union(reddit_data)
)
files_read.append(gs_uri)
except AnalysisException:
continue
if len(files_read) == 0:
print('No files read')
sys.exit(1)
# Replacing null values with their respective typed-equivalent is usually
# easier to work with. In this case, we'll replace nulls with empty strings.
# Since some of our data doesn't have a body, we can combine all of the text
# for the titles and bodies so that every row has useful data.
df_train = (
reddit_data
# Replace null values with an empty string
.fillna("")
.select(
# Combine columns
concat(
# First column to concatenate. col() is used to specify that we're referencing a column
col("title"),
# Literal character that will be between the concatenated columns.
lit(" "),
# Second column to concatenate.
col("body")
# Change the name of the new column
).alias("text")
)
# The text has several tags including [REMOVED] or [DELETED] for redacted content.
# We'll replace these with empty strings.
.select(
regexp_replace(col("text"), "\[.*?\]", "")
.alias("text")
)
)
# Now, we begin assembling our pipeline. Each component here applies some transformation to the data.
# The Document Assembler takes the raw text data and converts it into a format that can
# be tokenized. It becomes one of spark-nlp's native object types, the "Document".
document_assembler = DocumentAssembler().setInputCol("text").setOutputCol("document")
# The Tokenizer takes data that is of the "Document" type and tokenizes it.
# While slightly more involved than this, this is effectively taking a string and splitting
# it along the spaces, so each word is its own string. The data then becomes the
# spark-nlp native type "Token".
tokenizer = Tokenizer().setInputCols(["document"]).setOutputCol("token")
# The Normalizer will group words together based on similar semantic meaning.
normalizer = Normalizer().setInputCols(["token"]).setOutputCol("normalizer")
# The Stemmer takes objects of class "Token" and converts the words into their
# root meaning. For instance, the words "cars", "cars'" and "car's" would all be replaced
# with the word "car".
stemmer = Stemmer().setInputCols(["normalizer"]).setOutputCol("stem")
# The Finisher marks the end of the spark-nlp stages and allows us to access the data outside of spark-nlp
# components. For instance, we can now feed the data into components from Spark MLlib.
finisher = Finisher().setInputCols(["stem"]).setOutputCols(["to_spark"]).setValueSplitSymbol(" ")
# Stopwords are common words that generally don't add much detail to the meaning
# of a body of text. In English, these are mostly "articles" such as the words "the"
# and "of".
stopword_remover = StopWordsRemover(inputCol="to_spark", outputCol="filtered")
# Here we implement TF-IDF as an input to our LDA model. CountVectorizer (TF) keeps track
# of the vocabulary that's being created so we can map our topics back to their
# corresponding words.
# TF (term frequency) creates a matrix that counts how many times each word in the
# vocabulary appears in each body of text. This then gives each word a weight based
# on its frequency.
tf = CountVectorizer(inputCol="filtered", outputCol="raw_features")
# Here we implement the IDF portion. IDF (Inverse document frequency) reduces
# the weights of commonly-appearing words.
idf = IDF(inputCol="raw_features", outputCol="features")
# LDA creates a statistical representation of how frequently words appear
# together in order to create "topics" or groups of commonly appearing words.
# In this case, we'll create 5 topics.
lda = LDA(k=5)
# We add all of the transformers into a Pipeline object. Each transformer
# will execute in the ordered provided to the "stages" parameter
pipeline = Pipeline(
stages = [
document_assembler,
tokenizer,
normalizer,
stemmer,
finisher,
stopword_remover,
tf,
idf,
lda
]
)
# We fit the data to the model.
model = pipeline.fit(df_train)
# Now that we have completed a pipeline, we want to output the topics as human-readable.
# To do this, we need to grab the vocabulary generated from our pipeline, grab the topic
# model and do the appropriate mapping. The output from each individual component lives
# in the model object. We can access them by referring to them by their position in
# the pipeline via model.stages[<ind>]
# Let's create a reference to our vocabulary.
vocab = model.stages[-3].vocabulary
# Next, let's grab the topics generated by our LDA model via describeTopics(). Using collect(),
# we load the output into a Python array.
raw_topics = model.stages[-1].describeTopics(maxTermsPerTopic=5).collect()
# Lastly, let's get the indices of the vocabulary terms from our topics
topic_inds = [ind.termIndices for ind in raw_topics]
# The indices we just grabbed map directly to the term at position <ind> in our vocabulary.
# Using the code below, we can generate the mappings from our topic indices to our vocabulary.
topics = []
for topic in topic_inds:
_topic = []
for ind in topic:
_topic.append(vocab[ind])
topics.append(_topic)
# Let's see our topics!
for i, topic in enumerate(topics, start=1):
print(f"topic {i}: {topic}")
| apache-2.0 | 3,045,869,450,984,424,400 | 38.256757 | 104 | 0.715089 | false | 3.864745 | false | false | false |
Spiderlover/Toontown | toontown/suit/SuitBase.py | 1 | 3300 | import SuitDNA
from SuitLegList import *
import SuitTimings
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from pandac.PandaModules import *
from pandac.PandaModules import Point3
from toontown.battle import SuitBattleGlobals
from toontown.toonbase import TTLocalizer
TIME_BUFFER_PER_WPT = 0.25
TIME_DIVISOR = 100
DISTRIBUTE_TASK_CREATION = 0
class SuitBase:
notify = DirectNotifyGlobal.directNotify.newCategory('SuitBase')
def __init__(self):
self.dna = None
self.level = 0
self.maxHP = 10
self.currHP = 10
self.isSkelecog = 0
self.isWaiter = 0
self.isVirtual = 0
self.isRental = 0
return
def delete(self):
if hasattr(self, 'legList'):
del self.legList
def getCurrHp(self):
if hasattr(self, 'currHP') and self.currHP:
return self.currHP
else:
self.notify.error('currHP is None')
return 'unknown'
def getMaxHp(self):
if hasattr(self, 'maxHP') and self.maxHP:
return self.maxHP
else:
self.notify.error('maxHP is None')
return 'unknown'
def getStyleName(self):
if hasattr(self, 'dna') and self.dna:
return self.dna.name
else:
self.notify.error('called getStyleName() before dna was set!')
return 'unknown'
def getStyleDept(self):
if hasattr(self, 'dna') and self.dna:
return SuitDNA.getDeptFullname(self.dna.dept)
else:
self.notify.error('called getStyleDept() before dna was set!')
return 'unknown'
def getLevel(self):
return self.level
def setLevel(self, level):
self.level = level
nameWLevel = TTLocalizer.SuitBaseNameWithLevel % {'name': self.name,
'dept': self.getStyleDept(),
'level': self.getActualLevel()}
self.setDisplayName(nameWLevel)
attributes = SuitBattleGlobals.SuitAttributes[self.dna.name]
self.maxHP = attributes['hp'][self.level]
self.currHP = self.maxHP
def getSkelecog(self):
return self.isSkelecog
def setSkelecog(self, flag):
self.isSkelecog = flag
def setWaiter(self, flag):
self.isWaiter = flag
def setVirtual(self, flag):
self.isVirtual = flag
def setRental(self, flag):
self.isRental = flag
def getActualLevel(self):
if hasattr(self, 'dna'):
return SuitBattleGlobals.getActualFromRelativeLevel(self.getStyleName(), self.level) + 1
else:
self.notify.warning('called getActualLevel with no DNA, returning 1 for level')
return 1
def setPath(self, path):
self.path = path
self.pathLength = self.path.getNumPoints()
def getPath(self):
return self.path
def printPath(self):
print '%d points in path' % self.pathLength
for currPathPt in xrange(self.pathLength):
indexVal = self.path.getPointIndex(currPathPt)
print '\t', self.sp.dnaStore.getSuitPointWithIndex(indexVal)
def makeLegList(self):
self.legList = SuitLegList(self.path, self.sp.dnaStore)
| mit | 2,053,050,987,002,632,700 | 28.72973 | 100 | 0.617576 | false | 3.670745 | false | false | false |
pattywgm/funny-spider | douban/douban/spiders/movie_awards.py | 1 | 2888 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@file: movie_awards.py
@time: 17/10/19 10:35 PM
@desc: scrape movie awards data
       roughly 28 items per minute; got banned at this rate
"""
import re
from copy import deepcopy
from os.path import exists
import scrapy
from douban.items import AwardsItem
from douban.utils.my_utils import load_obj, replace_dot
_META_VERSION = 'v1.0'
_AWARDS = 'https://movie.douban.com/subject/{}/awards/'
class MovieAwards(scrapy.Spider):
name = 'movie_awards'
meta_version = _META_VERSION
def __init__(self):
"""
:param urls:
        :param done: URLs already crawled; used to resume crawling after an interruption
:return:
"""
self.urls = load_obj('./records/urls.pkl')
self.done = list()
if exists('./records/{}_done.pkl'.format(self.name)):
self.done = load_obj('./records/{}_done.pkl'.format(self.name))
self.new_done = deepcopy(self.done)
def start_requests(self):
req = list()
for url in self.urls:
movie_code = re.findall('\d+', url)[0]
award_url = _AWARDS.format(movie_code)
if award_url not in self.done:
req.append(scrapy.Request(award_url, callback=self.parse, meta={'movie_code': movie_code}))
return req
def parse(self, response):
url = response.url
self.logger.info('Crawl {}'.format(url))
item = AwardsItem()
item['url'] = url
item['movie_code'] = response.meta['movie_code']
award_divs = response.xpath('//div[@class="awards"]')
item['awards'] = [self.parse_award_detail(div) for div in award_divs]
yield item
def parse_award_detail(self, award_div):
"""
        Parse the detailed award information for one award block.
:param award_div:
:return:
"""
award_detail = dict()
        # Award provider and year
url = award_div.xpath('.//h2/a/@href').extract_first()
name = award_div.xpath('.//h2/a/text()').extract_first()
year = award_div.xpath('.//h2/span/text()').extract_first().replace('(', '').replace(')', '').strip()
award_detail.update({'award_provider': {name: url}, 'year': year})
        # Specific award names and their winners
awards = list()
for ul in award_div.xpath('.//ul[@class="award"]'):
award_name = ul.xpath('./li[1]/text()').extract_first()
award_persons = list()
for person in ul.xpath('./li[position()>1]'):
if person.xpath('./a').extract_first() is None:
break
p_name = replace_dot(person.xpath('./a/text()').extract())
p_url = person.xpath('./a/@href').extract()
award_persons.append(dict(zip(p_name, p_url)))
awards.append({award_name: award_persons})
award_detail.update({'awards': awards})
return award_detail
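# Illustrative shape of one entry produced by parse_award_detail() above
# (keys mirror the code; the concrete values are made-up placeholders):
#
#     {
#         'award_provider': {'Some Film Award': 'https://movie.douban.com/awards/...'},
#         'year': '2014',
#         'awards': [
#             {'Best Picture (nominee)': [{'Some Person': 'https://movie.douban.com/celebrity/...'}]},
#         ],
#     }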
| gpl-3.0 | 2,291,034,261,660,360,200 | 31.8 | 109 | 0.558465 | false | 3.132584 | false | false | false |
rsignell-usgs/yaml2ncml | setup.py | 1 | 1966 | import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--verbose']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
def extract_version():
version = None
fdir = os.path.dirname(__file__)
fnme = os.path.join(fdir, 'yaml2ncml', '__init__.py')
with open(fnme) as fd:
for line in fd:
if (line.startswith('__version__')):
_, version = line.split('=')
version = version.strip()[1:-1]
break
return version
rootpath = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return open(os.path.join(rootpath, *parts), 'r').read()
long_description = '{}\n{}'.format(read('README.rst'), read('CHANGES.txt'))
LICENSE = read('LICENSE.txt')
with open('requirements.txt') as f:
require = f.readlines()
install_requires = [r.strip() for r in require]
setup(name='yaml2ncml',
version=extract_version(),
packages=['yaml2ncml'],
license=LICENSE,
description='ncML aggregation from YAML specifications',
long_description=long_description,
author='Rich Signell',
author_email='[email protected]',
install_requires=install_requires,
entry_points=dict(console_scripts=[
'yaml2ncml = yaml2ncml.yaml2ncml:main']
),
url='https://github.com/rsignell-usgs/yaml2ncml',
keywords=['YAML', 'ncml'],
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License'],
tests_require=['pytest'],
cmdclass=dict(test=PyTest),
zip_safe=False)
| mit | 5,773,066,131,003,179,000 | 29.71875 | 75 | 0.60529 | false | 3.773512 | true | false | false |
opencord/voltha | ofagent/main.py | 1 | 9975 | #!/usr/bin/env python
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import yaml
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from common.structlog_setup import setup_logging
from common.utils.dockerhelpers import get_my_containers_name
from common.utils.nethelpers import get_my_primary_local_ipv4
from connection_mgr import ConnectionManager
defs = dict(
config=os.environ.get('CONFIG', './ofagent.yml'),
logconfig=os.environ.get('LOGCONFIG', './logconfig.yml'),
consul=os.environ.get('CONSUL', 'localhost:8500'),
controller=os.environ.get('CONTROLLER', 'localhost:6653'),
external_host_address=os.environ.get('EXTERNAL_HOST_ADDRESS',
get_my_primary_local_ipv4()),
grpc_endpoint=os.environ.get('GRPC_ENDPOINT', 'localhost:50055'),
instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
internal_host_address=os.environ.get('INTERNAL_HOST_ADDRESS',
get_my_primary_local_ipv4()),
work_dir=os.environ.get('WORK_DIR', '/tmp/ofagent'),
key_file=os.environ.get('KEY_FILE', '/ofagent/pki/voltha.key'),
cert_file=os.environ.get('CERT_FILE', '/ofagent/pki/voltha.crt')
)
def parse_args():
parser = argparse.ArgumentParser()
_help = ('Path to ofagent.yml config file (default: %s). '
'If relative, it is relative to main.py of ofagent.'
% defs['config'])
parser.add_argument('-c', '--config',
dest='config',
action='store',
default=defs['config'],
help=_help)
_help = ('Path to logconfig.yml config file (default: %s). '
'If relative, it is relative to main.py of voltha.'
% defs['logconfig'])
parser.add_argument('-l', '--logconfig',
dest='logconfig',
action='store',
default=defs['logconfig'],
help=_help)
_help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
parser.add_argument(
'-C', '--consul', dest='consul', action='store',
default=defs['consul'],
help=_help)
_help = '<hostname1>:<port1> <hostname2>:<port2> <hostname3>:<port3> ... <hostnamen>:<portn> to openflow controller (default: %s)' % \
defs['controller']
parser.add_argument(
'-O', '--controller',nargs = '*', dest='controller', action='store',
default=defs['controller'],
help=_help)
_help = ('<hostname> or <ip> at which ofagent is reachable from outside '
'the cluster (default: %s)' % defs['external_host_address'])
parser.add_argument('-E', '--external-host-address',
dest='external_host_address',
action='store',
default=defs['external_host_address'],
help=_help)
    _help = ('gRPC end-point to connect to. It can either be a direct '
             'definition in the form of <hostname>:<port>, or it can be an '
             'indirect definition in the form of @<service-name> where '
             '<service-name> is the name of the grpc service as registered '
             'in consul (example: @voltha-grpc). (default: %s)'
% defs['grpc_endpoint'])
parser.add_argument('-G', '--grpc-endpoint',
dest='grpc_endpoint',
action='store',
default=defs['grpc_endpoint'],
help=_help)
    _help = ('<hostname> or <ip> at which ofagent is reachable from inside '
'the cluster (default: %s)' % defs['internal_host_address'])
parser.add_argument('-H', '--internal-host-address',
dest='internal_host_address',
action='store',
default=defs['internal_host_address'],
help=_help)
_help = ('unique string id of this ofagent instance (default: %s)'
% defs['instance_id'])
parser.add_argument('-i', '--instance-id',
dest='instance_id',
action='store',
default=defs['instance_id'],
help=_help)
_help = 'omit startup banner log lines'
parser.add_argument('-n', '--no-banner',
dest='no_banner',
action='store_true',
default=False,
help=_help)
_help = "suppress debug and info logs"
parser.add_argument('-q', '--quiet',
dest='quiet',
action='count',
help=_help)
_help = 'enable verbose logging'
parser.add_argument('-v', '--verbose',
dest='verbose',
action='count',
help=_help)
_help = ('work dir to compile and assemble generated files (default=%s)'
% defs['work_dir'])
parser.add_argument('-w', '--work-dir',
dest='work_dir',
action='store',
default=defs['work_dir'],
help=_help)
_help = ('use docker container name as ofagent instance id'
' (overrides -i/--instance-id option)')
parser.add_argument('--instance-id-is-container-name',
dest='instance_id_is_container_name',
action='store_true',
default=False,
help=_help)
    _help = ('Specify this option to enable TLS security between ofagent '
             'and onos.')
parser.add_argument('-t', '--enable-tls',
dest='enable_tls',
action='store_true',
help=_help)
_help = ('key file to be used for tls security (default=%s)'
% defs['key_file'])
parser.add_argument('-k', '--key-file',
dest='key_file',
action='store',
default=defs['key_file'],
help=_help)
_help = ('certificate file to be used for tls security (default=%s)'
% defs['cert_file'])
parser.add_argument('-r', '--cert-file',
dest='cert_file',
action='store',
default=defs['cert_file'],
help=_help)
args = parser.parse_args()
# post-processing
if args.instance_id_is_container_name:
args.instance_id = get_my_containers_name()
return args
def load_config(args, configname='config'):
argdict = vars(args)
path = argdict[configname]
if path.startswith('.'):
dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(dir, path)
path = os.path.abspath(path)
with open(path) as fd:
config = yaml.load(fd)
return config
banner = r'''
___ _____ _ _
/ _ \| ___/ \ __ _ ___ _ __ | |_
| | | | |_ / _ \ / _` |/ _ \ '_ \| __|
| |_| | _/ ___ \ (_| | __/ | | | |_
\___/|_|/_/ \_\__, |\___|_| |_|\__|
|___/
'''
def print_banner(log):
for line in banner.strip('\n').splitlines():
log.info(line)
log.info('(to stop: press Ctrl-C)')
class Main(object):
def __init__(self):
self.args = args = parse_args()
self.config = load_config(args)
self.logconfig = load_config(args, 'logconfig')
# May want to specify the gRPC timeout as an arg (in future)
# Right now, set a default value
self.grpc_timeout = 120
verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
self.log = setup_logging(self.logconfig,
args.instance_id,
verbosity_adjust=verbosity_adjust)
# components
self.connection_manager = None
self.exiting = False
if not args.no_banner:
print_banner(self.log)
self.startup_components()
def start(self):
self.start_reactor() # will not return except Keyboard interrupt
@inlineCallbacks
def startup_components(self):
self.log.info('starting-internal-components')
args = self.args
self.connection_manager = yield ConnectionManager(
args.consul, args.grpc_endpoint, self.grpc_timeout,
args.controller, args.instance_id,
args.enable_tls, args.key_file, args.cert_file).start()
self.log.info('started-internal-services')
@inlineCallbacks
def shutdown_components(self):
"""Execute before the reactor is shut down"""
self.log.info('exiting-on-keyboard-interrupt')
self.exiting = True
if self.connection_manager is not None:
yield self.connection_manager.stop()
def start_reactor(self):
reactor.callWhenRunning(
lambda: self.log.info('twisted-reactor-started'))
reactor.addSystemEventTrigger('before', 'shutdown',
self.shutdown_components)
reactor.suggestThreadPoolSize(30)
reactor.run()
if __name__ == '__main__':
Main().start()
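# Example invocation (illustrative; hostnames and ports are placeholders, and
# any flag omitted falls back to the defaults assembled in parse_args()):
#
#     python main.py -v \
#         -C localhost:8500 \
#         -G localhost:50055 \
#         -O onos1:6653 onos2:6653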
| apache-2.0 | 8,610,598,011,498,533,000 | 35.944444 | 140 | 0.536441 | false | 4.157982 | true | false | false |
ludojmj/treelud | server/paramiko/sftp_client.py | 1 | 32863 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of Paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from binascii import hexlify
import errno
import os
import stat
import threading
import time
import weakref
from paramiko import util
from paramiko.channel import Channel
from paramiko.message import Message
from paramiko.common import INFO, DEBUG, o777
from paramiko.py3compat import bytestring, b, u, long, string_types, bytes_types
from paramiko.sftp import BaseSFTP, CMD_OPENDIR, CMD_HANDLE, SFTPError, CMD_READDIR, \
CMD_NAME, CMD_CLOSE, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_CREATE, \
SFTP_FLAG_TRUNC, SFTP_FLAG_APPEND, SFTP_FLAG_EXCL, CMD_OPEN, CMD_REMOVE, \
CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_STAT, CMD_ATTRS, CMD_LSTAT, \
CMD_SYMLINK, CMD_SETSTAT, CMD_READLINK, CMD_REALPATH, CMD_STATUS, SFTP_OK, \
SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED
from paramiko.sftp_attr import SFTPAttributes
from paramiko.ssh_exception import SSHException
from paramiko.sftp_file import SFTPFile
from paramiko.util import ClosingContextManager
def _to_unicode(s):
"""
decode a string as ascii or utf8 if possible (as required by the sftp
protocol). if neither works, just return a byte string because the server
probably doesn't know the filename's encoding.
"""
try:
return s.encode('ascii')
except (UnicodeError, AttributeError):
try:
return s.decode('utf-8')
except UnicodeError:
return s
b_slash = b'/'
class SFTPClient(BaseSFTP, ClosingContextManager):
"""
SFTP client object.
Used to open an SFTP session across an open SSH `.Transport` and perform
remote file operations.
Instances of this class may be used as context managers.
"""
def __init__(self, sock):
"""
Create an SFTP client from an existing `.Channel`. The channel
should already have requested the ``"sftp"`` subsystem.
An alternate way to create an SFTP client context is by using
`from_transport`.
:param .Channel sock: an open `.Channel` using the ``"sftp"`` subsystem
:raises SSHException: if there's an exception while negotiating
sftp
"""
BaseSFTP.__init__(self)
self.sock = sock
self.ultra_debug = False
self.request_number = 1
# lock for request_number
self._lock = threading.Lock()
self._cwd = None
# request # -> SFTPFile
self._expecting = weakref.WeakValueDictionary()
if type(sock) is Channel:
# override default logger
transport = self.sock.get_transport()
self.logger = util.get_logger(transport.get_log_channel() + '.sftp')
self.ultra_debug = transport.get_hexdump()
try:
server_version = self._send_version()
except EOFError:
raise SSHException('EOF during negotiation')
self._log(INFO, 'Opened sftp connection (server version %d)' % server_version)
def from_transport(cls, t, window_size=None, max_packet_size=None):
"""
Create an SFTP client channel from an open `.Transport`.
Setting the window and packet sizes might affect the transfer speed.
The default settings in the `.Transport` class are the same as in
OpenSSH and should work adequately for both files transfers and
interactive sessions.
:param .Transport t: an open `.Transport` which is already authenticated
:param int window_size:
optional window size for the `.SFTPClient` session.
:param int max_packet_size:
            optional max packet size for the `.SFTPClient` session.
:return:
a new `.SFTPClient` object, referring to an sftp session (channel)
across the transport
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
chan = t.open_session(window_size=window_size,
max_packet_size=max_packet_size)
if chan is None:
return None
chan.invoke_subsystem('sftp')
return cls(chan)
from_transport = classmethod(from_transport)
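    # Minimal usage sketch (illustrative; host and credentials are placeholders):
    #
    #     import paramiko
    #     transport = paramiko.Transport(('sftp.example.com', 22))
    #     transport.connect(username='user', password='secret')
    #     sftp = paramiko.SFTPClient.from_transport(transport)
    #     print(sftp.listdir('.'))
    #     sftp.close()
    #     transport.close()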
def _log(self, level, msg, *args):
if isinstance(msg, list):
for m in msg:
self._log(level, m, *args)
else:
# escape '%' in msg (they could come from file or directory names) before logging
msg = msg.replace('%','%%')
super(SFTPClient, self)._log(level, "[chan %s] " + msg, *([self.sock.get_name()] + list(args)))
def close(self):
"""
Close the SFTP session and its underlying channel.
.. versionadded:: 1.4
"""
self._log(INFO, 'sftp session closed.')
self.sock.close()
def get_channel(self):
"""
Return the underlying `.Channel` object for this SFTP session. This
might be useful for doing things like setting a timeout on the channel.
.. versionadded:: 1.7.1
"""
return self.sock
def listdir(self, path='.'):
"""
Return a list containing the names of the entries in the given ``path``.
The list is in arbitrary order. It does not include the special
entries ``'.'`` and ``'..'`` even if they are present in the folder.
This method is meant to mirror ``os.listdir`` as closely as possible.
For a list of full `.SFTPAttributes` objects, see `listdir_attr`.
:param str path: path to list (defaults to ``'.'``)
"""
return [f.filename for f in self.listdir_attr(path)]
def listdir_attr(self, path='.'):
"""
Return a list containing `.SFTPAttributes` objects corresponding to
files in the given ``path``. The list is in arbitrary order. It does
not include the special entries ``'.'`` and ``'..'`` even if they are
present in the folder.
The returned `.SFTPAttributes` objects will each have an additional
field: ``longname``, which may contain a formatted string of the file's
attributes, in unix format. The content of this string will probably
depend on the SFTP server implementation.
:param str path: path to list (defaults to ``'.'``)
:return: list of `.SFTPAttributes` objects
.. versionadded:: 1.2
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'listdir(%r)' % path)
t, msg = self._request(CMD_OPENDIR, path)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_binary()
filelist = []
while True:
try:
t, msg = self._request(CMD_READDIR, handle)
except EOFError:
# done with handle
break
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
for i in range(count):
filename = msg.get_text()
longname = msg.get_text()
attr = SFTPAttributes._from_msg(msg, filename, longname)
if (filename != '.') and (filename != '..'):
filelist.append(attr)
self._request(CMD_CLOSE, handle)
return filelist
def listdir_iter(self, path='.', read_aheads=50):
"""
Generator version of `.listdir_attr`.
See the API docs for `.listdir_attr` for overall details.
This function adds one more kwarg on top of `.listdir_attr`:
``read_aheads``, an integer controlling how many
``SSH_FXP_READDIR`` requests are made to the server. The default of 50
should suffice for most file listings as each request/response cycle
        may contain multiple files (dependent on server implementation).
.. versionadded:: 1.15
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'listdir(%r)' % path)
t, msg = self._request(CMD_OPENDIR, path)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_string()
nums = list()
while True:
try:
# Send out a bunch of readdir requests so that we can read the
# responses later on Section 6.7 of the SSH file transfer RFC
# explains this
# http://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
for i in range(read_aheads):
num = self._async_request(type(None), CMD_READDIR, handle)
nums.append(num)
# For each of our sent requests
# Read and parse the corresponding packets
# If we're at the end of our queued requests, then fire off
# some more requests
# Exit the loop when we've reached the end of the directory
# handle
for num in nums:
t, pkt_data = self._read_packet()
msg = Message(pkt_data)
new_num = msg.get_int()
if num == new_num:
if t == CMD_STATUS:
self._convert_status(msg)
count = msg.get_int()
for i in range(count):
filename = msg.get_text()
longname = msg.get_text()
attr = SFTPAttributes._from_msg(
msg, filename, longname)
if (filename != '.') and (filename != '..'):
yield attr
# If we've hit the end of our queued requests, reset nums.
nums = list()
except EOFError:
self._request(CMD_CLOSE, handle)
return
def open(self, filename, mode='r', bufsize=-1):
"""
Open a file on the remote server. The arguments are the same as for
Python's built-in `python:file` (aka `python:open`). A file-like
object is returned, which closely mimics the behavior of a normal
Python file object, including the ability to be used as a context
manager.
The mode indicates how the file is to be opened: ``'r'`` for reading,
``'w'`` for writing (truncating an existing file), ``'a'`` for
appending, ``'r+'`` for reading/writing, ``'w+'`` for reading/writing
(truncating an existing file), ``'a+'`` for reading/appending. The
Python ``'b'`` flag is ignored, since SSH treats all files as binary.
The ``'U'`` flag is supported in a compatible way.
Since 1.5.2, an ``'x'`` flag indicates that the operation should only
succeed if the file was created and did not previously exist. This has
no direct mapping to Python's file flags, but is commonly known as the
``O_EXCL`` flag in posix.
The file will be buffered in standard Python style by default, but
can be altered with the ``bufsize`` parameter. ``0`` turns off
buffering, ``1`` uses line buffering, and any number greater than 1
(``>1``) uses that specific buffer size.
:param str filename: name of the file to open
:param str mode: mode (Python-style) to open in
:param int bufsize: desired buffering (-1 = default buffer size)
:return: an `.SFTPFile` object representing the open file
:raises IOError: if the file could not be opened.
"""
filename = self._adjust_cwd(filename)
self._log(DEBUG, 'open(%r, %r)' % (filename, mode))
imode = 0
if ('r' in mode) or ('+' in mode):
imode |= SFTP_FLAG_READ
if ('w' in mode) or ('+' in mode) or ('a' in mode):
imode |= SFTP_FLAG_WRITE
if 'w' in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_TRUNC
if 'a' in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_APPEND
if 'x' in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_EXCL
attrblock = SFTPAttributes()
t, msg = self._request(CMD_OPEN, filename, imode, attrblock)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_binary()
self._log(DEBUG, 'open(%r, %r) -> %s' % (filename, mode, hexlify(handle)))
return SFTPFile(self, handle, mode, bufsize)
# Python continues to vacillate about "open" vs "file"...
file = open
def remove(self, path):
"""
Remove the file at the given path. This only works on files; for
removing folders (directories), use `rmdir`.
:param str path: path (absolute or relative) of the file to remove
:raises IOError: if the path refers to a folder (directory)
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'remove(%r)' % path)
self._request(CMD_REMOVE, path)
unlink = remove
def rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder
:raises IOError: if ``newpath`` is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, 'rename(%r, %r)' % (oldpath, newpath))
self._request(CMD_RENAME, oldpath, newpath)
def mkdir(self, path, mode=o777):
"""
Create a folder (directory) named ``path`` with numeric mode ``mode``.
The default mode is 0777 (octal). On some systems, mode is ignored.
Where it is used, the current umask value is first masked out.
:param str path: name of the folder to create
:param int mode: permissions (posix-style) for the newly-created folder
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'mkdir(%r, %r)' % (path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_MKDIR, path, attr)
def rmdir(self, path):
"""
Remove the folder named ``path``.
:param str path: name of the folder to remove
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'rmdir(%r)' % path)
self._request(CMD_RMDIR, path)
def stat(self, path):
"""
Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
Python's ``stat`` structure as returned by ``os.stat``, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a Python `python:stat` object, the result may not be accessed as
a tuple. This is mostly due to the author's slack factor.
The fields supported are: ``st_mode``, ``st_size``, ``st_uid``,
``st_gid``, ``st_atime``, and ``st_mtime``.
:param str path: the filename to stat
:return:
an `.SFTPAttributes` object containing attributes about the given
file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'stat(%r)' % path)
t, msg = self._request(CMD_STAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg)
def lstat(self, path):
"""
Retrieve information about a file on the remote system, without
following symbolic links (shortcuts). This otherwise behaves exactly
the same as `stat`.
:param str path: the filename to stat
:return:
an `.SFTPAttributes` object containing attributes about the given
file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'lstat(%r)' % path)
t, msg = self._request(CMD_LSTAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg)
def symlink(self, source, dest):
"""
Create a symbolic link (shortcut) of the ``source`` path at
``destination``.
:param str source: path of the original file
:param str dest: path of the newly created symlink
"""
dest = self._adjust_cwd(dest)
self._log(DEBUG, 'symlink(%r, %r)' % (source, dest))
source = bytestring(source)
self._request(CMD_SYMLINK, source, dest)
def chmod(self, path, mode):
"""
Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by Python's `os.chmod`
function.
:param str path: path of the file to change the permissions of
:param int mode: new permissions
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chmod(%r, %r)' % (path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_SETSTAT, path, attr)
def chown(self, path, uid, gid):
"""
Change the owner (``uid``) and group (``gid``) of a file. As with
Python's `os.chown` function, you must pass both arguments, so if you
only want to change one, use `stat` first to retrieve the current
owner and group.
:param str path: path of the file to change the owner and group of
:param int uid: new owner's uid
:param int gid: new group id
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chown(%r, %r, %r)' % (path, uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self._request(CMD_SETSTAT, path, attr)
def utime(self, path, times):
"""
Set the access and modified times of the file specified by ``path``. If
``times`` is ``None``, then the file's access and modified times are set
to the current time. Otherwise, ``times`` must be a 2-tuple of numbers,
of the form ``(atime, mtime)``, which is used to set the access and
modified times, respectively. This bizarre API is mimicked from Python
for the sake of consistency -- I apologize.
:param str path: path of the file to modify
:param tuple times:
``None`` or a tuple of (access time, modified time) in standard
internet epoch time (seconds since 01 January 1970 GMT)
"""
path = self._adjust_cwd(path)
if times is None:
times = (time.time(), time.time())
self._log(DEBUG, 'utime(%r, %r)' % (path, times))
attr = SFTPAttributes()
attr.st_atime, attr.st_mtime = times
self._request(CMD_SETSTAT, path, attr)
def truncate(self, path, size):
"""
Change the size of the file specified by ``path``. This usually
extends or shrinks the size of the file, just like the `~file.truncate`
method on Python file objects.
:param str path: path of the file to modify
:param size: the new size of the file
:type size: int or long
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'truncate(%r, %r)' % (path, size))
attr = SFTPAttributes()
attr.st_size = size
self._request(CMD_SETSTAT, path, attr)
def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
`symlink` to create these. The result may be either an absolute or
relative pathname.
:param str path: path of the symbolic link file
:return: target path, as a `str`
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'readlink(%r)' % path)
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError('Readlink returned %d results' % count)
return _to_unicode(msg.get_string())
def normalize(self, path):
"""
Return the normalized path (on the server) of a given path. This
can be used to quickly resolve symbolic links or determine what the
server is considering to be the "current folder" (by passing ``'.'``
as ``path``).
:param str path: path to be normalized
:return: normalized form of the given path (as a `str`)
:raises IOError: if the path can't be resolved on the server
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'normalize(%r)' % path)
t, msg = self._request(CMD_REALPATH, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count != 1:
raise SFTPError('Realpath returned %d results' % count)
return msg.get_text()
def chdir(self, path=None):
"""
Change the "current directory" of this SFTP session. Since SFTP
doesn't really have the concept of a current working directory, this is
emulated by Paramiko. Once you use this method to set a working
directory, all operations on this `.SFTPClient` object will be relative
to that path. You can pass in ``None`` to stop using a current working
directory.
:param str path: new current working directory
:raises IOError: if the requested path doesn't exist on the server
.. versionadded:: 1.4
"""
if path is None:
self._cwd = None
return
if not stat.S_ISDIR(self.stat(path).st_mode):
raise SFTPError(errno.ENOTDIR, "%s: %s" % (os.strerror(errno.ENOTDIR), path))
self._cwd = b(self.normalize(path))
def getcwd(self):
"""
Return the "current working directory" for this SFTP session, as
emulated by Paramiko. If no directory has been set with `chdir`,
this method will return ``None``.
.. versionadded:: 1.4
"""
return self._cwd and u(self._cwd)
def putfo(self, fl, remotepath, file_size=0, callback=None, confirm=True):
"""
Copy the contents of an open file object (``fl``) to the SFTP server as
``remotepath``. Any exception raised by operations will be passed
through.
The SFTP operations use pipelining for speed.
:param file fl: opened file or file-like object to copy
:param str remotepath: the destination path on the SFTP server
:param int file_size:
optional size parameter passed to callback. If none is specified,
size defaults to 0
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
(since 1.7.4)
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size (since 1.7.7)
:return:
an `.SFTPAttributes` object containing attributes about the given
file.
.. versionadded:: 1.10
"""
with self.file(remotepath, 'wb') as fr:
fr.set_pipelined(True)
size = 0
while True:
data = fl.read(32768)
fr.write(data)
size += len(data)
if callback is not None:
callback(size, file_size)
if len(data) == 0:
break
if confirm:
s = self.stat(remotepath)
if s.st_size != size:
raise IOError('size mismatch in put! %d != %d' % (s.st_size, size))
else:
s = SFTPAttributes()
return s
def put(self, localpath, remotepath, callback=None, confirm=True):
"""
Copy a local file (``localpath``) to the SFTP server as ``remotepath``.
Any exception raised by operations will be passed through. This
method is primarily provided as a convenience.
The SFTP operations use pipelining for speed.
:param str localpath: the local file to copy
:param str remotepath: the destination path on the SFTP server. Note
that the filename should be included. Only specifying a directory
may result in an error.
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size
:return: an `.SFTPAttributes` object containing attributes about the given file
.. versionadded:: 1.4
.. versionchanged:: 1.7.4
``callback`` and rich attribute return value added.
.. versionchanged:: 1.7.7
``confirm`` param added.
"""
file_size = os.stat(localpath).st_size
with open(localpath, 'rb') as fl:
return self.putfo(fl, remotepath, file_size, callback, confirm)
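    # Usage sketch (not part of the original source): upload a local file; with
    # confirm=True (the default) the returned SFTPAttributes come from a
    # server-side stat() of the uploaded file.
    #
    #     attrs = sftp.put('report.csv', '/upload/report.csv')
    #     print(attrs.st_size)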
def getfo(self, remotepath, fl, callback=None):
"""
Copy a remote file (``remotepath``) from the SFTP server and write to
an open file or file-like object, ``fl``. Any exception raised by
operations will be passed through. This method is primarily provided
as a convenience.
        :param str remotepath: the remote file to copy
        :param object fl:
            the destination file-like object (opened for writing) that the
            remote file's contents are written to
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
:return: the `number <int>` of bytes written to the opened file object
.. versionadded:: 1.10
"""
with self.open(remotepath, 'rb') as fr:
file_size = self.stat(remotepath).st_size
fr.prefetch()
size = 0
while True:
data = fr.read(32768)
fl.write(data)
size += len(data)
if callback is not None:
callback(size, file_size)
if len(data) == 0:
break
return size
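    # Usage sketch (not part of the original source): getfo() streams a remote
    # file into any writable file-like object, e.g. an in-memory buffer.
    #
    #     import io
    #     buf = io.BytesIO()
    #     n = sftp.getfo('/tmp/hello.txt', buf)
    #     data = buf.getvalue()       # the n bytes read from the remote file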
def get(self, remotepath, localpath, callback=None):
"""
Copy a remote file (``remotepath``) from the SFTP server to the local
host as ``localpath``. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
:param str remotepath: the remote file to copy
:param str localpath: the destination path on the local host
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
.. versionadded:: 1.4
.. versionchanged:: 1.7.4
Added the ``callback`` param
"""
file_size = self.stat(remotepath).st_size
with open(localpath, 'wb') as fl:
size = self.getfo(remotepath, fl, callback)
s = os.stat(localpath)
if s.st_size != size:
raise IOError('size mismatch in get! %d != %d' % (s.st_size, size))
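    # Usage sketch (not part of the original source): download with a progress
    # callback that receives (bytes_so_far, total_bytes).
    #
    #     def progress(done, total):
    #         print('%d / %d bytes' % (done, total))
    #
    #     sftp.get('/upload/report.csv', 'report_copy.csv', callback=progress)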
### internals...
def _request(self, t, *arg):
num = self._async_request(type(None), t, *arg)
return self._read_response(num)
def _async_request(self, fileobj, t, *arg):
# this method may be called from other threads (prefetch)
self._lock.acquire()
try:
msg = Message()
msg.add_int(self.request_number)
for item in arg:
if isinstance(item, long):
msg.add_int64(item)
elif isinstance(item, int):
msg.add_int(item)
elif isinstance(item, (string_types, bytes_types)):
msg.add_string(item)
elif isinstance(item, SFTPAttributes):
item._pack(msg)
else:
raise Exception('unknown type for %r type %r' % (item, type(item)))
num = self.request_number
self._expecting[num] = fileobj
self._send_packet(t, msg)
self.request_number += 1
finally:
self._lock.release()
return num
def _read_response(self, waitfor=None):
while True:
try:
t, data = self._read_packet()
except EOFError as e:
raise SSHException('Server connection dropped: %s' % str(e))
msg = Message(data)
num = msg.get_int()
if num not in self._expecting:
# might be response for a file that was closed before responses came back
self._log(DEBUG, 'Unexpected response #%d' % (num,))
if waitfor is None:
# just doing a single check
break
continue
fileobj = self._expecting[num]
del self._expecting[num]
if num == waitfor:
# synchronous
if t == CMD_STATUS:
self._convert_status(msg)
return t, msg
if fileobj is not type(None):
fileobj._async_response(t, msg, num)
if waitfor is None:
# just doing a single check
break
return None, None
def _finish_responses(self, fileobj):
while fileobj in self._expecting.values():
self._read_response()
fileobj._check_exception()
def _convert_status(self, msg):
"""
Raises EOFError or IOError on error status; otherwise does nothing.
"""
code = msg.get_int()
text = msg.get_text()
if code == SFTP_OK:
return
elif code == SFTP_EOF:
raise EOFError(text)
elif code == SFTP_NO_SUCH_FILE:
# clever idea from john a. meinel: map the error codes to errno
raise IOError(errno.ENOENT, text)
elif code == SFTP_PERMISSION_DENIED:
raise IOError(errno.EACCES, text)
else:
raise IOError(text)
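    # Illustration (not part of the original source): because _convert_status()
    # maps SFTP status codes onto errno-style exceptions, callers can handle a
    # missing remote file the same way they would local I/O errors.
    #
    #     import errno
    #     try:
    #         sftp.stat('/no/such/file')
    #     except IOError as e:
    #         if e.errno == errno.ENOENT:
    #             pass  # file does not exist on the server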
def _adjust_cwd(self, path):
"""
Return an adjusted path if we're emulating a "current working
directory" for the server.
"""
path = b(path)
if self._cwd is None:
return path
if len(path) and path[0:1] == b_slash:
# absolute path
return path
if self._cwd == b_slash:
return self._cwd + path
return self._cwd + b_slash + path
class SFTP(SFTPClient):
"""
    An alias for `.SFTPClient` for backwards compatibility.
"""
pass
| mit | 1,443,517,696,664,535,600 | 37.689614 | 107 | 0.564982 | false | 4.341789 | false | false | false |
praekelt/txtalert | txtalert/apps/gateway/migrations/0002_auto__add_field_sendsms_group__add_field_pleasecallme_group.py | 1 | 4637 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# all previous data belongs to Temba Lethu Clinic
from django.contrib.auth.models import Group
group, created = Group.objects.get_or_create(name="Temba Lethu")
# Adding field 'SendSMS.group'
db.add_column('gateway_sendsms', 'group', self.gf('django.db.models.fields.related.ForeignKey')(default=group.pk, to=orm['auth.Group']), keep_default=False)
# Adding field 'PleaseCallMe.group'
db.add_column('gateway_pleasecallme', 'group', self.gf('django.db.models.fields.related.ForeignKey')(default=group.pk, related_name='gateway_pleasecallme_set', to=orm['auth.Group']), keep_default=False)
def backwards(self, orm):
# Deleting field 'SendSMS.group'
db.delete_column('gateway_sendsms', 'group_id')
# Deleting field 'PleaseCallMe.group'
db.delete_column('gateway_pleasecallme', 'group_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gateway.pleasecallme': {
'Meta': {'ordering': "['created_at']", 'object_name': 'PleaseCallMe'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gateway_pleasecallme_set'", 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'recipient_msisdn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sender_msisdn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_id': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'gateway.sendsms': {
'Meta': {'object_name': 'SendSMS'},
'delivery': ('django.db.models.fields.DateTimeField', [], {}),
'delivery_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'expiry': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'msisdn': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'priority': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'receipt': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'smstext': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'v'", 'max_length': '1'})
}
}
complete_apps = ['gateway']
| gpl-3.0 | -8,851,398,235,679,534,000 | 58.448718 | 210 | 0.569334 | false | 3.688942 | false | false | false |
lizardsystem/freq | freq/lizard_connector.py | 1 | 30266 | import copy
import datetime as dt
import json
import logging
from pprint import pprint # left here for debugging purposes
from time import time
from time import sleep
import urllib.error
import urllib.parse
import urllib.request
import numpy as np
import django.core.exceptions
from freq import jsdatetime
try:
from django.conf import settings
USR, PWD = settings.USR, settings.PWD
except django.core.exceptions.ImproperlyConfigured:
print('WARNING: no USR and PWD found in settings. USR and PWD should have'
'been set beforehand')
USR = None
PWD = None
# When you use this script stand alone, please set your login information here:
# USR = ****** # Replace the stars with your user name.
# PWD = ****** # Replace the stars with your password.
logger = logging.getLogger(__name__)
def join_urls(*args):
return '/'.join(args)
class LizardApiError(Exception):
pass
class Base(object):
"""
Base class to connect to the different endpoints of the lizard-api.
:param data_type: endpoint of the lizard-api one wishes to connect to.
:param username: login username
:param password: login password
    :param use_header: no login and password are sent with the query when set
to False
:param extra_queries: In case one wishes to set default queries for a
        certain data type this is the place.
:param max_results:
"""
username = USR
password = PWD
max_results = 1000
@property
def extra_queries(self):
"""
Overwrite class to add queries
:return: dictionary with extra queries
"""
return {}
def organisation_query(self, organisation, add_query_string='location__'):
org_query = {}
if isinstance(organisation, str):
org_query.update({add_query_string + "organisation__unique_id":
organisation})
elif organisation:
org_query.update({
add_query_string + "organisation__unique_id": ','.join(
org for org in organisation)
})
if org_query:
return dict([urllib.parse.urlencode(org_query).split('=')])
else:
return {}
def __init__(self, base="https://ggmn.lizard.net", use_header=False,
data_type=None):
"""
:param base: the site one wishes to connect to. Defaults to the
Lizard ggmn production site.
"""
if data_type:
self.data_type = data_type
self.use_header = use_header
self.queries = {}
self.results = []
if base.startswith('http'):
self.base = base
else:
self.base = join_urls('https:/', base)
# without extra '/' ^^, this is added in join_urls
self.base_url = join_urls(self.base, 'api/v2', self.data_type) + '/'
def get(self, count=True, uuid=None, **queries):
"""
Query the api.
For possible queries see: https://nxt.staging.lizard.net/doc/api.html
Stores the api-response as a dict in the results attribute.
:param queries: all keyword arguments are used as queries.
:return: a dictionary of the api-response.
"""
if self.max_results:
queries.update({'page_size': self.max_results, 'format': 'json'})
queries.update(self.extra_queries)
queries.update(getattr(self, "queries", {}))
query = '?' + '&'.join(str(key) + '=' +
(('&' + str(key) + '=').join(value)
if isinstance(value, list) else str(value))
for key, value in queries.items())
url = urllib.parse.urljoin(self.base_url, str(uuid)) if uuid else \
self.base_url + query
try:
self.fetch(url)
except urllib.error.HTTPError: # TODO remove hack to prevent 420 error
self.json = {'results': [], 'count': 0}
try:
logger.debug('Number found %s : %s with URL: %s', self.data_type,
self.json.get('count', 0), url)
except (KeyError, AttributeError):
logger.debug('Got results from %s with URL: %s',
self.data_type, url)
self.parse()
return self.results
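    # Usage sketch (not part of the original source): keyword arguments become
    # URL query parameters. For example, querying the timeseries endpoint for a
    # named location (the location name below is a placeholder):
    #
    #     ts = Base(data_type='timeseries')
    #     results = ts.get(location__name='some-location-name')
    #     print(len(results))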
def fetch(self, url):
"""
GETs parameters from the api based on an url in a JSON format.
Stores the JSON response in the json attribute.
:param url: full query url: should be of the form:
[base_url]/api/v2/[endpoint]/?[query_key]=[query_value]&...
:return: the JSON from the response
"""
if self.use_header:
request_obj = urllib.request.Request(url, headers=self.header)
else:
request_obj = urllib.request.Request(url)
try:
with urllib.request.urlopen(request_obj) as resp:
encoding = resp.headers.get_content_charset()
encoding = encoding if encoding else 'UTF-8'
content = resp.read().decode(encoding)
self.json = json.loads(content)
except Exception:
logger.exception("got error from: %s", url)
raise
return self.json
def parse(self):
"""
Parse the json attribute and store it to the results attribute.
All pages of a query are parsed. If the max_results attribute is
exceeded an ApiError is raised.
"""
while True:
try:
if self.json['count'] > self.max_results:
raise LizardApiError(
'Too many results: {} found, while max {} are '
'accepted'.format(self.json['count'], self.max_results)
)
self.results += self.json['results']
next_url = self.json.get('next')
if next_url:
self.fetch(next_url)
else:
break
except KeyError:
self.results += [self.json]
break
except IndexError:
break
def parse_elements(self, element):
"""
Get a list of a certain element from the root of the results attribute.
:param element: the element you wish to get.
:return: A list of all elements in the root of the results attribute.
"""
self.parse()
return [x[element] for x in self.results]
@property
def header(self):
"""
The header with credentials for the api.
"""
if self.use_header:
return {
"username": self.username,
"password": self.password
}
return {}
class Organisations(Base):
"""
Makes a connection to the organisations endpoint of the lizard api.
"""
data_type = 'organisations'
def all(self, organisation=None):
"""
:return: a list of organisations belonging one has access to
(with the credentials from the header attribute)
"""
if organisation:
self.get(unique_id=organisation)
else:
self.get()
self.parse()
return self.parse_elements('unique_id')
class Locations(Base):
"""
Makes a connection to the locations endpoint of the lizard api.
"""
def __init__(self, base="https://ggmn.lizard.net", use_header=False):
self.data_type = 'locations'
self.uuids = []
super().__init__(base, use_header)
def bbox(self, south_west, north_east, organisation=None):
"""
Find all locations within a certain bounding box.
returns records within bounding box using Bounding Box format (min Lon,
min Lat, max Lon, max Lat). Also returns features with overlapping
geometry.
        :param south_west: latitude and longitude of the south-western point
        :param north_east: latitude and longitude of the north-eastern point
:return: a dictionary of the api-response.
"""
min_lat, min_lon = south_west
max_lat, max_lon = north_east
coords = self.commaify(min_lon, min_lat, max_lon, max_lat)
org_query = self.organisation_query(organisation, '')
self.get(in_bbox=coords, **org_query)
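    # Usage sketch (not part of the original source): coordinates are given as
    # (lat, lon) pairs and converted to the 'min lon, min lat, max lon, max lat'
    # string the API expects. The coordinates below are placeholder values.
    #
    #     locs = Locations()
    #     locs.bbox(south_west=(51.9, 4.3), north_east=(52.1, 4.6))
    #     print(locs.coord_uuid_name())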
def distance_to_point(self, distance, lat, lon, organisation=None):
"""
Returns records with distance meters from point. Distance in meters
is converted to WGS84 degrees and thus an approximation.
:param distance: meters from point
        :param lon: longitude of point
:param lat: latitude of point
:return: a dictionary of the api-response.
"""
coords = self.commaify(lon, lat)
org_query = self.organisation_query(organisation, '')
self.get(distance=distance, point=coords, **org_query)
def commaify(self, *args):
"""
:return: a comma-seperated string of the given arguments
"""
return ','.join(str(x) for x in args)
def coord_uuid_name(self):
"""
Filters out the coordinates UUIDs and names of locations in results.
Use after a query is made.
:return: a dictionary with coordinates, UUIDs and names
"""
result = {}
for x in self.results:
if x['uuid'] not in self.uuids:
geom = x.get('geometry') or {}
result[x['uuid']] = {
'coordinates': geom.get(
'coordinates', ['','']),
'name': x['name']
}
self.uuids.append(x['uuid'])
return result
class TaskAPI(Base):
data_type = 'tasks'
def poll(self, url=None):
if url is None or not url.startswith('http'):
return
self.fetch(url)
@property
def status(self):
try:
logger.debug('Task status: %s', self.json.get("task_status"))
status = self.json.get("task_status")
if status is None:
logger.debug('Task status: NONE')
return "NONE"
return status
except AttributeError:
logger.debug('Task status: NONE')
return "NONE"
def timeseries_csv(self, organisation, extra_queries_ts):
if self.status != "SUCCESS":
raise LizardApiError('Download not ready.')
url = self.json.get("result_url")
self.fetch(url)
self.results = []
self.parse()
csv = (
[result['name'], result['uuid'],
jsdatetime.js_to_datestring(event['timestamp']), event['max']]
for result in self.results for event in result['events']
)
loc = Locations(use_header=self.use_header)
extra_queries = {
key if not key.startswith("location__") else key[10:]: value
for key, value in extra_queries_ts.items()
}
org_query = self.organisation_query(organisation, '')
extra_queries.update(**org_query)
loc.get(**extra_queries)
coords = loc.coord_uuid_name()
headers = (
[
r['uuid'], r['name'], coords[r['location']['uuid']]['name'],
coords[r['location']['uuid']]['coordinates'][0],
coords[r['location']['uuid']]['coordinates'][1]
]
for r in self.results
)
return headers, csv
class TimeSeries(Base):
"""
Makes a connection to the timeseries endpoint of the lizard api.
"""
def __init__(self, base="https://ggmn.lizard.net", use_header=False):
self.data_type = 'timeseries'
self.uuids = []
self.statistic = None
super().__init__(base, use_header)
def location_name(self, name, organisation=None):
"""
Returns time series metadata for a location by name.
:param name: name of a location
:return: a dictionary of with nested location, aquo quantities and
events.
"""
org_query = self.organisation_query(organisation)
return self.get(location__name=name, **org_query)
def location_uuid(self, loc_uuid, start='0001-01-01T00:00:00Z', end=None,
organisation=None):
"""
Returns time series for a location by location-UUID.
        :param loc_uuid: UUID of a location
:param start: start timestamp in ISO 8601 format
:param end: end timestamp in ISO 8601 format, defaults to now
:return: a dictionary of with nested location, aquo quantities and
events.
"""
org_query = self.organisation_query(organisation)
self.get(location__uuid=loc_uuid, **org_query)
timeseries_uuids = [x['uuid'] for x in self.results]
self.results = []
for ts_uuid in timeseries_uuids:
ts = TimeSeries(self.base, use_header=self.use_header)
ts.uuid(ts_uuid, start, end, organisation)
self.results += ts.results
return self.results
def uuid(self, ts_uuid, start='0001-01-01T00:00:00Z', end=None,
organisation=None):
"""
Returns time series for a timeseries by timeseries-UUID.
:param ts_uuid: uuid of a timeseries
:param start: start timestamp in ISO 8601 format
:param end: end timestamp in ISO 8601 format
:return: a dictionary of with nested location, aquo quantities and
events.
"""
if not end:
end = jsdatetime.now_iso()
old_base_url = self.base_url
self.base_url += ts_uuid + "/"
org_query = self.organisation_query(organisation)
self.get(start=start, end=end, **org_query)
self.base_url = old_base_url
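    # Usage sketch (not part of the original source): fetch the events of a
    # single timeseries over a time window. The uuid below is a placeholder,
    # not a real timeseries id.
    #
    #     ts = TimeSeries()
    #     ts.uuid('00000000-0000-0000-0000-000000000000',
    #             start='2015-01-01T00:00:00Z', end='2016-01-01T00:00:00Z')
    #     print(ts.results)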
def start_csv_task(self, start='0001-01-01T00:00:00Z', end=None,
organisation=None):
if not end:
end = jsdatetime.now_iso()
if isinstance(start, int):
start -= 10000
if isinstance(end, int):
end += 10000
org_query = self.organisation_query(organisation)
        # 'async' is a reserved keyword from Python 3.7 on, so it is passed
        # through a query dict rather than as a literal keyword argument.
        query = {"start": start, "end": end, "async": "true", "format": "json"}
        query.update(org_query)
        poll_url = self.get(**query)[0]['url']
logger.debug("Async task url %s", poll_url)
return poll_url, self.extra_queries
def bbox(self, south_west, north_east, statistic=None,
start='0001-01-01T00:00:00Z', end=None, organisation=None):
"""
Find all timeseries within a certain bounding box.
Returns records within bounding box using Bounding Box format (min Lon,
min Lat, max Lon, max Lat). Also returns features with overlapping
geometry.
        :param south_west: latitude and longitude of the south-western point
        :param north_east: latitude and longitude of the north-eastern point
        :param start: start timestamp in ISO 8601 format
:param end: end timestamp in ISO 8601 format
:return: a dictionary of the api-response.
"""
if not end:
end = jsdatetime.now_iso()
if isinstance(start, int):
start -= 10000
if isinstance(end, int):
end += 10000
min_lat, min_lon = south_west
max_lat, max_lon = north_east
polygon_coordinates = [
[min_lon, min_lat],
[min_lon, max_lat],
[max_lon, max_lat],
[max_lon, min_lat],
[min_lon, min_lat],
]
points = [' '.join([str(x), str(y)]) for x, y in polygon_coordinates]
geom_within = {'a': 'POLYGON ((' + ', '.join(points) + '))'}
geom_within = urllib.parse.urlencode(geom_within).split('=')[1]
org_query = self.organisation_query(organisation)
self.statistic = statistic
if statistic == 'mean':
statistic = ['count', 'sum']
elif not statistic:
statistic = ['min', 'max', 'count', 'sum']
self.statistic = None
elif statistic == 'range (max - min)':
statistic = ['min', 'max']
elif statistic == 'difference (last - first)':
statistic = 'count'
elif statistic == 'difference (mean last - first year)':
year = dt.timedelta(days=366)
first_end = jsdatetime.datetime_to_js(jsdatetime.js_to_datetime(start) + year)
last_start = jsdatetime.datetime_to_js(jsdatetime.js_to_datetime(end) - year)
self.get(
start=start,
end=first_end,
min_points=1,
fields=['count', 'sum'],
location__geom_within=geom_within,
**org_query
)
first_year = {}
for r in self.results:
try:
first_year[r['location']['uuid']] = {
'first_value_timestamp': r['first_value_timestamp'],
'mean': r['events'][0]['sum'] / r['events'][0]['count']
}
except IndexError:
first_year[r['location']['uuid']] = {
'first_value_timestamp': np.nan,
'mean': np.nan
}
self.results = []
self.get(
start=last_start,
end=end,
min_points=1,
fields=['count', 'sum'],
location__geom_within=geom_within,
**org_query
)
for r in self.results:
try:
r['events'][0]['difference (mean last - first year)'] = \
r['events'][0]['sum'] / r['events'][0]['count'] - \
first_year[r['location']['uuid']]['mean']
r['first_value_timestamp'] = \
first_year[
r['location']['uuid']]['first_value_timestamp']
except IndexError:
r['events'] = [{
'difference (mean last - first year)': np.nan}]
r['first_value_timestamp'] = np.nan
r['last_value_timestamp'] = np.nan
return
self.get(
start=start,
end=end,
min_points=1,
fields=statistic,
location__geom_within=geom_within,
**org_query
)
def ts_to_dict(self, statistic=None, values=None,
start_date=None, end_date=None, date_time='js'):
"""
:param date_time: default: js. Several options:
'js': javascript integer datetime representation
'dt': python datetime object
'str': date in date format (dutch representation)
"""
if len(self.results) == 0:
self.response = {}
return self.response
if values:
values = values
else:
values = {}
if not statistic and self.statistic:
statistic = self.statistic
# np array with cols: 'min', 'max', 'sum', 'count', 'first', 'last'
if not statistic:
stats1 = ('min', 'max', 'sum', 'count')
stats2 = (
(0, 'min'),
(1, 'max'),
(2, 'mean'),
(3, 'range (max - min)'),
(4, 'difference (last - first)'),
(5, 'difference (mean last - first year)')
)
start_index = 6
else:
if statistic == 'mean':
stats1 = ('sum', 'count')
elif statistic == 'range (max - min)':
stats1 = ('min', 'max')
else:
stats1 = (statistic, )
stats2 = ((0, statistic), )
start_index = int(statistic == 'mean') + 1
ts = []
for result in self.results:
try:
timestamps = [int(result['first_value_timestamp']),
int(result['last_value_timestamp'])]
            except (ValueError, TypeError):
                # missing or unparseable timestamp, e.g. int(None)
                timestamps = [np.nan, np.nan]
if not len(result['events']):
y = 2 if statistic == 'difference (mean last - first year)' \
else 0
ts.append(
[np.nan for _ in range(len(stats1) + y)] + timestamps)
else:
ts.append([float(result['events'][0][s]) for s in stats1] +
timestamps)
npts = np.array(ts)
if statistic:
if statistic == 'mean':
stat = (npts[:, 0] / npts[:, 1]).reshape(-1, 1)
elif statistic == 'range (max - min)':
stat = (npts[:, 1] - npts[:, 0]).reshape(-1, 1)
elif statistic == 'difference (last - first)':
stat = (npts[:, 1] - npts[:, 0]).reshape(-1, 1)
else:
stat = npts[:, 0].reshape(-1, 1)
npts_calculated = np.hstack(
(stat, npts[:, slice(start_index, -1)]))
else:
npts_calculated = np.hstack((
npts[:, 0:2],
(npts[:, 2] / npts[:, 3]).reshape(-1, 1),
(npts[:, 1] - npts[:, 0]).reshape(-1, 1),
npts[:, 4:]
))
for i, row in enumerate(npts_calculated):
location_uuid = self.results[i]['location']['uuid']
loc_dict = values.get(location_uuid, {})
loc_dict.update({stat: 'NaN' if np.isnan(row[i]) else row[i]
for i, stat in stats2})
loc_dict['timeseries_uuid'] = self.results[i]['uuid']
values[location_uuid] = loc_dict
npts_min = np.nanmin(npts_calculated, 0)
npts_max = np.nanmax(npts_calculated, 0)
extremes = {
stat: {
'min': npts_min[i] if not np.isnan(npts_min[i]) else 0,
'max': npts_max[i] if not np.isnan(npts_max[i]) else 0
} for i, stat in stats2
}
dt_conversion = {
'js': lambda x: x,
'dt': jsdatetime.js_to_datetime,
'str': jsdatetime.js_to_datestring
}[date_time]
if statistic != 'difference (mean last - first year)':
start = dt_conversion(max(jsdatetime.round_js_to_date(start_date),
jsdatetime.round_js_to_date(npts_min[-2])))
end = dt_conversion(min(jsdatetime.round_js_to_date(end_date),
jsdatetime.round_js_to_date(npts_max[-1])))
else:
start = dt_conversion(jsdatetime.round_js_to_date(start_date))
end = dt_conversion(jsdatetime.round_js_to_date(end_date))
self.response = {
"extremes": extremes,
"dates": {
"start": start,
"end": end
},
"values": values
}
return self.response
class GroundwaterLocations(Locations):
"""
Makes a connection to the locations endpoint of the lizard api.
Only selects GroundwaterStations.
"""
@property
def extra_queries(self):
return {
"object_type__model": 'filter'
}
class GroundwaterTimeSeries(TimeSeries):
"""
Makes a connection to the timeseries endpoint of the lizard api.
Only selects GroundwaterStations.
"""
@property
def extra_queries(self):
return {
"location__object_type__model": 'filter'
}
class GroundwaterTimeSeriesAndLocations(object):
def __init__(self):
self.locs = GroundwaterLocations()
self.ts = GroundwaterTimeSeries()
self.values = {}
def bbox(self, south_west, north_east, start='0001-01-01T00:00:00Z',
end=None, groundwater_type="GWmMSL"):
if not end:
self.end = jsdatetime.now_iso()
else:
self.end = end
self.start = start
self.ts.queries = {"name": groundwater_type}
self.locs.bbox(south_west, north_east)
self.ts.bbox(south_west=south_west, north_east=north_east,
start=start, end=self.end)
def locs_to_dict(self, values=None):
if values:
self.values = values
for loc in self.locs.results:
self.values.get(loc['uuid'], {}).update({
'coordinates': loc['geometry']['coordinates'],
'name': loc['name']
})
self.response = self.values
def results_to_dict(self):
self.locs_to_dict()
self.ts.ts_to_dict(values=self.values)
return self.ts.response
class RasterFeatureInfo(Base):
data_type = 'raster-aggregates'
def wms(self, lat, lng, layername, extra_params=None):
if 'igrac' in layername:
self.base_url = "https://raster.lizard.net/wms"
lat_f = float(lat)
lng_f = float(lng)
self.get(
request="getfeatureinfo",
layers=layername,
width=1,
height=1,
i=0,
j=0,
srs="epsg:4326",
bbox=','.join(
[lng, lat, str(lng_f+0.00001), str(lat_f+0.00001)]),
index="world"
)
try:
self.results = {"data": [self.results[1]]}
except IndexError:
self.results = {"data": ['null']}
elif layername == 'aquifers':
self.base_url = "https://ggis.un-igrac.org/geoserver/tbamap2015/wms"
extra_params.update({
'request': 'GetFeatureInfo',
'service': 'WMS',
'srs': 'EPSG:4326',
'info_format': 'application/json'
})
self.get(**extra_params)
self.results = {
'data': self.results['features'][0]['properties']['aq_name']}
else:
self.get(
agg='curve',
geom='POINT(' + lng + '+' + lat + ')',
srs='EPSG:4326',
raster_names=layername,
count=False
)
return self.results
def parse(self):
self.results = self.json
class RasterLimits(Base):
data_type = 'wms'
def __init__(self, base="https://raster.lizard.net",
use_header=False):
super().__init__(base, use_header)
self.base_url = join_urls(base, self.data_type)
self.max_results = None
def get_limits(self, layername, bbox):
try:
return self.get(
request='getlimits',
layers=layername,
bbox=bbox,
width=16,
height=16,
srs='epsg:4326'
)
except urllib.error.HTTPError:
return [[-1000, 1000]]
def parse(self):
self.results = self.json
class Filters(Base):
data_type = "filters"
def from_timeseries_uuid(self, uuid):
# We know the timeseries uuid. Timeseries are connected to locations
# and the locations are connected to the filters that contain the
# relevant information.
# first get the location uuid from the timeseries.
ts = Base(use_header=self.use_header, data_type='timeseries')
location_data = ts.get(uuid=uuid)[0]['location']
location_uuid = location_data.get('uuid')
# surface_level is stored in the extra_metadata field of a location
try:
surface_level = str(location_data.get("extra_metadata")
.get("surface_level")) + " (m)"
except AttributeError:
surface_level = None
# next get the location for the filter id
location = Base(use_header=self.use_header, data_type='locations')
try:
filter_id = location.get(uuid=location_uuid)[0].get(
'object').get('id')
except TypeError:
# the location doesn't connect to a filter, return empty
return {}
if filter_id:
# next get and return the filter metadata
gw_filter = Base(use_header=self.use_header, data_type='filters')
result = gw_filter.get(uuid=filter_id)[0]
result.update({
"surface_level": surface_level
})
return result
return {}
class Users(Base):
data_type = "users"
def get_organisations(self, username):
self.get(username=username)
if len(self.results) > 1 or len(self.results) == 0:
if len(self.results):
raise LizardApiError("Username is not unique")
raise LizardApiError("Username not found")
organisations_url = self.results[0].get("organisations_url")
organisations = {
org['name']: org['unique_id'] for org in
self.fetch(organisations_url)
}
logger.debug('Found %d organisations for url: %s', len(organisations),
organisations_url)
if settings.DEFAULT_ORGANISATION_NAME in organisations.keys():
default_org = [(
settings.DEFAULT_ORGANISATION_NAME,
organisations[settings.DEFAULT_ORGANISATION_NAME])
]
del organisations[settings.DEFAULT_ORGANISATION_NAME]
return default_org + sorted(organisations.items())
return sorted(organisations.items())
if __name__ == '__main__':
end = "1452470400000"
start = "-2208988800000"
start_time = time()
GWinfo = GroundwaterTimeSeriesAndLocations()
GWinfo.bbox(south_west=[-65.80277639340238, -223.9453125], north_east=[
81.46626086056541, 187.3828125], start=start, end=end)
x = GWinfo.results_to_dict()
print(time() - start_time)
pprint(x)
| gpl-3.0 | -8,302,654,246,957,907,000 | 34.988109 | 90 | 0.52858 | false | 4.142056 | false | false | false |
mne-tools/mne-python | mne/preprocessing/realign.py | 1 | 4237 | # -*- coding: utf-8 -*-
# Authors: Eric Larson <[email protected]>
# License: BSD (3-clause)
import numpy as np
from numpy.polynomial.polynomial import Polynomial
from ..io import BaseRaw
from ..utils import _validate_type, warn, logger, verbose
@verbose
def realign_raw(raw, other, t_raw, t_other, verbose=None):
"""Realign two simultaneous recordings.
Due to clock drift, recordings at a given same sample rate made by two
separate devices simultaneously can become out of sync over time. This
function uses event times captured by both acquisition devices to resample
``other`` to match ``raw``.
Parameters
----------
raw : instance of Raw
The first raw instance.
other : instance of Raw
The second raw instance. It will be resampled to match ``raw``.
t_raw : array-like, shape (n_events,)
The times of shared events in ``raw`` relative to ``raw.times[0]`` (0).
Typically these could be events on some TTL channel like
    ``find_events(raw)[:, 0] - raw.first_samp``.
t_other : array-like, shape (n_events,)
The times of shared events in ``other`` relative to ``other.times[0]``.
%(verbose)s
Notes
-----
This function operates inplace. It will:
1. Estimate the zero-order (start offset) and first-order (clock drift)
correction.
2. Crop the start of ``raw`` or ``other``, depending on which started
recording first.
3. Resample ``other`` to match ``raw`` based on the clock drift.
4. Crop the end of ``raw`` or ``other``, depending on which stopped
recording first (and the clock drift rate).
This function is primarily designed to work on recordings made at the same
sample rate, but it can also operate on recordings made at different
sample rates to resample and deal with clock drift simultaneously.
.. versionadded:: 0.22
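    Examples
    --------
    A minimal sketch (an assumed call pattern, not taken from the original
    docs), given two simultaneously recorded ``raw`` and ``other`` instances
    that share TTL events::

        >>> ev_raw = mne.find_events(raw)      # doctest: +SKIP
        >>> ev_other = mne.find_events(other)  # doctest: +SKIP
        >>> t_raw = (ev_raw[:, 0] - raw.first_samp) / raw.info['sfreq']  # doctest: +SKIP
        >>> t_other = (ev_other[:, 0] - other.first_samp) / other.info['sfreq']  # doctest: +SKIP
        >>> realign_raw(raw, other, t_raw, t_other)  # doctest: +SKIP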
"""
from scipy import stats
_validate_type(raw, BaseRaw, 'raw')
_validate_type(other, BaseRaw, 'other')
t_raw = np.array(t_raw, float)
t_other = np.array(t_other, float)
if t_raw.ndim != 1 or t_raw.shape != t_other.shape:
raise ValueError('t_raw and t_other must be 1D with the same shape, '
f'got shapes {t_raw.shape} and {t_other.shape}')
if len(t_raw) < 20:
warn('Fewer than 20 times passed, results may be unreliable')
# 1. Compute correction factors
poly = Polynomial.fit(x=t_other, y=t_raw, deg=1)
converted = poly.convert(domain=(-1, 1))
[zero_ord, first_ord] = converted.coef
logger.info(f'Zero order coefficient: {zero_ord} \n'
f'First order coefficient: {first_ord}')
r, p = stats.pearsonr(t_other, t_raw)
msg = f'Linear correlation computed as R={r:0.3f} and p={p:0.2e}'
if p > 0.05 or r <= 0:
raise ValueError(msg + ', cannot resample safely')
if p > 1e-6:
warn(msg + ', results may be unreliable')
else:
logger.info(msg)
dr_ms_s = 1000 * abs(1 - first_ord)
logger.info(
f'Drift rate: {1000 * dr_ms_s:0.1f} μs/sec '
f'(total drift over {raw.times[-1]:0.1f} sec recording: '
f'{raw.times[-1] * dr_ms_s:0.1f} ms)')
# 2. Crop start of recordings to match using the zero-order term
msg = f'Cropping {zero_ord:0.3f} sec from the start of '
if zero_ord > 0: # need to crop start of raw to match other
logger.info(msg + 'raw')
raw.crop(zero_ord, None)
t_raw -= zero_ord
else: # need to crop start of other to match raw
logger.info(msg + 'other')
other.crop(-zero_ord, None)
t_other += zero_ord
# 3. Resample data using the first-order term
logger.info('Resampling other')
sfreq_new = raw.info['sfreq'] * first_ord
other.load_data().resample(sfreq_new, verbose=True)
other.info['sfreq'] = raw.info['sfreq']
# 4. Crop the end of one of the recordings if necessary
delta = raw.times[-1] - other.times[-1]
msg = f'Cropping {abs(delta):0.3f} sec from the end of '
if delta > 0:
logger.info(msg + 'raw')
raw.crop(0, other.times[-1])
elif delta < 0:
logger.info(msg + 'other')
other.crop(0, raw.times[-1])
| bsd-3-clause | -3,332,209,393,944,654,300 | 37.509091 | 79 | 0.627007 | false | 3.421648 | false | false | false |
jamii/inkling | jottinks/src/NoteTree2.py | 1 | 4804 | """
Copyright 2008 Jamie Brandon, Mark Haines
This file is part of jottinKs.
JottinKs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
JottinKs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with jottinKs. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from Note import *
import Utils
from Writing import *
from PyKDE4.kdecore import *
from PyKDE4.kdeui import *
from PyQt4 import uic
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import cPickle
import pickle
class NoteTree(QTreeWidget):
def __init__(self, root=None):
QTreeWidget.__init__(self)
self.header().hide()
self.setColumnCount(1)
if root:
self.root = root
else:
self.root = NoteTreeRoot()
self.addTopLevelItem(self.root)
self.root.setTitle()
self.connect(self,SIGNAL("itemClicked (QTreeWidgetItem *,int)"),self.treeItemClicked)
self.actionList = None
self.selectedItem = self.root.next()
def treeItemClicked(self,item,column):
print "Got click", item.noteData.title
self.clearSelection()
self.selectedItem = item
item.setSelected(True)
self.scrollToItem(item)
self.showNote(item.noteData)
item.setTitle()
def showNote(self,noteData):
self.emit(SIGNAL("showNote(PyQt_PyObject)"),noteData)
def click(self,item):
print "Sent click", item.noteData.title
self.emit(SIGNAL("itemClicked (QTreeWidgetItem *,int)"),item,0)
# !!! Do I need this?
def addNote(self,note):
self.root.addChild(NoteTreeItem(note))
def newNote(self):
item = NoteTreeItem(Writing())
self.selectedItem.parent().insertChild(self.selectedItem.index()+1,item)
item.setTitle()
self.click(item)
print "added" , item, item.parent()
def newSubNote(self):
item = NoteTreeItem(Writing())
self.selectedItem.addChild(item)
item.setTitle()
self.click(item)
def deleteNote(self):
print "Will delete:", self.selectedItem
print "Parent is:" , self.selectedItem.parent()
deletee = self.selectedItem
self.click(deletee.previousItem())
deletee.remove()
def actions(self):
if not self.actionList:
newNote = KAction(KIcon("new"),i18n("New note"), self)
self.connect(newNote,SIGNAL("triggered()"),self.newNote)
newSubNote = KAction(KIcon("new"),i18n("New subnote"), self)
self.connect(newSubNote,SIGNAL("triggered()"),self.newSubNote)
deleteNote = KAction(KIcon("delete"),i18n("Delete note"), self)
self.connect(deleteNote,SIGNAL("triggered()"),self.deleteNote)
self.actionList = [newNote, newSubNote, deleteNote]
return self.actionList
def topLevelItems(self):
i = 0
length = self.root.childCount()
while i<length:
yield self.root.child(i)
i += 1
def __reduce__(self):
		return (NoteTree,(self.root,))
def __reduce_ex__(self,i):
return self.__reduce__()
class NoteTreeItem(QTreeWidgetItem):
def __init__(self, noteData=None, children = []):
QTreeWidgetItem.__init__(self)
self.noteData = noteData
for child in children:
self.addChild(child)
# Cant call this until the item has been added to the tree
def setTitle(self):
self.treeWidget().setItemWidget(self,0,QLabel("Bugger"))
for child in self.children():
child.setTitle()
def children(self):
children = []
for i in range(0,self.childCount()):
children.append(self.child(i))
return children
def index(self):
return self.parent().indexOfChild(self)
def previousItem(self):
i = self.index()
if i==0:
return self.parent()
else:
return self.parent().child(i-1)
def nextItem(self):
i = self.index()
if i+1 == self.parent().childCount():
return self.parent().nextItem()
else:
return self.parent().child(i+1)
def remove(self):
self.parent().removeChild(self)
def __reduce__(self):
return (NoteTreeItem,(self.noteData,self.children()))
class NoteTreeRoot(NoteTreeItem):
def __init__(self,children=[]):
NoteTreeItem.__init__(self,Writing(),children)
self.setText(0,"Root")
def parent(self):
return self
# This makes the new note function work.
# If we use index anywhere else it may cause some pain
def index(self):
return self.childCount() - 1
def previous(self):
return self
def next(self):
if self.childCount():
return self.child(0)
else:
return self
def remove(self):
pass
def __reduce__(self):
return (NoteTreeRoot,(self.children(),)) | gpl-3.0 | 634,833,686,618,809,700 | 24.289474 | 87 | 0.696295 | false | 3.189907 | false | false | false |
brianjimenez/lightdock | lightdock/scoring/dfire2/driver.py | 1 | 7814 | """DFIRE2 potential scoring function
Yuedong Yang, Yaoqi Zhou. Ab initio folding of terminal segments with secondary structures
reveals the fine difference between two closely related all-atom statistical energy functions.
Protein Science, 17:1212-1219 (2008)
"""
import os
import numpy as np
from lightdock.structure.model import DockingModel
from lightdock.scoring.functions import ModelAdapter, ScoringFunction
from lightdock.structure.space import SpacePoints
from lightdock.scoring.dfire2.c.cdfire2 import calculate_dfire2
from lightdock.constants import DEFAULT_CONTACT_RESTRAINTS_CUTOFF
# Potential constants
atom_type_number = 167
bin_number = 30
DFIRE2_ATOM_TYPES = {'GLY CA': 40, 'HIS C': 45, 'VAL O': 137, 'GLY O': 42, 'GLY N': 39, 'HIS O': 46, 'HIS N': 43,
'TRP CE3': 151, 'GLY C': 41, 'TRP CE2': 150, 'LYS NZ': 69, 'MET C': 80, 'VAL N': 134, 'PRO CA': 95,
'MET O': 81, 'MET N': 78, 'SER OG': 126, 'ARG NH2': 120, 'VAL C': 136, 'THR CG2': 133, 'ALA CB': 4,
'ALA CA': 1, 'TRP CG': 146, 'TRP CA': 142, 'TRP CB': 145, 'ALA N': 0, 'ILE CB': 57, 'ILE CA': 54,
'TRP CH2': 154, 'GLU CA': 20, 'GLU CB': 23, 'GLU CD': 25, 'GLU CG': 24, 'HIS CG': 48,
'ASP OD1': 17, 'HIS CA': 44, 'CYS N': 5, 'CYS O': 8, 'HIS CE1': 51, 'TYR CG': 160, 'TYR CA': 156,
'TYR CB': 159, 'CYS C': 7, 'ARG CB': 114, 'LYS C': 63, 'ARG CG': 115, 'ARG CD': 116,
'THR OG1': 132, 'LYS O': 64, 'LYS N': 61, 'SER C': 123, 'ILE CD1': 60, 'PRO CB': 98, 'PRO CD': 100,
'PRO CG': 99, 'ARG CZ': 118, 'SER O': 124, 'SER N': 121, 'PHE CD1': 34, 'PHE CD2': 35,
'THR CA': 128, 'HIS CD2': 50, 'THR CB': 131, 'PRO C': 96, 'PRO N': 94, 'PRO O': 97, 'PHE CA': 29,
'MET CE': 85, 'MET CG': 83, 'MET CA': 79, 'ILE C': 55, 'MET CB': 82, 'TRP CD2': 148,
'TRP CD1': 147, 'GLN CD': 107, 'ILE CG1': 58, 'ILE CG2': 59, 'PHE CE2': 37, 'PHE CE1': 36,
'GLU OE1': 26, 'GLU OE2': 27, 'ASP CG': 16, 'ASP CB': 15, 'ASP CA': 12, 'THR O': 130, 'THR N': 127,
'SER CA': 122, 'SER CB': 125, 'PHE CG': 33, 'GLU O': 22, 'GLU N': 19, 'PHE CB': 32, 'VAL CG1': 139,
'GLU C': 21, 'ILE O': 56, 'ILE N': 53, 'GLN CA': 102, 'GLN CB': 105, 'ASN C': 88, 'VAL CG2': 140,
'TRP CZ2': 152, 'TRP CZ3': 153, 'PHE CZ': 38, 'TRP O': 144, 'TRP N': 141, 'LEU CB': 74,
'GLN N': 101, 'GLN O': 104, 'LEU O': 73, 'GLN C': 103, 'TRP C': 143, 'HIS CB': 47, 'GLN NE2': 109,
'LEU CD2': 77, 'ASP OD2': 18, 'LEU CD1': 76, 'VAL CA': 135, 'ASN OD1': 92, 'ALA O': 3,
'MET SD': 84, 'ALA C': 2, 'THR C': 129, 'TYR CD1': 161, 'ARG NH1': 119, 'TYR CD2': 162,
'ASN ND2': 93, 'TRP NE1': 149, 'HIS ND1': 49, 'LEU C': 72, 'ASN O': 89, 'ASN N': 86, 'ASP C': 13,
'LEU CA': 71, 'ASP O': 14, 'ASP N': 11, 'CYS CB': 9, 'LEU N': 70, 'LEU CG': 75, 'CYS CA': 6,
'TYR OH': 166, 'ASN CA': 87, 'ASN CB': 90, 'ASN CG': 91, 'TYR CE2': 164, 'ARG C': 112,
'TYR CE1': 163, 'HIS NE2': 52, 'ARG O': 113, 'ARG N': 110, 'TYR C': 157, 'GLN CG': 106,
'ARG CA': 111, 'TYR N': 155, 'TYR O': 158, 'CYS SG': 10, 'TYR CZ': 165, 'ARG NE': 117,
'VAL CB': 138, 'LYS CB': 65, 'LYS CA': 62, 'PHE C': 30, 'LYS CG': 66, 'LYS CE': 68, 'LYS CD': 67,
'GLN OE1': 108, 'PHE N': 28, 'PHE O': 31}
class DFIRE2Potential(object):
"""Loads DFIRE2 potentials information"""
def __init__(self):
data_path = os.path.dirname(os.path.realpath(__file__)) + '/data/'
self.energy = np.load(data_path + 'dfire2_energies.npy').ravel()
class DFIRE2Object(object):
def __init__(self, residue_index, atom_index):
self.residue_index = residue_index
self.atom_index = atom_index
class DFIRE2Adapter(ModelAdapter, DFIRE2Potential):
"""Adapts a given Complex to a DockingModel object suitable for this
DFIRE2 scoring function.
"""
def _get_docking_model(self, molecule, restraints):
"""Builds a suitable docking model for this scoring function"""
objects = []
coordinates = []
parsed_restraints = {}
atom_index = 0
for residue in molecule.residues:
for rec_atom in residue.atoms:
rec_atom_type = rec_atom.residue_name + ' ' + rec_atom.name
if rec_atom_type in DFIRE2_ATOM_TYPES:
objects.append(DFIRE2Object(residue.number, DFIRE2_ATOM_TYPES[rec_atom_type]))
coordinates.append([rec_atom.x, rec_atom.y, rec_atom.z])
# Restraints support
res_id = "%s.%s.%s" % (rec_atom.chain_id, residue.name, str(residue.number))
if restraints and res_id in restraints:
try:
parsed_restraints[res_id].append(atom_index)
except:
parsed_restraints[res_id] = [atom_index]
atom_index += 1
try:
return DockingModel(objects, SpacePoints(coordinates), parsed_restraints, n_modes=molecule.n_modes.copy())
except AttributeError:
return DockingModel(objects, SpacePoints(coordinates), parsed_restraints)
class DFIRE2(ScoringFunction):
"""Implements DFIRE2 potential"""
def __init__(self, weight=1.0):
super(DFIRE2, self).__init__(weight)
self.cached = False
self.potential = DFIRE2Potential()
def __call__(self, receptor, receptor_coordinates, ligand, ligand_coordinates):
if not self.cached:
self.res_index = []
self.atom_index = []
for o in receptor.objects:
self.res_index.append(o.residue_index)
self.atom_index.append(o.atom_index)
last = self.res_index[-1]
for o in ligand.objects:
self.res_index.append(o.residue_index + last)
self.atom_index.append(o.atom_index)
self.res_index = np.array(self.res_index, dtype=np.int32)
self.atom_index = np.array(self.atom_index, dtype=np.int32)
self.molecule_length = len(self.res_index)
self.cached = True
return self.evaluate_energy(receptor, receptor_coordinates, ligand, ligand_coordinates)
def evaluate_energy(self, receptor, receptor_coordinates, ligand, ligand_coordinates):
coordinates = np.append(receptor_coordinates.coordinates, ligand_coordinates.coordinates).reshape((-1, 3))
energy, interface_receptor, interface_ligand = calculate_dfire2(self.res_index,
self.atom_index,
coordinates,
self.potential.energy,
self.molecule_length,
DEFAULT_CONTACT_RESTRAINTS_CUTOFF)
# Code to consider contacts in the interface
perc_receptor_restraints = ScoringFunction.restraints_satisfied(receptor.restraints, set(interface_receptor))
perc_ligand_restraints = ScoringFunction.restraints_satisfied(ligand.restraints, set(interface_ligand))
return energy + perc_receptor_restraints * energy + perc_ligand_restraints * energy
# Needed to dynamically load the scoring functions from command line
DefinedScoringFunction = DFIRE2
DefinedModelAdapter = DFIRE2Adapter
| gpl-3.0 | -1,820,260,206,580,369,700 | 57.75188 | 120 | 0.538905 | false | 3.148268 | false | false | false |
mvaled/sentry | src/sentry/message_filters.py | 1 | 16944 | # TODO RaduW 8.06.2019 remove the sentry.filters package and rename this module to filters
from __future__ import absolute_import
import collections
from collections import namedtuple
import re
from sentry.models.projectoption import ProjectOption
from sentry.utils.data_filters import FilterStatKeys
from rest_framework import serializers
from sentry.api.fields.multiplechoice import MultipleChoiceField
from six.moves.urllib.parse import urlparse
from sentry.utils.safe import get_path
from ua_parser.user_agent_parser import Parse
from sentry.signals import inbound_filter_toggled
EventFilteredRet = namedtuple("EventFilteredRet", "should_filter reason")
def should_filter_event(project_config, data):
"""
Checks if an event should be filtered
:param project_config: relay config for the request (for the project really)
:param data: the event data
:return: an EventFilteredRet explaining if the event should be filtered and, if it should the reason
for filtering
"""
for event_filter in get_all_filters():
if _is_filter_enabled(project_config, event_filter) and event_filter(project_config, data):
return EventFilteredRet(should_filter=True, reason=event_filter.spec.id)
return EventFilteredRet(should_filter=False, reason=None)
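# Usage sketch (not part of the original source): callers unpack the returned
# named tuple to decide whether to drop the event and which filter id to
# attribute the drop to.
#
#     filtered = should_filter_event(project_config, data)
#     if filtered.should_filter:
#         # drop the event and record filtered.reason as the outcome
#         ...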
def get_all_filters():
"""
Returns a list of the existing event filters
    An event filter is a function that receives a project_config and an event data payload and
    returns True if the event should be filtered out (the filter's id is then used as the filter reason).
:return: list of registered event filters
"""
return (
_localhost_filter,
_browser_extensions_filter,
_legacy_browsers_filter,
_web_crawlers_filter,
)
def set_filter_state(filter_id, project, state):
flt = _filter_from_filter_id(filter_id)
if flt is None:
raise FilterNotRegistered(filter_id)
if flt == _legacy_browsers_filter:
if state is None:
state = {}
option_val = "0"
if "active" in state:
if state["active"]:
option_val = "1"
elif "subfilters" in state and len(state["subfilters"]) > 0:
option_val = set(state["subfilters"])
ProjectOption.objects.set_value(
project=project, key=u"filters:{}".format(filter_id), value=option_val
)
return option_val
else:
# all boolean filters
if state is None:
state = {"active": True}
ProjectOption.objects.set_value(
project=project,
key=u"filters:{}".format(filter_id),
value="1" if state.get("active", False) else "0",
)
if state:
inbound_filter_toggled.send(project=project, sender=flt)
return state.get("active", False)
def get_filter_state(filter_id, project):
"""
Returns the filter state
    IMPORTANT: this function accesses the database; it should NEVER be used by the ingestion pipeline.
    This API is used by the ProjectFilterDetails and ProjectFilters endpoints
:param filter_id: the filter Id
:param project: the project for which we want the filter state
:return: True if the filter is enabled False otherwise
:raises: ValueError if filter id not registered
"""
flt = _filter_from_filter_id(filter_id)
if flt is None:
raise FilterNotRegistered(filter_id)
filter_state = ProjectOption.objects.get_value(
project=project, key=u"filters:{}".format(flt.spec.id)
)
if filter_state is None:
raise ValueError(
"Could not find filter state for filter {0}."
" You need to register default filter state in projectoptions.defaults.".format(
filter_id
)
)
if flt == _legacy_browsers_filter:
# special handling for legacy browser state
if filter_state == "1":
return True
if filter_state == "0":
return False
return filter_state
else:
return filter_state == "1"
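# Usage sketch (not part of the original source): the project filter endpoints
# read and write per-project filter state with these helpers; filter ids come
# from FilterStatKeys.
#
#     set_filter_state(FilterStatKeys.LOCALHOST, project, {'active': True})
#     assert get_filter_state(FilterStatKeys.LOCALHOST, project) is True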
class FilterNotRegistered(Exception):
pass
def _filter_from_filter_id(filter_id):
"""
Returns the corresponding filter for a filter id or None if no filter with the given id found
"""
for flt in get_all_filters():
if flt.spec.id == filter_id:
return flt
return None
class _FilterSerializer(serializers.Serializer):
active = serializers.BooleanField()
class _FilterSpec(object):
"""
Data associated with a filter, it defines its name, id, default enable state and how its state is serialized
in the database
"""
def __init__(self, id, name, description, serializer_cls=None):
self.id = id
self.name = name
self.description = description
if serializer_cls is None:
self.serializer_cls = _FilterSerializer
else:
self.serializer_cls = serializer_cls
def _get_filter_settings(project_config, flt):
"""
Gets the filter options from the relay config or the default option if not specified in the relay config
:param project_config: the relay config for the request
:param flt: the filter
:return: the options for the filter
"""
filter_settings = project_config.config.get("filter_settings", {})
return filter_settings.get(get_filter_key(flt), None)
def _is_filter_enabled(project_config, flt):
filter_options = _get_filter_settings(project_config, flt)
if filter_options is None:
raise ValueError("unknown filter", flt.spec.id)
return filter_options["is_enabled"]
def get_filter_key(flt):
return flt.spec.id.replace("-", "_")
# ************* local host filter *************
_LOCAL_IPS = frozenset(["127.0.0.1", "::1"])
_LOCAL_DOMAINS = frozenset(["127.0.0.1", "localhost"])
def _localhost_filter(project_config, data):
ip_address = get_path(data, "user", "ip_address") or ""
url = get_path(data, "request", "url") or ""
domain = urlparse(url).hostname
return ip_address in _LOCAL_IPS or domain in _LOCAL_DOMAINS
_localhost_filter.spec = _FilterSpec(
id=FilterStatKeys.LOCALHOST,
name="Filter out events coming from localhost",
description="This applies to both IPv4 (``127.0.0.1``) and IPv6 (``::1``) addresses.",
)
# ************* browser extensions filter *************
_EXTENSION_EXC_VALUES = re.compile(
"|".join(
(
re.escape(x)
for x in (
# Random plugins/extensions
"top.GLOBALS",
# See: http://blog.errorception.com/2012/03/tale-of-unfindable-js-error.html
"originalCreateNotification",
"canvas.contentDocument",
"MyApp_RemoveAllHighlights",
"http://tt.epicplay.com",
"Can't find variable: ZiteReader",
"jigsaw is not defined",
"ComboSearch is not defined",
"http://loading.retry.widdit.com/",
"atomicFindClose",
# Facebook borked
"fb_xd_fragment",
# ISP "optimizing" proxy - `Cache-Control: no-transform` seems to
# reduce this. (thanks @acdha)
# See http://stackoverflow.com/questions/4113268
"bmi_SafeAddOnload",
"EBCallBackMessageReceived",
# See
# https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE
"_gCrWeb",
# See http://toolbar.conduit.com/Debveloper/HtmlAndGadget/Methods/JSInjection.aspx
"conduitPage",
# Google Search app (iOS)
# See: https://github.com/getsentry/raven-js/issues/756
"null is not an object (evaluating 'elt.parentNode')",
# Dragon Web Extension from Nuance Communications
# See: https://forum.sentry.io/t/error-in-raven-js-plugin-setsuspendstate/481/
"plugin.setSuspendState is not a function",
# lastpass
"should_do_lastpass_here",
# google translate
# see https://medium.com/@amir.harel/a-b-target-classname-indexof-is-not-a-function-at-least-not-mine-8e52f7be64ca
"a[b].target.className.indexOf is not a function",
)
)
),
re.I,
)
_EXTENSION_EXC_SOURCES = re.compile(
"|".join(
(
# Facebook flakiness
r"graph\.facebook\.com",
# Facebook blocked
r"connect\.facebook\.net",
# Woopra flakiness
r"eatdifferent\.com\.woopra-ns\.com",
r"static\.woopra\.com\/js\/woopra\.js",
# Chrome extensions
r"^chrome(?:-extension)?:\/\/",
# Cacaoweb
r"127\.0\.0\.1:4001\/isrunning",
# Other
r"webappstoolbarba\.texthelp\.com\/",
r"metrics\.itunes\.apple\.com\.edgesuite\.net\/",
# Kaspersky Protection browser extension
r"kaspersky-labs\.com",
# Google ad server (see http://whois.domaintools.com/2mdn.net)
r"2mdn\.net",
)
),
re.I,
)
def _browser_extensions_filter(project_config, data):
if data.get("platform") != "javascript":
return False
# get exception value
try:
exc_value = data["exception"]["values"][0]["value"]
except (LookupError, TypeError):
exc_value = ""
if exc_value:
if _EXTENSION_EXC_VALUES.search(exc_value):
return True
# get exception source
try:
exc_source = data["exception"]["values"][0]["stacktrace"]["frames"][-1]["abs_path"]
except (LookupError, TypeError):
exc_source = ""
if exc_source:
if _EXTENSION_EXC_SOURCES.search(exc_source):
return True
return False
_browser_extensions_filter.spec = _FilterSpec(
id=FilterStatKeys.BROWSER_EXTENSION,
name="Filter out errors known to be caused by browser extensions",
description="Certain browser extensions will inject inline scripts and are known to cause errors.",
)
# ************* legacy browsers filter *************
MIN_VERSIONS = {
"Chrome": 0,
"IE": 10,
"Firefox": 0,
"Safari": 6,
"Edge": 0,
"Opera": 15,
"Android": 4,
"Opera Mini": 8,
}
def _legacy_browsers_filter(project_config, data):
def get_user_agent(data):
try:
for key, value in get_path(data, "request", "headers", filter=True) or ():
if key.lower() == "user-agent":
return value
except LookupError:
return ""
if data.get("platform") != "javascript":
return False
value = get_user_agent(data)
if not value:
return False
ua = Parse(value)
if not ua:
return False
browser = ua["user_agent"]
if not browser["family"]:
return False
# IE Desktop and IE Mobile use the same engines, therefore we can treat them as one
if browser["family"] == "IE Mobile":
browser["family"] = "IE"
filter_settings = _get_filter_settings(project_config, _legacy_browsers_filter)
# handle old style config
if filter_settings is None:
return _filter_default(browser)
enabled_sub_filters = filter_settings.get("options")
if isinstance(enabled_sub_filters, collections.Sequence):
for sub_filter_name in enabled_sub_filters:
sub_filter = _legacy_browsers_sub_filters.get(sub_filter_name)
if sub_filter is not None and sub_filter(browser):
return True
return False
class _LegacyBrowserFilterSerializer(serializers.Serializer):
active = serializers.BooleanField()
subfilters = MultipleChoiceField(
choices=[
"ie_pre_9",
"ie9",
"ie10",
"opera_pre_15",
"android_pre_4",
"safari_pre_6",
"opera_mini_pre_8",
]
)
_legacy_browsers_filter.spec = _FilterSpec(
id=FilterStatKeys.LEGACY_BROWSER,
name="Filter out known errors from legacy browsers",
description="Older browsers often give less accurate information, and while they may report valid issues, "
"the context to understand them is incorrect or missing.",
serializer_cls=_LegacyBrowserFilterSerializer,
)
def _filter_default(browser):
"""
Legacy filter - new users specify individual filters
"""
try:
minimum_version = MIN_VERSIONS[browser["family"]]
except KeyError:
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if minimum_version > major_browser_version:
return True
return False
def _filter_opera_pre_15(browser):
if not browser["family"] == "Opera":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 15:
return True
return False
def _filter_safari_pre_6(browser):
if not browser["family"] == "Safari":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 6:
return True
return False
def _filter_android_pre_4(browser):
if not browser["family"] == "Android":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 4:
return True
return False
def _filter_opera_mini_pre_8(browser):
if not browser["family"] == "Opera Mini":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 8:
return True
return False
def _filter_ie10(browser):
return _filter_ie_internal(browser, lambda major_ver: major_ver == 10)
def _filter_ie9(browser):
return _filter_ie_internal(browser, lambda major_ver: major_ver == 9)
def _filter_ie_pre_9(browser):
return _filter_ie_internal(browser, lambda major_ver: major_ver <= 8)
def _filter_ie_internal(browser, compare_version):
if not browser["family"] == "IE":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
return compare_version(major_browser_version)
# list all browser specific sub filters that should be called
_legacy_browsers_sub_filters = {
"default": _filter_default,
"opera_pre_15": _filter_opera_pre_15,
"safari_pre_6": _filter_safari_pre_6,
"android_pre_4": _filter_android_pre_4,
"opera_mini_pre_8": _filter_opera_mini_pre_8,
"ie9": _filter_ie9,
"ie10": _filter_ie10,
"ie_pre_9": _filter_ie_pre_9,
}
# ************* web crawler filter *************
# not all of these agents are guaranteed to execute JavaScript, but to avoid
# overhead of identifying which ones do, and which ones will over time we simply
# target all of the major ones
_CRAWLERS = re.compile(
r"|".join(
(
# Google spiders (Adsense and others)
# https://support.google.com/webmasters/answer/1061943?hl=en
r"Mediapartners\-Google",
r"AdsBot\-Google",
r"Googlebot",
r"FeedFetcher\-Google",
# Bing search
r"BingBot",
r"BingPreview",
# Baidu search
r"Baiduspider",
# Yahoo
r"Slurp",
# Sogou
r"Sogou",
# facebook
r"facebook",
# Alexa
r"ia_archiver",
# Generic bot
r"bots?[\/\s\)\;]",
# Generic spider
r"spider[\/\s\)\;]",
# Slack - see https://api.slack.com/robots
r"Slack",
# Google indexing bot
r"Calypso AppCrawler",
# Pingdom
r"pingdom",
# Lytics
r"lyticsbot",
)
),
re.I,
)
def _web_crawlers_filter(project_config, data):
try:
for key, value in get_path(data, "request", "headers", filter=True) or ():
if key.lower() == "user-agent":
if not value:
return False
return bool(_CRAWLERS.search(value))
return False
except LookupError:
return False
_web_crawlers_filter.spec = _FilterSpec(
id=FilterStatKeys.WEB_CRAWLER,
name="Filter out known web crawlers",
description="Some crawlers may execute pages in incompatible ways which then cause errors that"
" are unlikely to be seen by a normal user.",
)
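# Illustrative sketch (added for clarity, not part of the original module): every
# filter above shares the signature f(project_config, data) and returns True when
# the event should be dropped. The event dict and the helper name below are made
# up for demonstration; only values visible in the filter definitions are used.
def _demo_inbound_filters():
    event = {
        "platform": "javascript",
        "exception": {"values": [{"value": "should_do_lastpass_here"}]},
    }
    # No project configuration: _browser_extensions_filter ignores it anyway.
    dropped_as_extension = _browser_extensions_filter(None, event)
    # No request headers are present, so the crawler filter lets the event through.
    dropped_as_crawler = _web_crawlers_filter(None, event)
    return dropped_as_extension, dropped_as_crawler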
| bsd-3-clause | -1,040,848,108,898,830,200 | 28.519164 | 130 | 0.602632 | false | 3.92313 | true | false | false |
eResearchSA/reporting-storage-hcp | ersa_storage_hcp/__init__.py | 1 | 5549 | #!/usr/bin/python3
"""Application and persistence management."""
# pylint: disable=no-member, import-error, no-init, too-few-public-methods
# pylint: disable=cyclic-import, no-name-in-module, invalid-name
import os
from flask import Flask
from flask.ext import restful
from flask.ext.cors import CORS
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
from sqlalchemy.dialects.postgresql import UUID
app = Flask("storage-hcp")
cors = CORS(app)
restapi = restful.Api(app)
app.config["ERSA_STORAGE_HCP_TOKEN"] = os.getenv("ERSA_STORAGE_HCP_TOKEN")
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("ERSA_STORAGE_HCP_DATABASE")
db = SQLAlchemy(app)
def _id_column():
"""Generate a UUID column."""
return db.Column(UUID,
server_default=text("uuid_generate_v4()"),
primary_key=True)
class Allocation(db.Model):
"""Storage Allocation"""
id = _id_column()
allocation = db.Column(db.Integer, unique=True, nullable=False)
tenants = db.relationship("Tenant", backref="allocation")
namespaces = db.relationship("Namespace", backref="allocation")
def json(self):
"""Jsonify"""
return {"id": self.id, "allocation": self.allocation}
class Snapshot(db.Model):
"""Storage Snapshot"""
id = _id_column()
ts = db.Column(db.Integer, nullable=False)
usage = db.relationship("Usage", backref="snapshot")
def json(self):
"""Jsonify"""
return {"id": self.id, "ts": self.ts}
class Tenant(db.Model):
"""HCP Tenant"""
id = _id_column()
name = db.Column(db.String(256), unique=True, nullable=False)
namespaces = db.relationship("Namespace", backref="tenant")
allocation_id = db.Column(None, db.ForeignKey("allocation.id"))
def json(self, namespaces=True):
"""Jsonify"""
result = {"id": self.id, "name": self.name}
if self.allocation:
result["allocation"] = self.allocation.json()
if namespaces:
result["namespaces"] = [namespace.json(tenants=False)
for namespace in self.namespaces]
return result
class Namespace(db.Model):
"""HCP Namespace"""
id = _id_column()
name = db.Column(db.String(256), nullable=False)
usage = db.relationship("Usage", backref="namespace")
tenant_id = db.Column(None,
db.ForeignKey("tenant.id"),
index=True,
nullable=False)
allocation_id = db.Column(None, db.ForeignKey("allocation.id"))
def json(self, tenants=True):
"""Jsonify"""
result = {"id": self.id, "name": self.name}
if self.allocation:
result["allocation"] = self.allocation.json()
if tenants:
result["tenant"] = self.tenant.json(namespaces=False)
return result
class Usage(db.Model):
"""HCP Usage"""
id = _id_column()
start_time = db.Column(db.Integer, index=True, nullable=False)
end_time = db.Column(db.Integer, index=True, nullable=False)
ingested_bytes = db.Column(db.BigInteger, nullable=False)
raw_bytes = db.Column(db.BigInteger, nullable=False)
reads = db.Column(db.BigInteger, nullable=False)
writes = db.Column(db.BigInteger, nullable=False)
deletes = db.Column(db.BigInteger, nullable=False)
objects = db.Column(db.BigInteger, nullable=False)
bytes_in = db.Column(db.BigInteger, nullable=False)
bytes_out = db.Column(db.BigInteger, nullable=False)
metadata_only_objects = db.Column(db.BigInteger, nullable=False)
metadata_only_bytes = db.Column(db.BigInteger, nullable=False)
tiered_objects = db.Column(db.BigInteger, nullable=False)
tiered_bytes = db.Column(db.BigInteger, nullable=False)
snapshot_id = db.Column(None,
db.ForeignKey("snapshot.id"),
index=True,
nullable=False)
namespace_id = db.Column(None,
db.ForeignKey("namespace.id"),
index=True,
nullable=False)
def json(self):
"""Jsonify"""
return {
"start_time": self.start_time,
"end_time": self.end_time,
"ingested_bytes": self.ingested_bytes,
"raw_bytes": self.raw_bytes,
"reads": self.reads,
"writes": self.writes,
"deletes": self.deletes,
"objects": self.objects,
"bytes_in": self.bytes_in,
"bytes_out": self.bytes_out,
"metadata_only_objects": self.metadata_only_objects,
"metadata_only_bytes": self.metadata_only_bytes,
"tiered_objects": self.tiered_objects,
"tiered_bytes": self.tiered_bytes,
"snapshot": self.snapshot.json(),
"namespace": {
"id": self.namespace.id,
"name": self.namespace.name
}
}
def run():
"""Let's roll."""
db.engine.execute("create extension if not exists \"uuid-ossp\";")
db.create_all()
from ersa_storage_hcp import api
restapi.add_resource(api.PingResource, "/ping")
restapi.add_resource(api.AllocationResource, "/allocation")
restapi.add_resource(api.StorageResource, "/storage")
restapi.add_resource(api.SnapshotResource, "/snapshot")
restapi.add_resource(api.UsageResource, "/usage")
app.run(host="127.0.0.1", port=int(os.getenv("ERSA_STORAGE_HCP_PORT")))
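# Illustrative sketch (added for clarity, not part of the original module): the
# model classes above can be instantiated in memory, and their json() helpers
# return the plain dictionaries served by the REST resources. The value used
# here is made up for demonstration.
def _demo_allocation_json():
    alloc = Allocation(allocation=42)
    # The id column is filled by the database default, so it is still None here.
    return alloc.json()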
| apache-2.0 | 2,379,986,731,309,536,000 | 31.83432 | 78 | 0.605515 | false | 3.724161 | false | false | false |
varunarya10/oslo.serialization | oslo_serialization/jsonutils.py | 1 | 8936 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
#. A handy function for getting an object down to something that can be
JSON serialized. See :func:`.to_primitive`.
#. Wrappers around :func:`.loads` and :func:`.dumps`. The :func:`.dumps`
wrapper will automatically use :func:`.to_primitive` for you if needed.
#. This sets up ``anyjson`` to use the :func:`.loads` and :func:`.dumps`
wrappers if ``anyjson`` is available.
'''
import codecs
import datetime
import functools
import inspect
import itertools
import sys
import uuid
is_simplejson = False
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
# NOTE(mriedem): Make sure we have a new enough version of simplejson
        # to support the namedtuple_as_object argument. This can be removed
# in the Kilo release when python 2.6 support is dropped.
if 'namedtuple_as_object' in inspect.getargspec(json.dumps).args:
is_simplejson = True
else:
import json
except ImportError:
import json
else:
import json
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
import six.moves.xmlrpc_client as xmlrpclib
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, ``convert_instances=True`` is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
if isinstance(value, uuid.UUID):
return six.text_type(value)
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return list(map(recursive, value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
elif any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
JSONEncoder = json.JSONEncoder
JSONDecoder = json.JSONDecoder
def dumps(obj, default=to_primitive, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str``.
:param obj: object to be serialized
:param default: function that returns a serializable version of an object
:param kwargs: extra named parameters, please see documentation \
of `json.dumps <https://docs.python.org/2/library/json.html#basic-usage>`_
:returns: json formatted string
"""
if is_simplejson:
kwargs['namedtuple_as_object'] = False
return json.dumps(obj, default=default, **kwargs)
def dump(obj, fp, *args, **kwargs):
"""Serialize ``obj`` as a JSON formatted stream to ``fp``
:param obj: object to be serialized
:param fp: a ``.write()``-supporting file-like object
:param default: function that returns a serializable version of an object
:param args: extra arguments, please see documentation \
of `json.dump <https://docs.python.org/2/library/json.html#basic-usage>`_
:param kwargs: extra named parameters, please see documentation \
of `json.dump <https://docs.python.org/2/library/json.html#basic-usage>`_
"""
default = kwargs.get('default', to_primitive)
if is_simplejson:
kwargs['namedtuple_as_object'] = False
return json.dump(obj, fp, default=default, *args, **kwargs)
def loads(s, encoding='utf-8', **kwargs):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
:param s: string to deserialize
:param encoding: encoding used to interpret the string
:param kwargs: extra named parameters, please see documentation \
of `json.loads <https://docs.python.org/2/library/json.html#basic-usage>`_
:returns: python object
"""
return json.loads(encodeutils.safe_decode(s, encoding), **kwargs)
def load(fp, encoding='utf-8', **kwargs):
"""Deserialize ``fp`` to a Python object.
:param fp: a ``.read()`` -supporting file-like object
:param encoding: encoding used to interpret the string
:param kwargs: extra named parameters, please see documentation \
of `json.loads <https://docs.python.org/2/library/json.html#basic-usage>`_
:returns: python object
"""
return json.load(codecs.getreader(encoding)(fp), **kwargs)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
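# Illustrative round-trip sketch (added for clarity, not part of the original
# module): dumps() funnels values such as datetimes through to_primitive before
# serializing, and loads() decodes the string back into plain Python objects.
def _demo_roundtrip():
    payload = {"when": datetime.datetime(2014, 1, 1, 12, 0, 0), "count": 3}
    text = dumps(payload)   # the datetime is converted by to_primitive
    return loads(text)      # back to dicts, strings and ints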
| apache-2.0 | -9,187,173,856,179,363,000 | 37.025532 | 79 | 0.652865 | false | 4.152416 | true | false | false |
ryanjoneil/docker-image-construction | ipynb/examples/example1.py | 1 | 3732 | from mosek.fusion import Model, Domain, Expr, ObjectiveSense
import sys
# Example 1. Full representation of 3-image problem with all maximal cliques.
# DICP instance:
#
# Resource consumption by command:
#
# C = {A, B, C, D}
#
# | x = A: 5 |
# r(c) = | x = B: 10 |
# | x = C: 7 |
# | x = D: 12 |
#
# Images to create:
#
# I = {1, 2, 3}
#
# | i = 1: {A, B} |
# C(i) = | i = 2: {A, B, C, D} |
# | i = 3: {B, C, D} |
r = {'A': 5.0, 'B': 10.0, 'C': 7.0, 'D': 12.0}
m = Model()
binary = (Domain.inRange(0.0, 1.0), Domain.isInteger())
# Provide a variable for each image and command. This is 1 if the command
# is not run as part of a clique for the image.
x_1_a = m.variable('x_1_a', *binary)
x_1_b = m.variable('x_1_b', *binary)
x_2_a = m.variable('x_2_a', *binary)
x_2_b = m.variable('x_2_b', *binary)
x_2_c = m.variable('x_2_c', *binary)
x_2_d = m.variable('x_2_d', *binary)
x_3_b = m.variable('x_3_b', *binary)
x_3_c = m.variable('x_3_c', *binary)
x_3_d = m.variable('x_3_d', *binary)
# Provide a variable for each maximal clique and maximal sub-clique.
x_12_ab = m.variable('x_12_ab', *binary)
x_123_b = m.variable('x_123_b', *binary)
x_123_b_12_a = m.variable('x_123_b_12_a', *binary)
x_123_b_23_cd = m.variable('x_123_b_23_cd', *binary)
# Each command must be run once for each image.
m.constraint('c_1_a', Expr.add([x_1_a, x_12_ab, x_123_b_12_a]), Domain.equalsTo(1.0))
m.constraint('c_1_b', Expr.add([x_1_b, x_12_ab, x_123_b]), Domain.equalsTo(1.0))
m.constraint('c_2_a', Expr.add([x_2_a, x_12_ab, x_123_b_12_a]), Domain.equalsTo(1.0))
m.constraint('c_2_b', Expr.add([x_2_b, x_12_ab, x_123_b]), Domain.equalsTo(1.0))
m.constraint('c_2_c', Expr.add([x_2_c, x_123_b_23_cd]), Domain.equalsTo(1.0))
m.constraint('c_2_d', Expr.add([x_2_d, x_123_b_23_cd]), Domain.equalsTo(1.0))
m.constraint('c_3_b', Expr.add([x_3_b, x_123_b]), Domain.equalsTo(1.0))
m.constraint('c_3_c', Expr.add([x_3_c, x_123_b_23_cd]), Domain.equalsTo(1.0))
m.constraint('c_3_d', Expr.add([x_3_d, x_123_b_23_cd]), Domain.equalsTo(1.0))
# Add dependency constraints for sub-cliques.
m.constraint('d_123_b_12_a', Expr.sub(x_123_b, x_123_b_12_a), Domain.greaterThan(0.0))
m.constraint('d_123_b_23_cd', Expr.sub(x_123_b, x_123_b_23_cd), Domain.greaterThan(0.0))
# Eliminate intersections between cliques.
m.constraint('e1', Expr.add([x_12_ab, x_123_b]), Domain.lessThan(1.0))
m.constraint('e2', Expr.add([x_123_b_12_a, x_123_b_23_cd]), Domain.lessThan(1.0))
# Minimize resources required to construct all images.
obj = [Expr.mul(c, x) for c, x in [
# Individual image/command pairs
(r['A'], x_1_a), (r['B'], x_1_b),
(r['A'], x_2_a), (r['B'], x_2_b), (r['C'], x_2_c), (r['D'], x_2_d),
(r['B'], x_3_b), (r['C'], x_3_c), (r['D'], x_3_d),
# Cliques
(r['A'] + r['B'], x_12_ab),
(r['B'], x_123_b),
(r['A'], x_123_b_12_a),
(r['C'] + r['D'], x_123_b_23_cd),
]]
m.objective('w', ObjectiveSense.Minimize, Expr.add(obj))
m.setLogHandler(sys.stdout)
m.solve()
print
print 'Image 1:'
print '\tx_1_a = %.0f' % x_1_a.level()[0]
print '\tx_1_b = %.0f' % x_1_b.level()[0]
print
print 'Image 2:'
print '\tx_2_a = %.0f' % x_2_a.level()[0]
print '\tx_2_b = %.0f' % x_2_b.level()[0]
print '\tx_2_c = %.0f' % x_2_c.level()[0]
print '\tx_2_d = %.0f' % x_2_d.level()[0]
print
print 'Image 3:'
print '\tx_3_b = %.0f' % x_3_b.level()[0]
print '\tx_3_c = %.0f' % x_3_c.level()[0]
print '\tx_3_d = %.0f' % x_3_d.level()[0]
print
print 'Cliques:'
print '\tx_12_ab = %.0f' % x_12_ab.level()[0]
print '\tx_123_b = %.0f' % x_123_b.level()[0]
print '\tx_123_b_12_a = %.0f' % x_123_b_12_a.level()[0]
print '\tx_123_b_23_cd = %.0f' % x_123_b_23_cd.level()[0]
print
| mit | 2,658,812,973,706,589,700 | 32.927273 | 88 | 0.566184 | false | 2.093102 | false | false | false |
clld/tsammalex | tsammalex/util.py | 1 | 4317 | from collections import OrderedDict
from purl import URL
from sqlalchemy.orm import joinedload, contains_eager
from clld.web.util.multiselect import MultiSelect
from clld.db.meta import DBSession
from clld.db.models.common import Language, Unit, Value, ValueSet
from clld.web.util.htmllib import HTML
from clld.web.util.helpers import maybe_external_link, collapsed
from tsammalex.models import split_ids
assert split_ids
def license_name(license_url):
if license_url == "http://commons.wikimedia.org/wiki/GNU_Free_Documentation_License":
return 'GNU Free Documentation License'
if license_url == 'http://en.wikipedia.org/wiki/Public_domain':
license_url = 'http://creativecommons.org/publicdomain/zero/1.0/'
license_url_ = URL(license_url)
if license_url_.host() != 'creativecommons.org':
return license_url
comps = license_url_.path().split('/')
if len(comps) < 3:
return license_url
return {
'zero': 'Public Domain',
}.get(comps[2], '(CC) %s' % comps[2].upper())
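# Illustrative sketch (added for clarity, not part of the original module):
# license_name() maps a Creative Commons URL onto a short display label.
# The URL below is just an example value.
def _demo_license_name():
    # Expected to yield '(CC) BY-SA' given the parsing rules above.
    return license_name('http://creativecommons.org/licenses/by-sa/3.0/')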
def names_in_2nd_languages(vs):
def format_name(n):
res = [HTML.i(n.name)]
if n.ipa:
res.append(' [%s]' % n.ipa)
return HTML.span(*res)
def format_language(vs):
return ' '.join([vs.language.name, ', '.join(format_name(n) for n in vs.values)])
query = DBSession.query(ValueSet).join(ValueSet.language)\
.order_by(Language.name)\
.filter(Language.pk.in_([l.pk for l in vs.language.second_languages]))\
.filter(ValueSet.parameter_pk == vs.parameter_pk)\
.options(contains_eager(ValueSet.language), joinedload(ValueSet.values))
res = '; '.join(format_language(vs) for vs in query)
if res:
res = '(%s)' % res
return res
def source_link(source):
label = source
host = URL(source).host()
if host == 'commons.wikimedia.org':
label = 'wikimedia'
elif host == 'en.wikipedia.org':
label = 'wikipedia'
return maybe_external_link(source, label=label)
def with_attr(f):
def wrapper(ctx, name, *args, **kw):
kw['attr'] = getattr(ctx, name)
if not kw['attr']:
return '' # pragma: no cover
return f(ctx, name, *args, **kw)
return wrapper
@with_attr
def tr_rel(ctx, name, label=None, dt='name', dd='description', attr=None):
content = []
for item in attr:
content.extend([HTML.dt(getattr(item, dt)), HTML.dd(getattr(item, dd))])
content = HTML.dl(*content, class_='dl-horizontal')
if len(attr) > 3:
content = collapsed('collapsed-' + name, content)
return HTML.tr(HTML.td((label or name.capitalize()) + ':'), HTML.td(content))
@with_attr
def tr_attr(ctx, name, label=None, content=None, attr=None):
return HTML.tr(
HTML.td((label or name.capitalize()) + ':'),
HTML.td(content or maybe_external_link(attr)))
def format_classification(taxon, with_species=False, with_rank=False):
names = OrderedDict()
for r in 'kingdom phylum class_ order family'.split():
names[r.replace('_', '')] = getattr(taxon, r)
if with_species:
names[taxon.rank] = taxon.name
return HTML.ul(
*[HTML.li(('{0} {1}: {2}' if with_rank else '{0}{2}').format('-' * i, *n))
for i, n in enumerate(n for n in names.items() if n[1])],
class_="unstyled")
class LanguageMultiSelect(MultiSelect):
def __init__(self, ctx, req, name='languages', eid='ms-languages', **kw):
kw['selected'] = ctx.languages
MultiSelect.__init__(self, req, name, eid, **kw)
@classmethod
def query(cls):
return DBSession.query(Language).order_by(Language.name)
def get_options(self):
return {
'data': [self.format_result(p) for p in self.query()],
'multiple': True,
'maximumSelectionSize': 2}
def parameter_index_html(context=None, request=None, **kw):
return dict(select=LanguageMultiSelect(context, request))
def language_detail_html(context=None, request=None, **kw):
return dict(categories=list(DBSession.query(Unit)
.filter(Unit.language == context).order_by(Unit.name)))
def language_index_html(context=None, request=None, **kw):
return dict(map_=request.get_map('languages', col='lineage', dt=context))
| apache-2.0 | -358,611,519,282,476,700 | 32.207692 | 89 | 0.632847 | false | 3.38854 | false | false | false |
tsengj10/physics-admit | admissions/management/commands/jelley.py | 1 | 1202 | from django.core.management.base import BaseCommand, CommandError
from admissions.models import *
class Command(BaseCommand):
help = 'Recalculate Jelley scores and ranks'
def add_arguments(self, parser):
parser.add_argument('tag', nargs='?', default='test')
def handle(self, *args, **options):
weights = Weights.objects.last()
all_students = Candidate.objects.all()
for s in all_students:
s.stored_jell_score = s.calc_jell_score(weights)
s.save()
self.stdout.write('Jelley score of {0} is {1}'.format(s.ucas_id, s.stored_jell_score))
ordered = Candidate.objects.order_by('-stored_jell_score').all()
first = True
index = 1
for s in ordered:
if first:
s.stored_rank = index
previous_score = s.stored_jell_score
previous_rank = index
first = False
else:
if s.stored_jell_score == previous_score:
s.stored_rank = previous_rank
else:
s.stored_rank = index
previous_score = s.stored_jell_score
previous_rank = index
s.save()
self.stdout.write('Rank of {0} is {1} ({2})'.format(s.ucas_id, s.stored_rank, index))
index = index + 1
| gpl-2.0 | -6,135,829,524,808,457,000 | 31.486486 | 92 | 0.624792 | false | 3.454023 | false | false | false |
xiawei0000/Kinectforactiondetect | ChalearnLAPSample.py | 1 | 41779 | # coding=gbk
#-------------------------------------------------------------------------------
# Name: Chalearn LAP sample
# Purpose: Provide easy access to Chalearn LAP challenge data samples
#
# Author: Xavier Baro
#
# Created: 21/01/2014
# Copyright: (c) Xavier Baro 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os
import zipfile
import shutil
import cv2
import numpy
import csv
import warnings
from PIL import Image, ImageDraw
from scipy.misc import imresize
class Skeleton(object):
""" Class that represents the skeleton information """
"""¹Ç¼ÜÀ࣬ÊäÈë¹Ç¼ÜÊý¾Ý£¬½¨Á¢Àà"""
#define a class to encode skeleton data
def __init__(self,data):
""" Constructor. Reads skeleton information from given raw data """
# Create an object from raw data
self.joins=dict();
pos=0
self.joins['HipCenter']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['Spine']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderCenter']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['Head']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ElbowLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['WristLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HandLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ElbowRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['WristRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HandRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HipLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['KneeLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['AnkleLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['FootLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HipRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['KneeRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['AnkleRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['FootRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
def getAllData(self):
""" Return a dictionary with all the information for each skeleton node """
return self.joins
def getWorldCoordinates(self):
""" Get World coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][0]
return skel
def getJoinOrientations(self):
""" Get orientations of all skeleton nodes """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][1]
return skel
def getPixelCoordinates(self):
""" Get Pixel coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][2]
return skel
def toImage(self,width,height,bgColor):
""" Create an image for the skeleton information """
SkeletonConnectionMap = (['HipCenter','Spine'],['Spine','ShoulderCenter'],['ShoulderCenter','Head'],['ShoulderCenter','ShoulderLeft'], \
['ShoulderLeft','ElbowLeft'],['ElbowLeft','WristLeft'],['WristLeft','HandLeft'],['ShoulderCenter','ShoulderRight'], \
['ShoulderRight','ElbowRight'],['ElbowRight','WristRight'],['WristRight','HandRight'],['HipCenter','HipRight'], \
['HipRight','KneeRight'],['KneeRight','AnkleRight'],['AnkleRight','FootRight'],['HipCenter','HipLeft'], \
['HipLeft','KneeLeft'],['KneeLeft','AnkleLeft'],['AnkleLeft','FootLeft'])
im = Image.new('RGB', (width, height), bgColor)
draw = ImageDraw.Draw(im)
for link in SkeletonConnectionMap:
p=self.getPixelCoordinates()[link[1]]
p.extend(self.getPixelCoordinates()[link[0]])
draw.line(p, fill=(255,0,0), width=5)
for node in self.getPixelCoordinates().keys():
p=self.getPixelCoordinates()[node]
r=5
draw.ellipse((p[0]-r,p[1]-r,p[0]+r,p[1]+r),fill=(0,0,255))
del draw
image = numpy.array(im)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
## Gesture sample class: takes a sample file path and builds the gesture data object
class GestureSample(object):
""" Class that allows to access all the information for a certain gesture database sample """
#define class to access gesture data samples
    # Initialization: read the sample file
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=GestureSample('Sample0001.zip')
"""
# Check the given file
if not os.path.exists(fileName): #or not os.path.isfile(fileName):
raise Exception("Sample path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
        # Determine whether the sample is a zip archive or an unpacked directory
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath) :
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while not self.rgb.isOpened():
self.rgb = cv2.VideoCapture(rgbVideoPath)
cv2.waitKey(500)
# Open video access for Depth information
depthVideoPath=self.samplePath + os.path.sep + self.seqID + '_depth.mp4'
if not os.path.exists(depthVideoPath):
raise Exception("Invalid sample file. Depth data is not available")
self.depth = cv2.VideoCapture(depthVideoPath)
while not self.depth.isOpened():
self.depth = cv2.VideoCapture(depthVideoPath)
cv2.waitKey(500)
# Open video access for User segmentation information
userVideoPath=self.samplePath + os.path.sep + self.seqID + '_user.mp4'
if not os.path.exists(userVideoPath):
raise Exception("Invalid sample file. User segmentation data is not available")
self.user = cv2.VideoCapture(userVideoPath)
while not self.user.isOpened():
self.user = cv2.VideoCapture(userVideoPath)
cv2.waitKey(500)
# Read skeleton data
skeletonPath=self.samplePath + os.path.sep + self.seqID + '_skeleton.csv'
if not os.path.exists(skeletonPath):
raise Exception("Invalid sample file. Skeleton data is not available")
self.skeletons=[]
with open(skeletonPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.skeletons.append(Skeleton(row))
del filereader
# Read sample data
sampleDataPath=self.samplePath + os.path.sep + self.seqID + '_data.csv'
if not os.path.exists(sampleDataPath):
raise Exception("Invalid sample file. Sample data is not available")
self.data=dict()
with open(sampleDataPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.data['numFrames']=int(row[0])
self.data['fps']=int(row[1])
self.data['maxDepth']=int(row[2])
del filereader
# Read labels data
labelsPath=self.samplePath + os.path.sep + self.seqID + '_labels.csv'
if not os.path.exists(labelsPath):
#warnings.warn("Labels are not available", Warning)
self.labels=[]
else:
self.labels=[]
with open(labelsPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.labels.append(map(int,row))
del filereader
    # Destructor
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
del self.rgb;
del self.depth;
del self.user;
shutil.rmtree(self.samplePath)
    # Read a single frame from the given video and return it
def getFrame(self,video, frameNum):
""" Get a single frame from given video object """
# Check frame number
# Get total number of frames
numFrames = video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# Check the given file
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
# Set the frame index
video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,frameNum-1)
ret,frame=video.read()
if ret==False:
raise Exception("Cannot read the frame")
return frame
    # The functions below each operate on a specific frame of the data members
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
return self.getFrame(self.rgb,frameNum)
    # Return the depth image, stored as 16-bit integers
def getDepth(self, frameNum):
""" Get the depth image for the given frame """
#get Depth frame
depthData=self.getFrame(self.depth,frameNum)
# Convert to grayscale
depthGray=cv2.cvtColor(depthData,cv2.cv.CV_RGB2GRAY)
# Convert to float point
depth=depthGray.astype(numpy.float32)
# Convert to depth values
depth=depth/255.0*float(self.data['maxDepth'])
depth=depth.round()
depth=depth.astype(numpy.uint16)
return depth
def getUser(self, frameNum):
""" Get user segmentation image for the given frame """
#get user segmentation frame
return self.getFrame(self.user,frameNum)
def getSkeleton(self, frameNum):
""" Get the skeleton information for a given frame. It returns a Skeleton object """
#get user skeleton for a given frame
# Check frame number
# Get total number of frames
numFrames = len(self.skeletons)
# Check the given file
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
return self.skeletons[frameNum-1]
def getSkeletonImage(self, frameNum):
""" Create an image with the skeleton image for a given frame """
return self.getSkeleton(frameNum).toImage(640,480,(255,255,255))
def getNumFrames(self):
""" Get the number of frames for this sample """
return self.data['numFrames']
#½«ËùÓеÄÒ»Ö¡Êý¾Ý ´ò°üµ½Ò»¸ö´óµÄ¾ØÕóÀï
def getComposedFrame(self, frameNum):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
skel=self.getSkeletonImage(frameNum)
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize1=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
compSize2=(max(user.shape[0],skel.shape[0]),user.shape[1]+skel.shape[1])
comp = numpy.zeros((compSize1[0]+ compSize2[0],max(compSize1[1],compSize2[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]=depth
comp[compSize1[0]:compSize1[0]+user.shape[0],:user.shape[1],:]=user
comp[compSize1[0]:compSize1[0]+skel.shape[0],user.shape[1]:user.shape[1]+skel.shape[1],:]=skel
return comp
def getComposedFrameOverlapUser(self, frameNum):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
mask = numpy.mean(user, axis=2) > 150
mask = numpy.tile(mask, (3,1,1))
mask = mask.transpose((1,2,0))
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
comp = numpy.zeros((compSize[0]+ compSize[0],max(compSize[1],compSize[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]= depth
comp[compSize[0]:compSize[0]+user.shape[0],:user.shape[1],:]= mask * rgb
comp[compSize[0]:compSize[0]+user.shape[0],user.shape[1]:user.shape[1]+user.shape[1],:]= mask * depth
return comp
def getComposedFrame_480(self, frameNum, ratio=0.5, topCut=60, botCut=140):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
rgb = rgb[topCut:-topCut,botCut:-botCut,:]
rgb = imresize(rgb, ratio, interp='bilinear')
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
user = user[topCut:-topCut,botCut:-botCut,:]
user = imresize(user, ratio, interp='bilinear')
mask = numpy.mean(user, axis=2) > 150
mask = numpy.tile(mask, (3,1,1))
mask = mask.transpose((1,2,0))
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth[topCut:-topCut,botCut:-botCut]
depth = imresize(depth, ratio, interp='bilinear')
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
comp = numpy.zeros((compSize[0]+ compSize[0],max(compSize[1],compSize[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]= depth
comp[compSize[0]:compSize[0]+user.shape[0],:user.shape[1],:]= mask * rgb
comp[compSize[0]:compSize[0]+user.shape[0],user.shape[1]:user.shape[1]+user.shape[1],:]= mask * depth
return comp
def getDepth3DCNN(self, frameNum, ratio=0.5, topCut=60, botCut=140):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
user = user[topCut:-topCut,botCut:-botCut,:]
user = imresize(user, ratio, interp='bilinear')
mask = numpy.mean(user, axis=2) > 150
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth[topCut:-topCut,botCut:-botCut]
depth = imresize(depth, ratio, interp='bilinear')
depth = depth.astype(numpy.uint8)
return mask * depth
def getDepthOverlapUser(self, frameNum, x_centre, y_centre, pixel_value, extractedFrameSize=224, upshift = 0):
""" Get a composition of all the modalities for a given frame """
halfFrameSize = extractedFrameSize/2
user=self.getUser(frameNum)
mask = numpy.mean(user, axis=2) > 150
ratio = pixel_value/ 3000
# Build depth image
# get sample modalities
depthValues=self.getDepth(frameNum)
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
mask = imresize(mask, ratio, interp='nearest')
depth = imresize(depth, ratio, interp='bilinear')
depth_temp = depth * mask
depth_extracted = depth_temp[x_centre-halfFrameSize-upshift:x_centre+halfFrameSize-upshift, y_centre-halfFrameSize: y_centre+halfFrameSize]
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
depth_extracted = depth_extracted.round()
depth_extracted = depth_extracted.astype(numpy.uint8)
depth_extracted = cv2.applyColorMap(depth_extracted,cv2.COLORMAP_JET)
# Build final image
compSize=(depth.shape[0],depth.shape[1])
comp = numpy.zeros((compSize[0] + extractedFrameSize,compSize[1]+compSize[1],3), numpy.uint8)
# Create composition
comp[:depth.shape[0],:depth.shape[1],:]=depth
mask_new = numpy.tile(mask, (3,1,1))
mask_new = mask_new.transpose((1,2,0))
comp[:depth.shape[0],depth.shape[1]:depth.shape[1]+depth.shape[1],:]= mask_new * depth
comp[compSize[0]:,:extractedFrameSize,:]= depth_extracted
return comp
def getDepthCentroid(self, startFrame, endFrame):
""" Get a composition of all the modalities for a given frame """
x_centre = []
y_centre = []
pixel_value = []
for frameNum in range(startFrame, endFrame):
user=self.getUser(frameNum)
depthValues=self.getDepth(frameNum)
depth = depthValues.astype(numpy.float32)
#depth = depth*255.0/float(self.data['maxDepth'])
mask = numpy.mean(user, axis=2) > 150
width, height = mask.shape
XX, YY, count, pixel_sum = 0, 0, 0, 0
for x in range(width):
for y in range(height):
if mask[x, y]:
XX += x
YY += y
count += 1
pixel_sum += depth[x, y]
if count>0:
x_centre.append(XX/count)
y_centre.append(YY/count)
pixel_value.append(pixel_sum/count)
return [numpy.mean(x_centre), numpy.mean(y_centre), numpy.mean(pixel_value)]
def getGestures(self):
""" Get the list of gesture for this sample. Each row is a gesture, with the format (gestureID,startFrame,endFrame) """
return self.labels
def getGestureName(self,gestureID):
""" Get the gesture label from a given gesture ID """
names=('vattene','vieniqui','perfetto','furbo','cheduepalle','chevuoi','daccordo','seipazzo', \
'combinato','freganiente','ok','cosatifarei','basta','prendere','noncenepiu','fame','tantotempo', \
'buonissimo','messidaccordo','sonostufo')
# Check the given file
if gestureID<1 or gestureID>20:
raise Exception("Invalid gesture ID <" + str(gestureID) + ">. Valid IDs are values between 1 and 20")
return names[gestureID-1]
def exportPredictions(self, prediction,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
output_filename = os.path.join(predPath, self.seqID + '_prediction.csv')
output_file = open(output_filename, 'wb')
for row in prediction:
output_file.write(repr(int(row[0])) + "," + repr(int(row[1])) + "," + repr(int(row[2])) + "\n")
output_file.close()
def play_video(self):
"""
play the video, Wudi adds this
"""
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while (self.rgb.isOpened()):
ret, frame = self.rgb.read()
cv2.imshow('frame',frame)
if cv2.waitKey(5) & 0xFF == ord('q'):
break
self.rgb.release()
cv2.destroyAllWindows()
def evaluate(self,csvpathpred):
""" Evaluate this sample agains the ground truth file """
maxGestures=11
seqLength=self.getNumFrames()
# Get the list of gestures from the ground truth and frame activation
predGestures = []
binvec_pred = numpy.zeros((maxGestures, seqLength))
gtGestures = []
binvec_gt = numpy.zeros((maxGestures, seqLength))
with open(csvpathpred, 'rb') as csvfilegt:
csvgt = csv.reader(csvfilegt)
for row in csvgt:
binvec_pred[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
predGestures.append(int(row[0]))
# Get the list of gestures from prediction and frame activation
        for row in self.getGestures():
binvec_gt[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
gtGestures.append(int(row[0]))
        # Get the list of gestures without repetitions for ground truth and prediction
gtGestures = numpy.unique(gtGestures)
predGestures = numpy.unique(predGestures)
# Find false positives
falsePos=numpy.setdiff1d(gtGestures, numpy.union1d(gtGestures,predGestures))
# Get overlaps for each gesture
overlaps = []
for idx in gtGestures:
intersec = sum(binvec_gt[idx-1] * binvec_pred[idx-1])
aux = binvec_gt[idx-1] + binvec_pred[idx-1]
union = sum(aux > 0)
overlaps.append(intersec/union)
# Use real gestures and false positive gestures to calculate the final score
return sum(overlaps)/(len(overlaps)+len(falsePos))
def get_shift_scale(self, template, ref_depth, start_frame=10, end_frame=20, debug_show=False):
"""
Wudi add this method for extracting normalizing depth wrt Sample0003
"""
from skimage.feature import match_template
Feature_all = numpy.zeros(shape=(480, 640, end_frame-start_frame), dtype=numpy.uint16 )
count = 0
for frame_num in range(start_frame,end_frame):
depth_original = self.getDepth(frame_num)
mask = numpy.mean(self.getUser(frame_num), axis=2) > 150
Feature_all[:, :, count] = depth_original * mask
count += 1
depth_image = Feature_all.mean(axis = 2)
depth_image_normalized = depth_image * 1.0 / float(self.data['maxDepth'])
depth_image_normalized /= depth_image_normalized.max()
result = match_template(depth_image_normalized, template, pad_input=True)
#############plot
x, y = numpy.unravel_index(numpy.argmax(result), result.shape)
shift = [depth_image.shape[0]/2-x, depth_image.shape[1]/2-y]
subsize = 25 # we use 25 by 25 region as a measurement for median of distance
minX = max(x - subsize,0)
minY = max(y - subsize,0)
maxX = min(x + subsize,depth_image.shape[0])
maxY = min(y + subsize,depth_image.shape[1])
subregion = depth_image[minX:maxX, minY:maxY]
distance = numpy.median(subregion[subregion>0])
scaling = distance*1.0 / ref_depth
from matplotlib import pyplot as plt
print "[x, y, shift, distance, scaling]"
print str([x, y, shift, distance, scaling])
if debug_show:
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(8, 4))
ax1.imshow(template)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(depth_image_normalized)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = template.shape
rect = plt.Rectangle((y-hcoin/2, x-wcoin/2), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
import cv2
from scipy.misc import imresize
rows,cols = depth_image_normalized.shape
M = numpy.float32([[1,0, shift[1]],[0,1, shift[0]]])
affine_image = cv2.warpAffine(depth_image_normalized, M, (cols, rows))
resize_image = imresize(affine_image, scaling)
resize_image_median = cv2.medianBlur(resize_image,5)
ax3.imshow(resize_image_median)
ax3.set_axis_off()
ax3.set_title('image_transformed')
# highlight matched region
hcoin, wcoin = resize_image_median.shape
rect = plt.Rectangle((wcoin/2-160, hcoin/2-160), 320, 320, edgecolor='r', facecolor='none')
ax3.add_patch(rect)
ax4.imshow(result)
ax4.set_axis_off()
ax4.set_title('`match_template`\nresult')
# highlight matched region
ax4.autoscale(False)
ax4.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
return [shift, scaling]
def get_shift_scale_depth(self, shift, scale, framenumber, IM_SZ, show_flag=False):
"""
Wudi added this method to extract segmented depth frame,
by a shift and scale
"""
depth_original = self.getDepth(framenumber)
mask = numpy.mean(self.getUser(framenumber), axis=2) > 150
resize_final_out = numpy.zeros((IM_SZ,IM_SZ))
if mask.sum() < 1000: # Kinect detect nothing
print "skip "+ str(framenumber)
flag = False
else:
flag = True
depth_user = depth_original * mask
depth_user_normalized = depth_user * 1.0 / float(self.data['maxDepth'])
depth_user_normalized = depth_user_normalized *255 /depth_user_normalized.max()
rows,cols = depth_user_normalized.shape
M = numpy.float32([[1,0, shift[1]],[0,1, shift[0]]])
affine_image = cv2.warpAffine(depth_user_normalized, M,(cols, rows))
resize_image = imresize(affine_image, scale)
resize_image_median = cv2.medianBlur(resize_image,5)
rows, cols = resize_image_median.shape
image_crop = resize_image_median[rows/2-160:rows/2+160, cols/2-160:cols/2+160]
resize_final_out = imresize(image_crop, (IM_SZ,IM_SZ))
if show_flag: # show the segmented images here
cv2.imshow('image',image_crop)
cv2.waitKey(10)
return [resize_final_out, flag]
# Action sample data class
class ActionSample(object):
""" Class that allows to access all the information for a certain action database sample """
#define class to access actions data samples
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=ActionSample('Sec01.zip')
"""
# Check the given file
if not os.path.exists(fileName) and not os.path.isfile(fileName):
raise Exception("Sample path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath) :
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while not self.rgb.isOpened():
self.rgb = cv2.VideoCapture(rgbVideoPath)
cv2.waitKey(500)
# Read sample data
sampleDataPath=self.samplePath + os.path.sep + self.seqID + '_data.csv'
if not os.path.exists(sampleDataPath):
raise Exception("Invalid sample file. Sample data is not available")
self.data=dict()
with open(sampleDataPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.data['numFrames']=int(row[0])
del filereader
# Read labels data
labelsPath=self.samplePath + os.path.sep + self.seqID + '_labels.csv'
self.labels=[]
if not os.path.exists(labelsPath):
warnings.warn("Labels are not available", Warning)
else:
with open(labelsPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.labels.append(map(int,row))
del filereader
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
del self.rgb;
shutil.rmtree(self.samplePath)
def getFrame(self,video, frameNum):
""" Get a single frame from given video object """
# Check frame number
# Get total number of frames
numFrames = video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# Check the given file
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
# Set the frame index
video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,frameNum-1)
ret,frame=video.read()
if ret==False:
raise Exception("Cannot read the frame")
return frame
def getNumFrames(self):
""" Get the number of frames for this sample """
return self.data['numFrames']
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
return self.getFrame(self.rgb,frameNum)
def getActions(self):
""" Get the list of gesture for this sample. Each row is an action, with the format (actionID,startFrame,endFrame) """
return self.labels
def getActionsName(self,actionID):
""" Get the action label from a given action ID """
names=('wave','point','clap','crouch','jump','walk','run','shake hands', \
'hug','kiss','fight')
# Check the given file
if actionID<1 or actionID>11:
raise Exception("Invalid action ID <" + str(actionID) + ">. Valid IDs are values between 1 and 11")
return names[actionID-1]
def exportPredictions(self, prediction,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
output_filename = os.path.join(predPath, self.seqID + '_prediction.csv')
output_file = open(output_filename, 'wb')
for row in prediction:
output_file.write(repr(int(row[0])) + "," + repr(int(row[1])) + "," + repr(int(row[2])) + "\n")
output_file.close()
def evaluate(self,csvpathpred):
""" Evaluate this sample agains the ground truth file """
maxGestures=11
seqLength=self.getNumFrames()
# Get the list of gestures from the ground truth and frame activation
predGestures = []
binvec_pred = numpy.zeros((maxGestures, seqLength))
gtGestures = []
binvec_gt = numpy.zeros((maxGestures, seqLength))
with open(csvpathpred, 'rb') as csvfilegt:
csvgt = csv.reader(csvfilegt)
for row in csvgt:
binvec_pred[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
predGestures.append(int(row[0]))
# Get the list of gestures from prediction and frame activation
for row in self.getActions():
binvec_gt[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
gtGestures.append(int(row[0]))
        # Get the list of gestures without repetitions for ground truth and prediction
gtGestures = numpy.unique(gtGestures)
predGestures = numpy.unique(predGestures)
# Find false positives
falsePos=numpy.setdiff1d(gtGestures, numpy.union1d(gtGestures,predGestures))
# Get overlaps for each gesture
overlaps = []
for idx in gtGestures:
intersec = sum(binvec_gt[idx-1] * binvec_pred[idx-1])
aux = binvec_gt[idx-1] + binvec_pred[idx-1]
union = sum(aux > 0)
overlaps.append(intersec/union)
# Use real gestures and false positive gestures to calculate the final score
return sum(overlaps)/(len(overlaps)+len(falsePos))
# Pose sample data class
class PoseSample(object):
""" Class that allows to access all the information for a certain pose database sample """
#define class to access gesture data samples
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=PoseSample('Seq01.zip')
"""
# Check the given file
if not os.path.exists(fileName) and not os.path.isfile(fileName):
raise Exception("Sequence path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath):
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Set path for rgb images
rgbPath=self.samplePath + os.path.sep + 'imagesjpg'+ os.path.sep
if not os.path.exists(rgbPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgbpath = rgbPath
# Set path for gt images
gtPath=self.samplePath + os.path.sep + 'maskspng'+ os.path.sep
if not os.path.exists(gtPath):
self.gtpath= "empty"
else:
self.gtpath = gtPath
frames=os.listdir(self.rgbpath)
self.numberFrames=len(frames)
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
shutil.rmtree(self.samplePath)
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
if frameNum>self.numberFrames:
raise Exception("Number of frame has to be less than: "+ self.numberFrames)
framepath=self.rgbpath+self.seqID[3:5]+'_'+ '%04d' %frameNum+'.jpg'
if not os.path.isfile(framepath):
raise Exception("RGB file does not exist: " + framepath)
return cv2.imread(framepath)
def getNumFrames(self):
return self.numberFrames
def getLimb(self, frameNum, actorID,limbID):
""" Get the BW limb image for a certain frame and a certain limbID """
if self.gtpath == "empty":
raise Exception("Limb labels are not available for this sequence. This sequence belong to the validation set.")
else:
limbpath=self.gtpath+self.seqID[3:5]+'_'+ '%04d' %frameNum+'_'+str(actorID)+'_'+str(limbID)+'.png'
if frameNum>self.numberFrames:
raise Exception("Number of frame has to be less than: "+ self.numberFrames)
if actorID<1 or actorID>2:
raise Exception("Invalid actor ID <" + str(actorID) + ">. Valid frames are values between 1 and 2 ")
if limbID<1 or limbID>14:
raise Exception("Invalid limb ID <" + str(limbID) + ">. Valid frames are values between 1 and 14")
return cv2.imread(limbpath,cv2.CV_LOAD_IMAGE_GRAYSCALE)
def getLimbsName(self,limbID):
""" Get the limb label from a given limb ID """
names=('head','torso','lhand','rhand','lforearm','rforearm','larm','rarm', \
'lfoot','rfoot','lleg','rleg','lthigh','rthigh')
# Check the given file
if limbID<1 or limbID>14:
raise Exception("Invalid limb ID <" + str(limbID) + ">. Valid IDs are values between 1 and 14")
return names[limbID-1]
def overlap_images(self, gtimage, predimage):
""" this function computes the hit measure of overlap between two binary images im1 and im2 """
[ret, im1] = cv2.threshold(gtimage, 127, 255, cv2.THRESH_BINARY)
[ret, im2] = cv2.threshold(predimage, 127, 255, cv2.THRESH_BINARY)
intersec = cv2.bitwise_and(im1, im2)
intersec_val = float(numpy.sum(intersec))
union = cv2.bitwise_or(im1, im2)
union_val = float(numpy.sum(union))
if union_val == 0:
return 0
else:
if float(intersec_val / union_val)>0.5:
return 1
else:
return 0
def exportPredictions(self, prediction,frame,actor,limb,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
prediction_filename = predPath+os.path.sep+ self.seqID[3:5] +'_'+ '%04d' %frame +'_'+str(actor)+'_'+str(limb)+'_prediction.png'
cv2.imwrite(prediction_filename,prediction)
def evaluate(self, predpath):
""" Evaluate this sample agains the ground truth file """
# Get the list of videos from ground truth
gt_list = os.listdir(self.gtpath)
# For each sample on the GT, search the given prediction
score = 0.0
nevals = 0
for gtlimbimage in gt_list:
# Avoid double check, use only labels file
if not gtlimbimage.lower().endswith(".png"):
continue
# Build paths for prediction and ground truth files
aux = gtlimbimage.split('.')
parts = aux[0].split('_')
seqID = parts[0]
gtlimbimagepath = os.path.join(self.gtpath,gtlimbimage)
predlimbimagepath= os.path.join(predpath) + os.path.sep + seqID+'_'+parts[1]+'_'+parts[2]+'_'+parts[3]+"_prediction.png"
#check predfile exists
if not os.path.exists(predlimbimagepath) or not os.path.isfile(predlimbimagepath):
raise Exception("Invalid video limb prediction file. Not all limb predictions are available")
#Load images
gtimage=cv2.imread(gtlimbimagepath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
predimage=cv2.imread(predlimbimagepath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
if cv2.cv.CountNonZero(cv2.cv.fromarray(gtimage)) >= 1:
score += self.overlap_images(gtimage, predimage)
nevals += 1
#release videos and return mean overlap
return score/nevals
| mit | 2,354,620,429,577,627,000 | 41.762538 | 150 | 0.599919 | false | 3.442568 | false | false | false |
icandigitbaby/openchange | script/bug-analysis/buganalysis/pkgshelper.py | 1 | 24843 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) Enrique J. Hernández 2014
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Helper methods to set the Package and Dependencies fields, if missing, from Apport crashes.
This is specific to Zentyal.
"""
from datetime import datetime
def map_package(report):
"""
Given a report, it will return the package and the version depending on the
DistroRelease and the ExecutableTimestamp fields specific from Zentyal repositories.
:param apport.report.Report report: the crash report
:returns: a tuple containing the package and the version of the package.
:rtype tuple:
"""
if 'DistroRelease' not in report or 'ExecutableTimestamp' not in report:
raise SystemError('No DistroRelease or ExecutableTimestamp to map the package')
distro_release = report['DistroRelease']
crash_date = datetime.fromtimestamp(int(report['ExecutableTimestamp']))
if distro_release == 'Ubuntu 14.04':
if crash_date >= datetime(2014, 5, 24, 1, 31): # Release date
return ('samba', '3:4.1.7+dfsg-2~zentyal2~64')
return ('samba', '3:4.1.7+dfsg-2~zentyal1~32')
elif distro_release == 'Ubuntu 13.10':
return ('samba', '2:4.1.6+dfsg-1~zentyal1~106')
elif distro_release == 'Ubuntu 12.04':
if crash_date < datetime(2013, 10, 2):
return ('samba4', '4.1.0rc3-zentyal3')
elif crash_date < datetime(2013, 12, 10, 13, 03):
return ('samba4', '4.1.0rc4-zentyal1')
elif crash_date < datetime(2013, 12, 17, 11, 34):
return ('samba4', '4.1.2-zentyal2')
elif crash_date < datetime(2014, 3, 5, 20, 16):
return ('samba4', '4.1.3-zentyal2')
elif crash_date < datetime(2014, 5, 30, 8, 41):
return ('samba4', '4.1.5-zentyal1')
else:
return ('samba4', '4.1.7-zentyal1')
else:
raise SystemError('Invalid Distro Release %s' % distro_release)
def map_dependencies(report):
"""
Given a report, it will return the dependencies from the package depending on the
DistroRelease fields specific from Zentyal repositories.
:param apport.report.Report report: the crash report
:returns: a list of the current dependencies packages
:rtype tuple:
"""
if 'DistroRelease' not in report:
raise SystemError('No DistroRelease to get the dependencies packages')
distro_release = report['DistroRelease']
if distro_release == 'Ubuntu 14.04':
return (
'adduser',
'apt-utils',
'attr',
'base-passwd',
'busybox-initramfs',
'ca-certificates',
'ckeditor',
'coreutils',
'cpio',
'cron',
'dbus',
'debconf',
'debconf-i18n',
'debianutils',
'dpkg',
'e2fslibs',
'e2fsprogs',
'file',
'findutils',
'gcc-4.8-base',
'gcc-4.9-base',
'gnustep-base-common',
'gnustep-base-runtime',
'gnustep-common',
'ifupdown',
'initramfs-tools',
'initramfs-tools-bin',
'initscripts',
'insserv',
'iproute2',
'isc-dhcp-client',
'isc-dhcp-common',
'javascript-common',
'klibc-utils',
'kmod',
'krb5-locales',
'libacl1',
'libaio1',
'libapache2-mod-wsgi',
'libapparmor1',
'libapt-inst1.5',
'libapt-pkg4.12',
'libarchive-extract-perl',
'libasn1-8-heimdal',
'libattr1',
'libaudit-common',
'libaudit1',
'libavahi-client3',
'libavahi-common-data',
'libavahi-common3',
'libblkid1',
'libbsd0',
'libbz2-1.0',
'libc6',
'libcap2',
'libcgmanager0',
'libcomerr2',
'libcups2',
'libcurl3-gnutls',
'libdb5.3',
'libdbus-1-3',
'libdebconfclient0',
'libdrm2',
'libevent-2.0-5',
'libexpat1',
'libffi6',
'libfile-copy-recursive-perl',
'libgcc1',
'libgcrypt11',
'libgdbm3',
'libglib2.0-0',
'libglib2.0-data',
'libgmp10',
'libgnustep-base1.24',
'libgnutls26',
'libgpg-error0',
'libgpm2',
'libgssapi-krb5-2',
'libgssapi3-heimdal',
'libhcrypto4-heimdal',
'libhdb9-heimdal',
'libheimbase1-heimdal',
'libheimntlm0-heimdal',
'libhx509-5-heimdal',
'libicu52',
'libidn11',
'libjs-jquery',
'libjs-jquery-ui',
'libjs-prototype',
'libjs-scriptaculous',
'libjs-sphinxdoc',
'libjs-swfobject',
'libjs-underscore',
'libjson-c2',
'libjson0',
'libk5crypto3',
'libkdc2-heimdal',
'libkeyutils1',
'libklibc',
'libkmod2',
'libkrb5-26-heimdal',
'libkrb5-3',
'libkrb5support0',
'liblasso3',
'libldap-2.4-2',
'libldb1',
'liblocale-gettext-perl',
'liblog-message-simple-perl',
'liblzma5',
'libmagic1',
'libmapi0',
'libmapiproxy0',
'libmapistore0',
'libmemcached10',
'libmodule-pluggable-perl',
'libmount1',
'libmysqlclient18',
'libncurses5',
'libncursesw5',
'libnih-dbus1',
'libnih1',
'libntdb1',
'libobjc4',
'libp11-kit0',
'libpam-modules',
'libpam-modules-bin',
'libpam-runtime',
'libpam-systemd',
'libpam0g',
'libpcre3',
'libplymouth2',
'libpng12-0',
'libpod-latex-perl',
'libpopt0',
'libpq5',
'libprocps3',
'libpython-stdlib',
'libpython2.7',
'libpython2.7-minimal',
'libpython2.7-stdlib',
'libreadline6',
'libroken18-heimdal',
'librtmp0',
'libsasl2-2',
'libsasl2-modules',
'libsasl2-modules-db',
'libsbjson2.3',
'libselinux1',
'libsemanage-common',
'libsemanage1',
'libsepol1',
'libslang2',
'libsope1',
'libsqlite3-0',
'libss2',
'libssl1.0.0',
'libstdc++6',
'libsystemd-daemon0',
'libsystemd-login0',
'libtalloc2',
'libtasn1-6',
'libtdb1',
'libterm-ui-perl',
'libtevent0',
'libtext-charwidth-perl',
'libtext-iconv-perl',
'libtext-soundex-perl',
'libtext-wrapi18n-perl',
'libtinfo5',
'libudev1',
'libustr-1.0-1',
'libuuid1',
'libwbclient0',
'libwind0-heimdal',
'libxml2',
'libxmlsec1',
'libxmlsec1-openssl',
'libxslt1.1',
'libxtables10',
'logrotate',
'lsb-base',
'makedev',
'memcached',
'mime-support',
'module-init-tools',
'mount',
'mountall',
'multiarch-support',
'mysql-common',
'netbase',
'openchange-ocsmanager',
'openchange-rpcproxy',
'openchangeproxy',
'openchangeserver',
'openssl',
'passwd',
'perl',
'perl-base',
'perl-modules',
'plymouth',
'plymouth-theme-ubuntu-text',
'procps',
'psmisc',
'python',
'python-beaker',
'python-bs4',
'python-chardet',
'python-crypto',
'python-decorator',
'python-dns',
'python-dnspython',
'python-formencode',
'python-ldb',
'python-lxml',
'python-mako',
'python-markupsafe',
'python-minimal',
'python-mysqldb',
'python-nose',
'python-ntdb',
'python-ocsmanager',
'python-openid',
'python-openssl',
'python-paste',
'python-pastedeploy',
'python-pastedeploy-tpl',
'python-pastescript',
'python-pkg-resources',
'python-pygments',
'python-pylons',
'python-repoze.lru',
'python-routes',
'python-rpclib',
'python-samba',
'python-scgi',
'python-setuptools',
'python-simplejson',
'python-six',
'python-spyne',
'python-sqlalchemy',
'python-sqlalchemy-ext',
'python-support',
'python-talloc',
'python-tdb',
'python-tempita',
'python-tz',
'python-waitress',
'python-weberror',
'python-webhelpers',
'python-webob',
'python-webtest',
'python2.7',
'python2.7-minimal',
'readline-common',
'samba',
'samba-common',
'samba-common-bin',
'samba-dsdb-modules',
'samba-libs',
'samba-vfs-modules',
'sed',
'sensible-utils',
'sgml-base',
'shared-mime-info',
'sogo',
'sogo-common',
'sogo-openchange',
'systemd-services',
'sysv-rc',
'sysvinit-utils',
'tar',
'tdb-tools',
'tmpreaper',
'tzdata',
'ucf',
'udev',
'unzip',
'update-inetd',
'upstart',
'util-linux',
'uuid-runtime',
'xml-core',
'zip',
'zlib1g'
)
elif distro_release == 'Ubuntu 13.10':
return (
'adduser',
'apt-utils',
'base-passwd',
'busybox-initramfs',
'ca-certificates',
'ckeditor',
'coreutils',
'cpio',
'cron',
'dbus',
'debconf',
'debconf-i18n',
'debianutils',
'dpkg',
'e2fslibs',
'e2fsprogs',
'file',
'findutils',
'gcc-4.8-base',
'gnustep-base-common',
'gnustep-base-runtime',
'gnustep-common',
'ifupdown',
'initramfs-tools',
'initramfs-tools-bin',
'initscripts',
'insserv',
'iproute2',
'isc-dhcp-client',
'isc-dhcp-common',
'klibc-utils',
'kmod',
'libacl1',
'libaio1',
'libapache2-mod-wsgi',
'libapparmor1',
'libapt-inst1.5',
'libapt-pkg4.12',
'libasn1-8-heimdal',
'libattr1',
'libaudit-common',
'libaudit1',
'libavahi-client3',
'libavahi-common-data',
'libavahi-common3',
'libblkid1',
'libbsd0',
'libbz2-1.0',
'libc6',
'libcap2',
'libclass-isa-perl',
'libcomerr2',
'libcups2',
'libcurl3-gnutls',
'libdb5.1',
'libdbus-1-3',
'libdrm2',
'libevent-2.0-5',
'libexpat1',
'libffi6',
'libfile-copy-recursive-perl',
'libgcc1',
'libgcrypt11',
'libgdbm3',
'libglib2.0-0',
'libgmp10',
'libgnustep-base1.24',
'libgnutls26',
'libgpg-error0',
'libgssapi-krb5-2',
'libgssapi3-heimdal',
'libhcrypto4-heimdal',
'libhdb9-heimdal',
'libheimbase1-heimdal',
'libheimntlm0-heimdal',
'libhx509-5-heimdal',
'libicu48',
'libidn11',
'libjs-jquery',
'libjs-jquery-ui',
'libjs-prototype',
'libjs-scriptaculous',
'libjs-sphinxdoc',
'libjs-underscore',
'libjson-c2',
'libjson0',
'libk5crypto3',
'libkdc2-heimdal',
'libkeyutils1',
'libklibc',
'libkmod2',
'libkrb5-26-heimdal',
'libkrb5-3',
'libkrb5support0',
'liblasso3',
'libldap-2.4-2',
'libldb1',
'liblocale-gettext-perl',
'liblzma5',
'libmagic1',
'libmapi0',
'libmapiproxy0',
'libmapistore0',
'libmemcached10',
'libmount1',
'libmysqlclient18',
'libncurses5',
'libncursesw5',
'libnih-dbus1',
'libnih1',
'libntdb1',
'libobjc4',
'libp11-kit0',
'libpam-modules',
'libpam-modules-bin',
'libpam-runtime',
'libpam-systemd',
'libpam0g',
'libpci3',
'libpcre3',
'libplymouth2',
'libpng12-0',
'libpopt0',
'libpq5',
'libprocps0',
'libpython-stdlib',
'libpython2.7',
'libpython2.7-minimal',
'libpython2.7-stdlib',
'libreadline6',
'libroken18-heimdal',
'librtmp0',
'libsasl2-2',
'libsasl2-modules',
'libsasl2-modules-db',
'libsbjson2.3',
'libselinux1',
'libsemanage-common',
'libsemanage1',
'libsepol1',
'libslang2',
'libsope1',
'libsqlite3-0',
'libss2',
'libssl1.0.0',
'libstdc++6',
'libswitch-perl',
'libsystemd-daemon0',
'libsystemd-login0',
'libtalloc2',
'libtasn1-3',
'libtdb1',
'libtevent0',
'libtext-charwidth-perl',
'libtext-iconv-perl',
'libtext-wrapi18n-perl',
'libtinfo5',
'libudev1',
'libusb-1.0-0',
'libustr-1.0-1',
'libuuid1',
'libwbclient0',
'libwind0-heimdal',
'libxml2',
'libxmlsec1',
'libxmlsec1-openssl',
'libxslt1.1',
'libxtables10',
'logrotate',
'lsb-base',
'makedev',
'memcached',
'mime-support',
'module-init-tools',
'mount',
'mountall',
'multiarch-support',
'mysql-common',
'netbase',
'openchange-ocsmanager',
'openchange-rpcproxy',
'openchangeproxy',
'openchangeserver',
'openssl',
'passwd',
'pciutils',
'perl',
'perl-base',
'perl-modules',
'plymouth',
'plymouth-theme-ubuntu-text',
'procps',
'psmisc',
'python',
'python-beaker',
'python-chardet',
'python-crypto',
'python-decorator',
'python-dnspython',
'python-formencode',
'python-ldb',
'python-lxml',
'python-mako',
'python-mapistore',
'python-markupsafe',
'python-minimal',
'python-mysqldb',
'python-nose',
'python-ntdb',
'python-ocsmanager',
'python-openssl',
'python-paste',
'python-pastedeploy',
'python-pastescript',
'python-pkg-resources',
'python-pygments',
'python-pylons',
'python-repoze.lru',
'python-routes',
'python-rpclib',
'python-samba',
'python-setuptools',
'python-simplejson',
'python-spyne',
'python-support',
'python-talloc',
'python-tdb',
'python-tempita',
'python-tz',
'python-weberror',
'python-webhelpers',
'python-webob',
'python-webtest',
'python2.7',
'python2.7-minimal',
'readline-common',
'samba',
'samba-common',
'samba-common-bin',
'samba-dsdb-modules',
'samba-libs',
'samba-vfs-modules',
'sed',
'sensible-utils',
'sgml-base',
'shared-mime-info',
'sogo',
'sogo-common',
'sogo-openchange',
'systemd-services',
'sysv-rc',
'sysvinit-utils',
'tar',
'tdb-tools',
'tmpreaper',
'tzdata',
'ucf',
'udev',
'update-inetd',
'upstart',
'usbutils',
'util-linux',
'xml-core',
'zip',
'zlib1g'
)
elif distro_release == 'Ubuntu 12.04':
return (
'adduser',
'apache2',
'apache2-utils',
'apache2.2-bin',
'apache2.2-common',
'autotools-dev',
'base-passwd',
'bind9-host',
'binutils',
'busybox-initramfs',
'ca-certificates',
'coreutils',
'cpio',
'cpp-4.6',
'debconf',
'debianutils',
'dnsutils',
'dpkg',
'findutils',
'gcc-4.6',
'gcc-4.6-base',
'gnustep-base-common',
'gnustep-base-runtime',
'gnustep-common',
'gnustep-make',
'gobjc-4.6',
'ifupdown',
'initramfs-tools',
'initramfs-tools-bin',
'initscripts',
'insserv',
'iproute',
'klibc-utils',
'libacl1',
'libapache2-mod-wsgi',
'libapr1',
'libaprutil1',
'libaprutil1-dbd-sqlite3',
'libaprutil1-ldap',
'libasn1-8-heimdal',
'libattr1',
'libavahi-client3',
'libavahi-common-data',
'libavahi-common3',
'libbind9-80',
'libblkid1',
'libbsd0',
'libbz2-1.0',
'libc-bin',
'libc-dev-bin',
'libc6',
'libc6-dev',
'libcap2',
'libclass-isa-perl',
'libcomerr2',
'libcups2',
'libcurl3',
'libdb5.1',
'libdbus-1-3',
'libdm0',
'libdns81',
'libdrm-intel1',
'libdrm-nouveau1a',
'libdrm-radeon1',
'libdrm2',
'libevent-2.0-5',
'libexpat1',
'libffi6',
'libgcc1',
'libgcrypt11',
'libgdbm3',
'libgeoip1',
'libglib2.0-0',
'libgmp10',
'libgnustep-base1.22',
'libgnutls26',
'libgomp1',
'libgpg-error0',
'libgssapi-krb5-2',
'libgssapi3-heimdal',
'libhcrypto4-heimdal',
'libheimbase1-heimdal',
'libheimntlm0-heimdal',
'libhx509-5-heimdal',
'libicu48',
'libidn11',
'libisc83',
'libisccc80',
'libisccfg82',
'libjs-prototype',
'libjs-scriptaculous',
'libk5crypto3',
'libkeyutils1',
'libklibc',
'libkrb5-26-heimdal',
'libkrb5-3',
'libkrb5support0',
'libldap-2.4-2',
'liblwres80',
'liblzma5',
'libmapi0',
'libmapiproxy0',
'libmapistore0',
'libmemcached6',
'libmount1',
'libmpc2',
'libmpfr4',
'libmysqlclient18',
'libncurses5',
'libncursesw5',
'libnih-dbus1',
'libnih1',
'libobjc3',
'libp11-kit0',
'libpam-modules',
'libpam-modules-bin',
'libpam0g',
'libpciaccess0',
'libpcre3',
'libplymouth2',
'libpng12-0',
'libpython2.7',
'libquadmath0',
'libreadline6',
'libroken18-heimdal',
'librtmp0',
'libsasl2-2',
'libsbjson2.3',
'libselinux1',
'libslang2',
'libsope-appserver4.9',
'libsope-core4.9',
'libsope-gdl1-4.9',
'libsope-ldap4.9',
'libsope-mime4.9',
'libsope-xml4.9',
'libsqlite3-0',
'libssl1.0.0',
'libstdc++6',
'libswitch-perl',
'libtasn1-3',
'libtinfo5',
'libudev0',
'libuuid1',
'libwind0-heimdal',
'libxml2',
'libxslt1.1',
'linux-libc-dev',
'lsb-base',
'makedev',
'memcached',
'mime-support',
'module-init-tools',
'mount',
'mountall',
'multiarch-support',
'mysql-common',
'ncurses-bin',
'openchange-ocsmanager',
'openchange-rpcproxy',
'openchangeproxy',
'openchangeserver',
'openssl',
'passwd',
'perl',
'perl-base',
'perl-modules',
'plymouth',
'procps',
'python',
'python-beaker',
'python-decorator',
'python-dnspython',
'python-formencode',
'python-lxml',
'python-mako',
'python-mapistore',
'python-markupsafe',
'python-minimal',
'python-mysqldb',
'python-nose',
'python-ocsmanager',
'python-paste',
'python-pastedeploy',
'python-pastescript',
'python-pkg-resources',
'python-pygments',
'python-pylons',
'python-routes',
'python-rpclib',
'python-setuptools',
'python-simplejson',
'python-spyne',
'python-support',
'python-tempita',
'python-tz',
'python-weberror',
'python-webhelpers',
'python-webob',
'python-webtest',
'python2.7',
'python2.7-minimal',
'readline-common',
'samba4',
'sed',
'sensible-utils',
'sgml-base',
'sogo',
'sogo-openchange',
'sope4.9-libxmlsaxdriver',
'sysv-rc',
'sysvinit-utils',
'tar',
'tmpreaper',
'tzdata',
'udev',
'upstart',
'util-linux',
'xml-core',
'xz-utils',
'zlib1g'
)
else:
raise SystemError('Invalid Distro Release %s' % distro_release)
| gpl-3.0 | 6,213,581,533,280,807,000 | 27.987165 | 91 | 0.434506 | false | 3.758814 | false | false | false |
RossMcKenzie/ACJ | ACJ.py | 1 | 20954 | from __future__ import division
import random
import os
import numpy as np
import pickle
import datetime
import json
class Decision(object):
def __init__(self, pair, result, reviewer, time):
self.pair = pair
self.result = result
self.reviewer = reviewer
self.time = time
def dict(self):
return {'Pair':[str(self.pair[0]),str(self.pair[1])], 'Result':str(self.result), 'reviewer':str(self.reviewer), 'time':str(self.time)}
def ACJ(data, maxRounds, noOfChoices = 1, logPath = None, optionNames = ["Choice"]):
if noOfChoices < 2:
return UniACJ(data, maxRounds, logPath, optionNames)
else:
return MultiACJ(data, maxRounds, noOfChoices, logPath, optionNames)
class MultiACJ(object):
'''Holds multiple ACJ objects for running comparisons with multiple choices.
The first element of the list of acj objects keeps track of the used pairs.'''
def __init__(self, data, maxRounds, noOfChoices, logPath = None, optionNames = None):
self.data = list(data)
self.n = len(data)
self.round = 0
self.step = 0
self.noOfChoices = noOfChoices
self.acjs = [ACJ(data, maxRounds) for _ in range(noOfChoices)]
self.logPath = logPath
if optionNames == None:
self.optionNames = [str(i) for i in range(noOfChoices)]
else:
self.optionNames = optionNames
self.nextRound()
def getScript(self, ID):
'''Gets script with ID'''
return self.acjs[0].getScript(ID)
def getID(self, script):
'''Gets ID of script'''
return self.acjs[0].getID(script)
def infoPairs(self):
'''Returns pairs based on summed selection arrays from Progressive Adaptive Comparitive Judgement
Politt(2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
pairs = []
#Create
sA = np.zeros((self.n, self.n))
for acj in self.acjs:
sA = sA+acj.selectionArray()
while(np.max(sA)>0):
iA, iB = np.unravel_index(sA.argmax(), sA.shape)
pairs.append([self.data[iA], self.data[iB]])
sA[iA,:] = 0
sA[iB,:] = 0
sA[:,iA] = 0
sA[:,iB] = 0
return pairs
def nextRound(self):
'''Returns next round of pairs'''
roundList = self.infoPairs()
for acj in self.acjs:
acj.nextRound(roundList)
acj.step = 0
self.round = self.acjs[0].round
self.step = self.acjs[0].step
return self.acjs[0].roundList
def nextPair(self):
'''gets next pair from main acj'''
p = self.acjs[0].nextPair(startNext=False)
if p == -1:
if self.nextRound() != None:
p = self.acjs[0].nextPair(startNext=False)
else:
return None
self.step = self.acjs[0].step
return p
def nextIDPair(self):
'''Gets ID of next pair'''
pair = self.nextPair()
if pair == None:
return None
idPair = []
for p in pair:
idPair.append(self.getID(p))
return idPair
def WMS(self):
ret = []
for acj in self.acjs:
ret.append(acj.WMS())
return ret
def comp(self, pair, result = None, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b where true is a wins and False is b wins'''
if result == None:
result = [True for _ in range(self.noOfChoices)]
if self.noOfChoices != len(result):
raise StandardError('Results list needs to be noOfChoices in length')
for i in range(self.noOfChoices):
self.acjs[i].comp(pair, result[i], update, reviewer, time)
if self.logPath != None:
self.log(self.logPath, pair, result, reviewer, time)
def IDComp(self, idPair, result = None, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b where true is a wins and False is b wins. Uses IDs'''
pair = []
for p in idPair:
pair.append(self.getScript(p))
self.comp(pair, result, update, reviewer, time)
def rankings(self, value=True):
'''Returns current rankings
Default is by value but score can be used'''
rank = []
for acj in self.acjs:
rank.append(acj.rankings(value))
return rank
def reliability(self):
'''Calculates reliability'''
rel = []
for acj in self.acjs:
rel.append(acj.reliability()[0])
return rel
def log(self, path, pair, result, reviewer = 'Unknown', time = 0):
'''Writes out a log of a comparison'''
timestamp = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S_%f')
with open(path+os.sep+str(reviewer)+timestamp+".log", 'w+') as file:
file.write("Reviewer:%s\n" % str(reviewer))
file.write("A:%s\n" % str(pair[0]))
file.write("B:%s\n" % str(pair[1]))
for i in range(len(result)):
file.write("Winner of %s:%s\n" %(self.optionNames[i], "A" if result[i] else "B"))
file.write("Time:%s\n" % str(time))
def JSONLog(self):
'''Write acjs states to JSON files'''
for acj in self.acjs:
acj.JSONLog()
def percentReturned(self):
return self.acjs[0].percentReturned()
def results(self):
'''Prints a list of scripts and thier value scaled between 0 and 100'''
rank = []
for r in self.rankings():
rank.append(list(zip(r[0], (r[1]-r[1].min())*100/(r[1].max()-r[1].min()))))
return rank
def decisionCount(self, reviewer):
return self.acjs[0].decisionCount(reviewer)
class UniACJ(object):
'''Base object to hold comparison data and run algorithm
script is used to refer to anything that is being ranked with ACJ
Dat is an array to hold the scripts with rows being [id, script, score, quality, trials]
Track is an array with each value representing number of times a winner (dim 0) has beaten the loser (dim 1)
Decisions keeps track of all the descisions madein descision objects
'''
def __init__(self, data, maxRounds, logPath = None, optionNames = None):
self.reviewers = []
self.optionNames = optionNames
self.noOfChoices = 1
self.round = 0
self.maxRounds = maxRounds
self.update = False
self.data = list(data)
self.dat = np.zeros((5, len(data)))
self.dat[0] = np.asarray(range(len(data)))
#self.dat[1] = np.asarray(data)
#self.dat[2] = np.zeros(len(data), dtype=float)
#self.dat[3] = np.zeros(len(data), dtype=float)
#self.dat[4] = np.zeros(len(data), dtype=float)
self.track = np.zeros((len(data), len(data)))
self.n = len(data)
self.swis = 5
self.roundList = []
self.step = -1
self.decay = 1
self.returned = []
self.logPath = logPath
self.decisions = []
def nextRound(self, extRoundList = None):
'''Returns next round of pairs'''
print("Hello")
self.round = self.round+1
self.step = 0
if self.round > self.maxRounds:
self.maxRounds = self.round
#print(self.round)
if self.round > 1:
self.updateAll()
if extRoundList == None:
self.roundList = self.infoPairs()
else:
self.roundList = extRoundList
self.returned = [False for i in range(len(self.roundList))]
return self.roundList
def polittNextRound(self):
self.round = self.round+1
if self.round > self.maxRounds:
self.roundList = None
elif self.round<2:
self.roundList = self.randomPairs()
elif self.round<2+self.swis:
self.updateAll()
self.roundList = self.scorePairs()
else:
#if self.round == 1+swis:
#self.dat[3] = (1/self.dat[1].size)*self.dat[2][:]
self.updateAll()
self.roundList = self.valuePairs()
return self.roundList
#return self.scorePairs()
def getID(self, script):
'''Gets ID of script'''
return self.data.index(script)
def getScript(self, ID):
'''Gets script with ID'''
return self.data[ID]
def nextPair(self, startNext = True):
'''Returns next pair. Will start new rounds automatically if startNext is true'''
self.step = self.step + 1
if self.step >= len(self.roundList):
if all(self.returned):
if (startNext):
self.nextRound()
#self.polittNextRound()
if self.roundList == None or self.roundList == []:
return None
else:
return -1
else:
o = [p for p in self.roundList if not self.returned[self.roundList.index(p)]]
return random.choice(o)
return self.roundList[self.step]
def nextIDPair(self, startNext = True):
'''Returns ID of next pair'''
pair = self.nextPair()
if pair == None:
return None
idPair = []
for p in pair:
idPair.append(self.getID(p))
return idPair
def singleProb(self, iA, iB):
prob = np.exp(self.dat[3][iA]-self.dat[3][iB])/(1+np.exp(self.dat[3][iA]-self.dat[3][iB]))
return prob
def prob(self, iA):
'''Returns a numpy array of the probability of A beating other values
Based on the Bradley-Terry-Luce model (Bradley and Terry 1952; Luce 1959)'''
probs = np.exp(self.dat[3][iA]-self.dat[3])/(1+np.exp(self.dat[3][iA]-self.dat[3]))
return probs
def fullProb(self):
'''Returns a 2D array of all probabilities of x beating y'''
pr = np.zeros((self.n, self.n))
for i in range(self.n):
pr[i] = self.dat[3][i]
return np.exp(pr-self.dat[3])/(1+np.exp(pr-self.dat[3]))
def fisher(self):
'''returns fisher info array'''
prob = self.fullProb()
return ((prob**2)*(1-prob)**2)+((prob.T**2)*(1-prob.T)**2)
def selectionArray(self):
'''Returns a selection array based on Progressive Adaptive Comparitive Judgement
Politt(2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
F = self.fisher()*np.logical_not(np.identity(self.n))
ran = np.random.rand(self.n, self.n)*np.max(F)
a = 0
b = 0
#Create array from fisher mixed with noise
for i in range(1, self.round+1):
a = a + (i-1)**self.decay
for i in range(1, self.maxRounds+1):
b = b + (i-1)**self.decay
W = a/b
S = ((1-W)*ran)+(W*F)
#Remove i=j and already compared scripts
return S*np.logical_not(np.identity(self.n))*np.logical_not(self.track+self.track.T)
def updateValue(self, iA):
'''Updates the value of script A using Newton's Method'''
scoreA = self.dat[2][iA]
valA = self.dat[3][iA]
probA = self.prob(iA)
x = np.sum(probA)-0.5#Subtract where i = a
y = np.sum(probA*(1-probA))-0.25#Subtract where i = a
if x == 0:
exit()
#print(self.dat[3])
return self.dat[3][iA]+((self.dat[2][iA]-x)/y)
#print(self.dat[3][iA])
#print("--------")
def updateAll(self):
'''Updates the value of all scripts using Newton's Method'''
newDat = np.zeros(self.dat[3].size)
for i in self.dat[0]:
newDat[i] = self.updateValue(i)
self.dat[3] = newDat[:]
def randomPairs(self, dat = None):
'''Returns a list of random pairs from dat'''
if dat == None:
dat = self.data
shufDat = np.array(dat, copy=True)
ranPairs = []
while len(shufDat)>1:
a = shufDat[0]
b = shufDat[1]
shufDat = shufDat[2:]
ranPairs.append([a,b])
return ranPairs
def scorePairs(self, dat = None, scores = None):
'''Returns random pairs with matching scores or close if no match'''
if dat == None:
dat = self.dat
shuf = np.array(dat[:3], copy=True)
np.random.shuffle(shuf.T)
shuf.T
shuf = shuf[:, np.argsort(shuf[2])]
pairs = []
i = 0
#Pairs matching scores
while i<(shuf[0].size-1):
aID = shuf[0][i]
bID = shuf[0][i+1]
if (self.track[aID][bID]+self.track[bID][aID])==0 and shuf[2][i]==shuf[2][i+1]:
pairs.append([self.data[shuf[0][i]], self.data[shuf[0][i+1]]])
shuf = np.delete(shuf, [i, i+1], 1)
else:
i = i+1
#Add on closest score couplings of unmatched scores
i = 0
while i<shuf[0].size-1:
aID = shuf[0][i]
j = i+1
while j<shuf[0].size:
bID = shuf[0][j]
if (self.track[aID][bID]+self.track[bID][aID])==0:
pairs.append([self.data[shuf[0][i]], self.data[shuf[0][j]]])
shuf = np.delete(shuf, [i, j], 1)
break
else:
j = j+1
if j == shuf[0].size:
i = i+1
return pairs
def valuePairs(self):
'''Returns pairs matched by close values Politt(2012)'''
shuf = np.array(self.dat, copy=True)#Transpose to shuffle columns rather than rows
np.random.shuffle(shuf.T)
shuf.T
pairs = []
i = 0
while i<shuf[0].size-1:
aID = shuf[0][i]
newShuf = shuf[:, np.argsort(np.abs(shuf[3] - shuf[3][i]))]
j = 0
while j<newShuf[0].size:
bID = newShuf[0][j]
if (self.track[aID][bID]+self.track[bID][aID])==0 and self.data[aID]!=self.data[bID]:
pairs.append([self.data[shuf[0][i]], self.data[newShuf[0][j]]])
iJ = np.where(shuf[0]==newShuf[0][j])[0][0]
shuf = np.delete(shuf, [i, iJ], 1)
break
else:
j = j+1
if j == shuf[0].size:
i = i+1
return pairs
def infoPairs(self):
'''Returns pairs based on selection array from Progressive Adaptive Comparitive Judgement
Politt(2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
pairs = []
#Create
sA = self.selectionArray()
while(np.max(sA)>0):
iA, iB = np.unravel_index(sA.argmax(), sA.shape)
pairs.append([self.data[iA], self.data[iB]])
sA[iA,:] = 0
sA[iB,:] = 0
sA[:,iA] = 0
sA[:,iB] = 0
return pairs
def rmse(self):
'''Calculate rmse'''
prob = self.fullProb()
y = 1/np.sqrt(np.sum(prob*(1-prob), axis=1)-0.25)
return np.sqrt(np.mean(np.square(y)))
def trueSD(self):
'''Calculate true standard deviation'''
sd = np.std(self.dat[3])
return ((sd**2)/(self.rmse()**2))**(0.5)
def reliability(self):
'''Calculates reliability'''
G = self.trueSD()/self.rmse()
return [(G**2)/(1+(G**2))]
def SR(self, pair, result):
'''Calculates the Squared Residual and weight of a decision'''
p = [self.getID(a) for a in pair]
if result:
prob = self.singleProb(p[0], p[1])
else:
prob = self.singleProb(p[1], p[0])
res = 1-prob
weight = prob*(1-prob)
SR = (res**2)
return SR, weight
def addDecision(self, pair, result, reviewer, time = 0):
'''Adds an SSR to the SSR array'''
self.decisions.append(Decision(pair, result,reviewer, time))
def revID(self, reviewer):
return self.reviewers.index(reviewer)
def WMS(self, decisions = None):
'''Builds data lists:
[reviewer] [sum of SR, sum of weights]
and uses it to make dict reviewer: WMS
WMS = Sum SR/Sum weights
also returns mean and std div'''
if decisions == None:
decisions = self.decisions
self.reviewers = []
SRs = []
weights = []
for dec in decisions:
if dec.reviewer not in self.reviewers:
self.reviewers.append(dec.reviewer)
SRs.append(0)
weights.append(0)
SR, weight = self.SR(dec.pair, dec.result)
revID = self.reviewers.index(dec.reviewer)
SRs[revID] = SRs[revID] + SR
weights[revID] = weights[revID] + weight
WMSs = []
WMSDict = {}
for i in range(len(self.reviewers)):
WMS = SRs[i]/weights[i]
WMSs.append(WMS)
WMSDict[self.reviewers[i]]=WMS
return WMSDict, np.mean(WMSs), np.std(WMSs)
def comp(self, pair, result = True, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b where true is a wins and False is b wins'''
self.addDecision(pair, result, reviewer, time)
if pair[::-1] in self.roundList:
pair = pair[::-1]
result = not result
if pair in self.roundList:
self.returned[self.roundList.index(pair)] = True
a = pair[0]
b = pair[1]
if update == None:
update = self.update
iA = self.data.index(a)
iB = self.data.index(b)
if result:
self.track[iA,iB] = 1
self.track[iB,iA] = 0
else:
self.track[iA,iB] = 0
self.track[iB,iA] = 1
self.dat[2,iA] = np.sum(self.track[iA,:])
self.dat[2,iB] = np.sum(self.track[iB,:])
self.dat[4,iA] = self.dat[4][iA]+1
self.dat[4,iB] = self.dat[4][iB]+1
if self.logPath != None:
self.log(self.logPath, pair, result, reviewer, time)
def IDComp(self, idPair, result = True, update = None, reviewer = 'Unknown', time=0):
'''Adds in a result between a and b where true is a wins and False is b wins, Uses IDs'''
pair = []
for p in idPair:
pair.append(self.getScript(p))
self.comp(pair, result, update, reviewer, time)
def percentReturned(self):
if len(self.returned) == 0:
return 0
return (sum(self.returned)/len(self.returned))*100
def log(self, path, pair, result, reviewer = 'Unknown', time = 0):
'''Writes out a log of a comparison'''
timestamp = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S_%f')
with open(path+os.sep+str(reviewer)+timestamp+".log", 'w+') as file:
file.write("Reviewer:%s\n" % str(reviewer))
file.write("A:%s\n" % str(pair[0]))
file.write("B:%s\n" % str(pair[1]))
file.write("Winner:%s\n" %("A" if result else "B"))
file.write("Time:%s\n" % str(time))
def JSONLog(self, path = None):
'''Writes out a JSON containing data from ACJ'''
if path == None:
path = self.logPath
choice = self.optionNames[0].replace(" ", "_")
ACJDict = {"Criteria":choice, "Scripts":self.scriptDict(), "Reviewers":self.reviewerDict(), "Decisions":self.decisionList()}
with open(path+os.sep+"ACJ_"+choice+".json", 'w+') as file:
json.dump(ACJDict, file, indent=4)
def decisionCount(self, reviewer):
c = 0
for dec in self.decisions:
if (dec.reviewer == reviewer):
c = c + 1
return c
def reviewerDict(self):
revs = {}
WMSs, _, _ = self.WMS()
for rev in self.reviewers:
revDict = {'decisions':self.decisionCount(rev), 'WMS':WMSs[rev]}
revs[str(rev)]= revDict
print(len(revs))
return revs
def scriptDict(self):
scr = {}
r = self.results()[0]
for i in range(len(r)):
scrDict = {"Score":r[i][1]}
scr[str(r[i][0])] = scrDict
return scr
def decisionList(self):
dec = []
for d in self.decisions:
dec.append(d.dict())
return dec
def rankings(self, value=True):
'''Returns current rankings
Default is by value but score can be used'''
if value:
return [np.asarray(self.data)[np.argsort(self.dat[3])], self.dat[3][np.argsort(self.dat[3])]]
else:
return self.data[np.argsort(self.dat[2])]
def results(self):
'''Prints a list of scripts and thier value scaled between 0 and 100'''
r = self.rankings()
rank = list(zip(r[0], (r[1]-r[1].min())*100/(r[1].max()-r[1].min())))
return [rank]
| mit | -3,679,451,613,960,517,600 | 34.818803 | 142 | 0.537654 | false | 3.427777 | false | false | false |
kbussell/pydocusign | pydocusign/client.py | 1 | 20977 | """DocuSign client."""
from collections import namedtuple
import base64
import json
import logging
import os
import warnings
import requests
from pydocusign import exceptions
logger = logging.getLogger(__name__)
Response = namedtuple('Response', ['status_code', 'text'])
class DocuSignClient(object):
"""DocuSign client."""
def __init__(self,
root_url='',
username='',
password='',
integrator_key='',
account_id='',
account_url='',
app_token=None,
oauth2_token=None,
timeout=None):
"""Configure DocuSign client."""
#: Root URL of DocuSign API.
#:
#: If not explicitely provided or empty, then ``DOCUSIGN_ROOT_URL``
#: environment variable, if available, is used.
self.root_url = root_url
if not self.root_url:
self.root_url = os.environ.get('DOCUSIGN_ROOT_URL', '')
#: API username.
#:
#: If not explicitely provided or empty, then ``DOCUSIGN_USERNAME``
#: environment variable, if available, is used.
self.username = username
if not self.username:
self.username = os.environ.get('DOCUSIGN_USERNAME', '')
#: API password.
#:
#: If not explicitely provided or empty, then ``DOCUSIGN_PASSWORD``
#: environment variable, if available, is used.
self.password = password
if not self.password:
self.password = os.environ.get('DOCUSIGN_PASSWORD', '')
#: API integrator key.
#:
#: If not explicitely provided or empty, then
#: ``DOCUSIGN_INTEGRATOR_KEY`` environment variable, if available, is
#: used.
self.integrator_key = integrator_key
if not self.integrator_key:
self.integrator_key = os.environ.get('DOCUSIGN_INTEGRATOR_KEY',
'')
#: API account ID.
#: This attribute can be guessed via :meth:`login_information`.
#:
#: If not explicitely provided or empty, then ``DOCUSIGN_ACCOUNT_ID``
#: environment variable, if available, is used.
self.account_id = account_id
if not self.account_id:
self.account_id = os.environ.get('DOCUSIGN_ACCOUNT_ID', '')
#: API AppToken.
#:
#: If not explicitely provided or empty, then ``DOCUSIGN_APP_TOKEN``
#: environment variable, if available, is used.
self.app_token = app_token
if not self.app_token:
self.app_token = os.environ.get('DOCUSIGN_APP_TOKEN', '')
#: OAuth2 Token.
#:
#: If not explicitely provided or empty, then ``DOCUSIGN_OAUTH2_TOKEN``
#: environment variable, if available, is used.
self.oauth2_token = oauth2_token
if not self.oauth2_token:
self.oauth2_token = os.environ.get('DOCUSIGN_OAUTH2_TOKEN', '')
#: User's URL, i.e. the one mentioning :attr:`account_id`.
#: This attribute can be guessed via :meth:`login_information`.
self.account_url = account_url
if self.root_url and self.account_id and not self.account_url:
self.account_url = '{root}/accounts/{account}'.format(
root=self.root_url,
account=self.account_id)
# Connection timeout.
if timeout is None:
timeout = float(os.environ.get('DOCUSIGN_TIMEOUT', 30))
self.timeout = timeout
def get_timeout(self):
"""Return connection timeout."""
return self._timeout
def set_timeout(self, value):
"""Set connection timeout. Converts ``value`` to a float.
Raises :class:`ValueError` in case the value is lower than 0.001.
"""
if value < 0.001:
raise ValueError('Cannot set timeout lower than 0.001')
self._timeout = int(value * 1000) / 1000.
def del_timeout(self):
"""Remove timeout attribute."""
del self._timeout
timeout = property(
get_timeout,
set_timeout,
del_timeout,
"""Connection timeout, in seconds, for HTTP requests to DocuSign's API.
This is not timeout for full request, only connection.
Precision is limited to milliseconds:
>>> client = DocuSignClient(timeout=1.2345)
>>> client.timeout
1.234
Setting timeout lower than 0.001 is forbidden.
>>> client.timeout = 0.0009 # Doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Cannot set timeout lower than 0.001
"""
)
def base_headers(self, sobo_email=None):
"""Return dictionary of base headers for all HTTP requests.
:param sobo_email: if specified, will set the appropriate header to act
on behalf of that user. The authenticated account must have the
appropriate permissions. See:
https://www.docusign.com/p/RESTAPIGuide/RESTAPIGuide.htm#SOBO/Send%20On%20Behalf%20Of%20Functionality%20in%20the%20DocuSign%20REST%20API.htm
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
}
if self.oauth2_token:
headers['Authorization'] = 'Bearer ' + self.oauth2_token
if sobo_email:
headers['X-DocuSign-Act-As-User'] = sobo_email
else:
auth = {
'Username': self.username,
'Password': self.password,
'IntegratorKey': self.integrator_key,
}
if sobo_email:
auth['SendOnBehalfOf'] = sobo_email
headers['X-DocuSign-Authentication'] = json.dumps(auth)
return headers
def _request(self, url, method='GET', headers=None, data=None,
json_data=None, expected_status_code=200, sobo_email=None):
"""Shortcut to perform HTTP requests."""
do_url = '{root}{path}'.format(root=self.root_url, path=url)
do_request = getattr(requests, method.lower())
if headers is None:
headers = {}
do_headers = self.base_headers(sobo_email)
do_headers.update(headers)
if data is not None:
do_data = json.dumps(data)
else:
do_data = None
try:
response = do_request(do_url, headers=do_headers, data=do_data,
json=json_data, timeout=self.timeout)
except requests.exceptions.RequestException as exception:
msg = "DocuSign request error: " \
"{method} {url} failed ; " \
"Error: {exception}" \
.format(method=method, url=do_url, exception=exception)
logger.error(msg)
raise exceptions.DocuSignException(msg)
if response.status_code != expected_status_code:
msg = "DocuSign request failed: " \
"{method} {url} returned code {status} " \
"while expecting code {expected}; " \
"Message: {message} ; " \
.format(
method=method,
url=do_url,
status=response.status_code,
expected=expected_status_code,
message=response.text,
)
logger.error(msg)
raise exceptions.DocuSignException(msg)
if response.headers.get('Content-Type', '') \
.startswith('application/json'):
return response.json()
return response.text
def get(self, *args, **kwargs):
"""Shortcut to perform GET operations on DocuSign API."""
return self._request(method='GET', *args, **kwargs)
def post(self, *args, **kwargs):
"""Shortcut to perform POST operations on DocuSign API."""
return self._request(method='POST', *args, **kwargs)
def put(self, *args, **kwargs):
"""Shortcut to perform PUT operations on DocuSign API."""
return self._request(method='PUT', *args, **kwargs)
def delete(self, *args, **kwargs):
"""Shortcut to perform DELETE operations on DocuSign API."""
return self._request(method='DELETE', *args, **kwargs)
def login_information(self):
"""Return dictionary of /login_information.
Populate :attr:`account_id` and :attr:`account_url`.
"""
url = '/login_information'
headers = {
}
data = self.get(url, headers=headers)
self.account_id = data['loginAccounts'][0]['accountId']
self.account_url = '{root}/accounts/{account}'.format(
root=self.root_url,
account=self.account_id)
return data
@classmethod
def oauth2_token_request(cls, root_url, username, password,
integrator_key):
url = root_url + '/oauth2/token'
data = {
'grant_type': 'password',
'client_id': integrator_key,
'username': username,
'password': password,
'scope': 'api',
}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
response = requests.post(url, headers=headers, data=data)
if response.status_code != 200:
raise exceptions.DocuSignOAuth2Exception(response.json())
return response.json()['access_token']
@classmethod
def oauth2_token_revoke(cls, root_url, token):
url = root_url + '/oauth2/revoke'
data = {
'token': token,
}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
response = requests.post(url, headers=headers, data=data)
if response.status_code != 200:
raise exceptions.DocuSignOAuth2Exception(response.json())
def get_account_information(self, account_id=None):
"""Return dictionary of /accounts/:accountId.
Uses :attr:`account_id` (see :meth:`login_information`) if
``account_id`` is ``None``.
"""
if account_id is None:
account_id = self.account_id
url = self.account_url
else:
url = '/accounts/{accountId}/'.format(accountId=self.account_id)
return self.get(url)
def get_account_provisioning(self):
"""Return dictionary of /accounts/provisioning."""
url = '/accounts/provisioning'
headers = {
'X-DocuSign-AppToken': self.app_token,
}
return self.get(url, headers=headers)
def post_account(self, data):
"""Create account."""
url = '/accounts'
return self.post(url, data=data, expected_status_code=201)
def delete_account(self, accountId):
"""Create account."""
url = '/accounts/{accountId}'.format(accountId=accountId)
data = self.delete(url)
return data.strip() == ''
def _create_envelope_from_documents_request(self, envelope):
"""Return parts of the POST request for /envelopes.
.. warning::
Only one document is supported at the moment. This is a limitation
of `pydocusign`, not of `DocuSign`.
"""
data = envelope.to_dict()
documents = []
for document in envelope.documents:
documents.append({
"documentId": document.documentId,
"name": document.name,
"fileExtension": "pdf",
"documentBase64": base64.b64encode(
document.data.read()).decode('utf-8')
})
data['documents'] = documents
return data
def _create_envelope_from_template_request(self, envelope):
"""Return parts of the POST request for /envelopes,
for creating an envelope from a template.
"""
return envelope.to_dict()
def _create_envelope(self, envelope, data):
"""POST to /envelopes and return created envelope ID.
Called by ``create_envelope_from_document`` and
``create_envelope_from_template`` methods.
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes'.format(
accountId=self.account_id)
response_data = self._request(
url, method='POST', json_data=data, expected_status_code=201)
if not envelope.client:
envelope.client = self
if not envelope.envelopeId:
envelope.envelopeId = response_data['envelopeId']
return response_data['envelopeId']
def create_envelope_from_documents(self, envelope):
"""POST to /envelopes and return created envelope ID.
If ``envelope`` has no (or empty) ``envelopeId`` attribute, this
method sets the value.
If ``envelope`` has no (or empty) ``client`` attribute, this method
sets the value.
"""
data = self._create_envelope_from_documents_request(envelope)
return self._create_envelope(envelope, data)
def create_envelope_from_document(self, envelope):
warnings.warn("This method will be deprecated, use "
"create_envelope_from_documents instead.",
DeprecationWarning)
data = self._create_envelope_from_documents_request(envelope)
return self._create_envelope(envelope, data)
def create_envelope_from_template(self, envelope):
"""POST to /envelopes and return created envelope ID.
If ``envelope`` has no (or empty) ``envelopeId`` attribute, this
method sets the value.
If ``envelope`` has no (or empty) ``client`` attribute, this method
sets the value.
"""
data = self._create_envelope_from_template_request(envelope)
return self._create_envelope(envelope, data)
def void_envelope(self, envelopeId, voidedReason):
"""PUT to /{account}/envelopes/{envelopeId} with 'voided' status and
voidedReason, and return JSON."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
data = {
'status': 'voided',
'voidedReason': voidedReason
}
return self.put(url, data=data)
def get_envelope(self, envelopeId):
"""GET {account}/envelopes/{envelopeId} and return JSON."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
return self.get(url)
def get_envelope_recipients(self, envelopeId):
"""GET {account}/envelopes/{envelopeId}/recipients and return JSON."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
return self.get(url)
def post_recipient_view(self, authenticationMethod=None,
clientUserId='', email='', envelopeId='',
returnUrl='', userId='', userName=''):
"""POST to {account}/envelopes/{envelopeId}/views/recipient.
This is the method to start embedded signing for recipient.
Return JSON from DocuSign response.
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/views/recipient' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
if authenticationMethod is None:
authenticationMethod = 'none'
data = {
'authenticationMethod': authenticationMethod,
'clientUserId': clientUserId,
'email': email,
'envelopeId': envelopeId,
'returnUrl': returnUrl,
'userId': userId,
'userName': userName,
}
return self.post(url, data=data, expected_status_code=201)
def get_envelope_document_list(self, envelopeId):
"""GET the list of envelope's documents."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/documents' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
data = self.get(url)
return data['envelopeDocuments']
def get_envelope_document(self, envelopeId, documentId):
"""Download one document in envelope, return file-like object."""
if not self.account_url:
self.login_information()
url = '{root}/accounts/{accountId}/envelopes/{envelopeId}' \
'/documents/{documentId}' \
.format(root=self.root_url,
accountId=self.account_id,
envelopeId=envelopeId,
documentId=documentId)
headers = self.base_headers()
response = requests.get(url, headers=headers, stream=True)
return response.raw
def get_template(self, templateId):
"""GET the definition of the template."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/templates/{templateId}' \
.format(accountId=self.account_id,
templateId=templateId)
return self.get(url)
def get_connect_failures(self):
"""GET a list of DocuSign Connect failures."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/connect/failures' \
.format(accountId=self.account_id)
return self.get(url)['failures']
def add_envelope_recipients(self, envelopeId, recipients,
resend_envelope=False):
"""Add one or more recipients to an envelope
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/create/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
if resend_envelope:
url += '?resend_envelope=true'
data = {'signers': [recipient.to_dict() for recipient in recipients]}
return self.post(url, data=data)
def update_envelope_recipients(self, envelopeId, recipients,
resend_envelope=False):
"""Modify recipients in a draft envelope or correct recipient information
for an in process envelope
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/update/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
if resend_envelope:
url += '?resend_envelope=true'
data = {'signers': [recipient.to_dict() for recipient in recipients]}
return self.put(url, data=data)
def delete_envelope_recipient(self, envelopeId, recipientId):
"""Deletes one or more recipients from a draft or sent envelope.
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/delete/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients/' \
'{recipientId}'.format(accountId=self.account_id,
envelopeId=envelopeId,
recipientId=recipientId)
return self.delete(url)
def delete_envelope_recipients(self, envelopeId, recipientIds):
"""Deletes one or more recipients from a draft or sent envelope.
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/deleteList/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
data = {'signers': [{'recipientId': id_} for id_ in recipientIds]}
return self.delete(url, data=data)
| bsd-3-clause | 3,125,876,871,494,043,600 | 36.259325 | 148 | 0.576727 | false | 4.289775 | false | false | false |
chfoo/cloaked-octo-nemesis | visibli/visibli_url_grab.py | 1 | 14609 | '''Grab Visibli hex shortcodes'''
# Copyright 2013 Christopher Foo <[email protected]>
# Licensed under GPLv3. See COPYING.txt for details.
import argparse
import base64
import collections
import gzip
import html.parser
import http.client
import logging
import logging.handlers
import math
import os
import queue
import random
import re
import sqlite3
import threading
import time
import atexit
_logger = logging.getLogger(__name__)
class UnexpectedResult(ValueError):
pass
class UserAgent(object):
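    """Pool of User-Agent strings loaded from a plain-text file.

    The file is expected to hold one User-Agent string per line; a random
    entry is picked from self.strings for each request.
    """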
def __init__(self, filename):
self.strings = []
with open(filename, 'rt') as f:
while True:
line = f.readline().strip()
if not line:
break
self.strings.append(line)
self.strings = tuple(self.strings)
_logger.info('Initialized with %d user agents', len(self.strings))
class AbsSineyRateFunc(object):
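    """Pseudo-random delay generator based on overlapping sine waves.

    get() returns a non-negative sleep time whose amplitude scales with
    1 / avg_rate, so request pacing looks bursty rather than perfectly
    periodic.
    """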
def __init__(self, avg_rate=1.0):
self._avg_rate = avg_rate
self._amplitude = 1.0 / self._avg_rate * 5.6
self._x = 1.0
def get(self):
y = abs(self._amplitude * math.sin(self._x) * math.sin(self._x ** 2)
/ self._x)
self._x += 0.05
if self._x > 2 * math.pi:
self._x = 1.0
return y
class HTTPClientProcessor(threading.Thread):
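    """Worker thread that performs GET requests pulled from a queue.

    Each item on request_queue is a (path, headers, shortcode) tuple; the
    response object, body and shortcode are pushed onto response_queue.
    On an HTTP error the connection is reset and that request is dropped.
    """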
def __init__(self, request_queue, response_queue, host, port):
threading.Thread.__init__(self)
self.daemon = True
self._request_queue = request_queue
self._response_queue = response_queue
self._http_client = http.client.HTTPConnection(host, port)
self.start()
def run(self):
while True:
path, headers, shortcode = self._request_queue.get()
try:
_logger.debug('Get %s %s', path, headers)
self._http_client.request('GET', path, headers=headers)
response = self._http_client.getresponse()
except http.client.HTTPException:
_logger.exception('Got an http error.')
self._http_client.close()
time.sleep(120)
else:
_logger.debug('Got response %s %s',
response.status, response.reason)
data = response.read()
self._response_queue.put((response, data, shortcode))
class InsertQueue(threading.Thread):
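    """Background thread that batches SQLite statements.

    Statements queued with add() are executed inside a single transaction
    roughly every 10 seconds, keeping database writes off the crawl loop.
    """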
def __init__(self, db_path):
threading.Thread.__init__(self)
self.daemon = True
self._queue = queue.Queue(maxsize=100)
self._event = threading.Event()
self._running = True
self._db_path = db_path
self.start()
def run(self):
self._db = sqlite3.connect(self._db_path)
        while self._running:
            self._process()
            self._event.wait(timeout=10)
        # Flush anything queued between the last pass and stop() so that
        # shutdown does not silently drop pending inserts.
        self._process()
def _process(self):
with self._db:
while True:
try:
statement, values = self._queue.get_nowait()
except queue.Empty:
break
_logger.debug('Executing statement')
self._db.execute(statement, values)
def stop(self):
self._running = False
self._event.set()
def add(self, statement, values):
self._queue.put((statement, values))
class VisibliHexURLGrab(object):
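    """Resolve Visibli/SharedBy hex shortcodes and store them in SQLite.

    Shortcodes are generated randomly or sequentially, fetched through a
    local HTTP proxy on localhost:8123 (expected to route through Tor),
    and the resolved URLs are written to visibli.db under database_dir.

    Minimal usage sketch (assumes such a proxy is running and that
    user_agents.txt holds one User-Agent string per line):

        grabber = VisibliHexURLGrab(database_dir='data',
            user_agent_filename='user_agents.txt')
        grabber.run()
    """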
def __init__(self, sequential=False, reverse_sequential=False,
avg_items_per_sec=0.5, database_dir='', user_agent_filename=None,
http_client_threads=2, save_reports=False):
db_path = os.path.join(database_dir, 'visibli.db')
self.database_dir = database_dir
self.db = sqlite3.connect(db_path)
self.db.execute('PRAGMA journal_mode=WAL')
with self.db:
self.db.execute('''CREATE TABLE IF NOT EXISTS visibli_hex
(shortcode INTEGER PRIMARY KEY ASC, url TEXT, not_exist INTEGER)
''')
self.host = 'localhost'
self.port = 8123
self.save_reports = save_reports
self.request_queue = queue.Queue(maxsize=1)
self.response_queue = queue.Queue(maxsize=10)
self.http_clients = self.new_clients(http_client_threads)
self.throttle_time = 1
self.sequential = sequential
self.reverse_sequential = reverse_sequential
self.seq_num = 0xffffff if self.reverse_sequential else 0
self.session_count = 0
#self.total_count = self.get_count() or 0
self.total_count = 0
self.user_agent = UserAgent(user_agent_filename)
self.headers = {
'Accept-Encoding': 'gzip',
'Host': 'links.sharedby.co',
}
self.average_deque = collections.deque(maxlen=100)
self.rate_func = AbsSineyRateFunc(avg_items_per_sec)
self.miss_count = 0
self.hit_count = 0
self.insert_queue = InsertQueue(db_path)
atexit.register(self.insert_queue.stop)
def new_clients(self, http_client_threads=2):
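        """Spawn HTTP worker threads that share the request/response queues."""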
return [HTTPClientProcessor(self.request_queue, self.response_queue,
self.host, self.port)
for dummy in range(http_client_threads)]
def shortcode_to_int(self, shortcode):
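        """Convert a 3-byte shortcode to a big-endian integer (bytes 00 00 ff -> 255)."""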
return int.from_bytes(shortcode, byteorder='big', signed=False)
def new_shortcode(self):
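        """Return an untried 3-byte shortcode, or None when the space is exhausted.

        Sequential modes walk 0x000000..0xffffff in order; otherwise codes
        are drawn at random and checked against the database to avoid
        repeats.
        """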
while True:
if self.sequential or self.reverse_sequential:
s = '{:06x}'.format(self.seq_num)
shortcode = base64.b16decode(s.encode(), casefold=True)
if self.reverse_sequential:
self.seq_num -= 1
if self.seq_num < 0:
return None
else:
self.seq_num += 1
if self.seq_num > 0xffffff:
return None
else:
shortcode = os.urandom(3)
rows = self.db.execute('SELECT 1 FROM visibli_hex WHERE '
'shortcode = ? LIMIT 1', [self.shortcode_to_int(shortcode)])
if not len(list(rows)):
return shortcode
def run(self):
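        """Main loop: generate shortcodes, queue requests and record results."""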
self.check_proxy_tor()
while True:
if not self.insert_queue.is_alive():
raise Exception('Insert queue died!')
shortcode = self.new_shortcode()
if shortcode is None:
break
shortcode_str = base64.b16encode(shortcode).lower().decode()
path = 'http://links.sharedby.co/links/{}'.format(shortcode_str)
headers = self.get_headers()
while True:
try:
self.request_queue.put_nowait((path, headers, shortcode))
except queue.Full:
self.read_responses()
else:
break
if self.session_count % 10 == 0:
_logger.info('Session={}, hit={}, total={}, {:.3f} u/s'.format(
self.session_count, self.hit_count,
self.session_count + self.total_count,
self.calc_avg()))
t = self.rate_func.get()
_logger.debug('Sleep {:.3f}'.format(t))
time.sleep(t)
self.read_responses()
_logger.info('Shutting down...')
time.sleep(30)
self.read_responses()
self.insert_queue.stop()
self.insert_queue.join()
def get_headers(self):
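        """Return the base headers plus a randomly chosen User-Agent."""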
d = dict(self.headers)
d['User-Agent'] = random.choice(self.user_agent.strings)
return d
def read_responses(self):
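        """Drain the response queue, storing hits and misses and throttling on errors."""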
while True:
try:
response, data, shortcode = self.response_queue.get(block=True,
timeout=0.05)
except queue.Empty:
break
self.session_count += 1
shortcode_str = base64.b16encode(shortcode).lower().decode()
try:
url = self.read_response(response, data)
except UnexpectedResult as e:
_logger.warn('Unexpected result %s', e)
if self.save_reports:
try:
self.write_report(e, shortcode_str, response, data)
except:
_logger.exception('Error writing report')
self.throttle(None, force=True)
continue
if not url:
self.add_no_url(shortcode)
self.miss_count += 1
else:
self.add_url(shortcode, url)
self.miss_count = 0
self.hit_count += 1
_logger.info('%s->%s...', shortcode_str,
url[:30] if url else '(none)')
self.throttle(response.status)
def read_response(self, response, data):
if response.getheader('Content-Encoding') == 'gzip':
_logger.debug('Got gzip data')
data = gzip.decompress(data)
if response.status == 301:
url = response.getheader('Location')
return url
elif response.status == 200:
match = re.search(br'<iframe id="[^"]+" src="([^"]+)">', data)
if not match:
raise UnexpectedResult('No iframe found')
url = match.group(1).decode()
url = html.parser.HTMLParser().unescape(url)
return url
elif response.status == 302:
location = response.getheader('Location')
# if location and 'sharedby' not in location \
# and 'visibli' not in location:
if location and location.startswith('http://yahoo.com'):
raise UnexpectedResult(
'Weird 302 redirect to {}'.format(location))
elif not location:
raise UnexpectedResult('No redirect location')
return
else:
raise UnexpectedResult('Unexpected status {}'.format(
response.status))
def throttle(self, status_code, force=False):
if force or 400 <= status_code <= 499 or 500 <= status_code <= 999 \
or self.miss_count > 2:
_logger.info('Throttle %d seconds', self.throttle_time)
time.sleep(self.throttle_time)
self.throttle_time *= 2
self.throttle_time = min(3600, self.throttle_time)
else:
self.throttle_time /= 2
self.throttle_time = min(600, self.throttle_time)
self.throttle_time = max(1, self.throttle_time)
def add_url(self, shortcode, url):
_logger.debug('Insert %s %s', shortcode, url)
self.insert_queue.add('INSERT OR IGNORE INTO visibli_hex VALUES (?, ?, ?)',
[self.shortcode_to_int(shortcode), url, None])
def add_no_url(self, shortcode):
_logger.debug('Mark no url %s', shortcode)
self.insert_queue.add('INSERT OR IGNORE INTO visibli_hex VALUES (?, ?, ?)',
[self.shortcode_to_int(shortcode), None, 1])
def get_count(self):
for row in self.db.execute('SELECT COUNT(ROWID) FROM visibli_hex '
'LIMIT 1'):
return int(row[0])
def calc_avg(self):
self.average_deque.append((self.session_count, time.time()))
try:
avg = ((self.session_count - self.average_deque[0][0])
/ (time.time() - self.average_deque[0][1]))
except ArithmeticError:
avg = 0
return avg
def check_proxy_tor(self):
http_client = http.client.HTTPConnection(self.host, self.port)
http_client.request('GET', 'http://check.torproject.org/',
headers={'Host': 'check.torproject.org'})
response = http_client.getresponse()
data = response.read()
_logger.debug('Check proxy got data=%s', data.decode())
if response.status != 200:
raise UnexpectedResult('Check tor page returned %d',
response.status)
if b'Congratulations. Your browser is configured to use Tor.' \
not in data:
raise UnexpectedResult('Not configured to use tor')
_logger.info('Using tor proxy')
def write_report(self, error, shortcode_str, response, data):
path = os.path.join(self.database_dir,
'report_{:.04f}'.format(time.time()))
_logger.debug('Writing report to %s', path)
with open(path, 'wt') as f:
f.write('Error ')
f.write(str(error))
f.write('\n')
f.write('Code ')
f.write(shortcode_str)
f.write('\n')
f.write(str(response.status))
f.write(response.reason)
f.write('\n')
f.write(str(response.getheaders()))
f.write('\n\nData\n\n')
f.write(str(data))
f.write('\n\nEnd Report\n')
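# Illustrative sketch (assumption, not part of the original script): the six
# character shortcodes handled above are plain hex encodings of three bytes,
# so the full keyspace is 0x000000-0xffffff.
def _shortcode_example():
    shortcode = b'\x1f\x67\x8a'
    assert base64.b16encode(shortcode).lower().decode() == '1f678a'
    assert int.from_bytes(shortcode, byteorder='big', signed=False) == 0x1f678a
    return shortcode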
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--sequential', action='store_true')
arg_parser.add_argument('--reverse-sequential', action='store_true')
arg_parser.add_argument('--save-reports', action='store_true')
arg_parser.add_argument('--average-rate', type=float, default=1.0)
arg_parser.add_argument('--quiet', action='store_true')
arg_parser.add_argument('--database-dir', default=os.getcwd())
arg_parser.add_argument('--log-dir', default=os.getcwd())
arg_parser.add_argument('--user-agent-file',
default=os.path.join(os.getcwd(), 'user-agents.txt'))
arg_parser.add_argument('--threads', type=int, default=2)
args = arg_parser.parse_args()
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
if not args.quiet:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(
logging.Formatter('%(levelname)s %(message)s'))
root_logger.addHandler(console)
log_filename = os.path.join(args.log_dir, 'visibli_url_grab.log')
file_log = logging.handlers.RotatingFileHandler(log_filename,
maxBytes=1048576, backupCount=9)
file_log.setLevel(logging.DEBUG)
file_log.setFormatter(logging.Formatter(
'%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s'))
root_logger.addHandler(file_log)
o = VisibliHexURLGrab(sequential=args.sequential,
reverse_sequential=args.reverse_sequential,
database_dir=args.database_dir,
avg_items_per_sec=args.average_rate,
user_agent_filename=args.user_agent_file,
http_client_threads=args.threads,
save_reports=args.save_reports,)
o.run()
| gpl-3.0 | 7,486,077,058,854,861,000 | 32.126984 | 83 | 0.56075 | false | 3.99153 | false | false | false |
jsaponara/opentaxforms | opentaxforms/ut.py | 1 | 14660 | from __future__ import print_function
import logging
import os
import pkg_resources
import re
import six
import sys
from collections import (
namedtuple as ntuple,
defaultdict as ddict,
OrderedDict as odict)
from datetime import datetime
from os.path import join as pathjoin, exists
from pint import UnitRegistry
from pprint import pprint as pp, pformat as pf
from subprocess import Popen, PIPE
from sys import stdout, exc_info
try:
from cPickle import dump, load
except ImportError:
from pickle import dump, load
NL = '\n'
TAB = '\t'
quiet = False
Bbox = ntuple('Bbox', 'x0 y0 x1 y1')
def merge(bb1, bb2):
return Bbox(
min(bb1.x0, bb2.x0),
min(bb1.y0, bb2.y0),
max(bb1.x1, bb2.x1),
max(bb1.y1, bb2.y1))
def numerify(s):
try:
return int(''.join(d for d in s if d.isdigit()))
except ValueError:
return s
def compactify(multilineRegex):
# to avoid having to replace spaces in multilineRegex's with less readable
# '\s' etc no re.VERBOSE flag needed
r"""
line too long (folded):
titlePttn1=re.compile(r'(?:(\d\d\d\d) )?Form ([\w-]+(?: \w\w?)?)
(?: or ([\w-]+))?(?: ?\(?(?:Schedule ([\w-]+))\)?)?
(?: ?\((?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)
.+?\))?\s*$')
re.VERBOSE with spaces removed (else theyll be ignored in VERBOSE mode):
pttn=re.compile(
r'''(?:(\d\d\d\d)\s)? # 2016
Form\s([\w-]+ # Form 1040
(?:\s\w\w?)?) # AS
(?:\sor\s([\w-]+))? # or 1040A
(?:\s\s?\(?(?:Schedule\s([\w-]+))\)?)? # (Schedule B)
(?:\s\s?\((?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).+?\))?\s*$''',re.VERBOSE)
using compactify:
>>> anyMonth = 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec'
>>> compactify(
... '''(?:(\d\d\d\d) )? # 2016
... Form ([\w-]+ # Form 1040
... (?: \w\w?)?) # AS
... (?: or ([\w-]+))? # or 1040A
... (?: ?\(?(?:Schedule ([\w-]+))\)?)? # (Schedule B)
... (?: ?\((?:Rev|'''+anyMonth+''').+?\))?\s*$''')
'(?:(\\d\\d\\d\\d) )?Form ([\\w-]+(?: \\w\\w?)?)(?: or ([\\w-]+))?'
'(?: ?\\(?(?:Schedule ([\\w-]+))\\)?)?'
'(?: ?\\('
'(?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).+?\\))?'
'\\s*$'
# todo what should compactify return for these?
# [but note this entire docstring is raw]
#>>> compactify(r'\ # comment')
#>>> compactify(r'\\ # comment')
#>>> compactify( '\ # comment')
#>>> compactify( '\\ # comment')
#print len(multilineRegex),
'[%s%s]'%(multilineRegex[0],multilineRegex[1])
"""
def crunch(seg):
return re.sub(' *#.*$', '', seg.lstrip())
segs = multilineRegex.split(NL)
return ''.join(crunch(seg) for seg in segs)
class NoSuchPickle(Exception):
pass
class PickleException(Exception):
pass
def pickle(data, pickleFilePrefix):
picklname = '%s.pickl' % (pickleFilePrefix)
with open(picklname, 'wb') as pickl:
dump(data, pickl)
def unpickle(pickleFilePrefix, default=None):
picklname = '%s.pickl' % (pickleFilePrefix)
try:
with open(picklname, 'rb') as pickl:
data = load(pickl)
except IOError as e:
clas, exc, tb = exc_info()
if e.errno == 2: # no such file
if default == 'raise':
raise NoSuchPickle(NoSuchPickle(exc.args)).with_traceback(tb)
else:
data = default
else:
raise PickleException(PickleException(exc.args)).with_traceback(tb)
return data
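# Illustrative sketch (assumption): a pickle()/unpickle() round trip using the
# file-prefix convention above; the temporary prefix is a placeholder.
def _pickle_roundtrip_example():
    import tempfile
    prefix = pathjoin(tempfile.mkdtemp(), 'demo')
    pickle({'a': 1}, prefix)
    assert unpickle(prefix) == {'a': 1}
    # a missing pickle returns the supplied default rather than raising
    assert unpickle(prefix + '-missing', default=42) == 42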
def flattened(l):
# only works for single level of sublists
return [i for sublist in l for i in sublist]
def hasdups(l, key=None):
if key is None:
ll = l
else:
ll = [key(it) for it in l]
return any(it in ll[1 + i:] for i, it in enumerate(ll))
def uniqify(l):
'''uniqify in place'''
s = set()
idxs = [] # indexes of duplicate items
for i, item in enumerate(l):
if item in s:
idxs.append(i)
else:
s.add(item)
for i in reversed(idxs):
l.pop(i)
return l
def uniqify2(l):
'''uniqify in place; probably faster for small lists'''
for i, item in enumerate(reversed(l)):
if item in l[:i - 1]:
l.pop(i)
return l
log = logging.getLogger()
defaultLoglevel = 'WARN'
alreadySetupLogging = False
def setupLogging(loggerId, args=None):
global alreadySetupLogging
if alreadySetupLogging:
log.warn('ignoring extra call to setupLogging')
fname = log.name
else:
if args:
loglevel = args.loglevel.upper()
else:
loglevel = defaultLoglevel
loglevel = getattr(logging, loglevel)
if not isinstance(loglevel, int):
allowedLogLevels = 'debug info warn warning error critical exception'
raise ValueError('Invalid log level: %s, allowedLogLevels are %s' % (
args.loglevel, allowedLogLevels))
fname = loggerId + '.log'
filehandler=logging.FileHandler(fname, mode='w', encoding='utf-8')
filehandler.setLevel(loglevel)
log.setLevel(loglevel)
log.addHandler(filehandler)
alreadySetupLogging = True
return fname
def unsetupLogging():
global alreadySetupLogging
alreadySetupLogging=False
log.handlers = []
defaultOutput = stdout
def logg(msg, outputs=None):
'''
log=setupLogging('test')
logg('just testing',[stdout,log.warn])
'''
if outputs is None:
outputs = [defaultOutput]
for o in outputs:
m = msg
if o == stdout:
o = stdout.write
m = msg + '\n'
if quiet and o == stdout.write:
continue
o(m)
def jj(*args, **kw):
'''
jj is a more flexible join(), handy for debug output
>>> jj(330,'info',None)
'330 info None'
'''
delim = kw.get('delim', ' ')
try:
return delim.join(str(x) for x in args)
except Exception:
return delim.join(six.text_type(x) for x in args)
def jdb(*args, **kw):
logg(jj(*args, **kw), [log.debug])
def run0(cmd):
try:
# shell is handy for executable path, etc
proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
except OSError as exc:
err = str(exc)
out = None
return out, err
def run(cmd, logprefix='run', loglevel='INFO'):
loglevel = getattr(logging, loglevel.upper(), None)
out, err = run0(cmd)
out, err = out.strip(), err.strip()
msg = '%s: command [%s] returned error [%s] and output [%s]' % (
logprefix, cmd, err, out)
if err:
log.error(msg)
raise Exception(msg)
else:
log.log(loglevel, msg)
return out, err
class Resource(object):
def __init__(self, pkgname, fpath=None):
self.pkgname = pkgname
self.fpath = fpath
def path(self):
return pkg_resources.resource_filename(self.pkgname, self.fpath)
def content(self):
return pkg_resources.resource_string(self.pkgname, self.fpath)
class CharEnum(object):
# unlike a real enum, no order guarantee the simplest one from this url:
# http://stackoverflow.com/questions/2676133/
@classmethod
def keys(cls):
return [k for k in cls.__dict__ if not k.startswith('_')]
@classmethod
def vals(cls):
return [cls.__dict__[k] for k in cls.keys()]
@classmethod
def items(cls):
return zip(cls.keys(), cls.vals())
class ChainablyUpdatableOrderedDict(odict):
'''
handy for ordered initialization
>>> d=ChainablyUpdatableOrderedDict()(a=0)(b=1)(c=2)
>>> assert d.keys()==['a','b','c']
'''
def __init__(self):
super(ChainablyUpdatableOrderedDict, self).__init__()
def __call__(self, **kw):
self.update(kw)
return self
class Bag(object):
# after alexMartelli at http://stackoverflow.com/questions/2597278
def __init__(self, *maps, **kw):
'''
>>> b=Bag(a=0)
>>> b.a=1
>>> b.b=0
>>> c=Bag(b)
'''
for mapp in maps:
getdict = None
if type(mapp) == dict:
getdict = lambda x: x
# def getdict(x): return x
elif type(mapp) == Bag:
getdict = lambda x: x.__dict__
# def getdict(x): return x.__dict__
elif type(mapp) == tuple:
mapp, getdict = mapp
if getdict is not None:
self.__dict__.update(getdict(mapp))
else:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__[k] = v
self.__dict__.update(kw)
def _getGetitems(self, mapp):
if type(mapp) == tuple:
mapp, getitems = mapp
else:
getitems = lambda m: m.items()
# def getitems(m): return m.items()
return mapp, getitems
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
def __len__(self):
return len(self.__dict__)
def __call__(self, *keys):
'''slicing interface
gimmicky but useful, and doesnt pollute key namespace
>>> b=Bag(a=1,b=2)
>>> assert b('a','b')==(1,2)
'''
return tuple(self.__dict__[k] for k in keys)
def clear(self):
self.__dict__={}
def update(self, *maps):
'''
>>> b=Bag(a=1,b=2)
>>> b.update(Bag(a=1,b=1,c=0))
Bag({'a': 1, 'b': 1, 'c': 0})
'''
for mapp in maps:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__[k] = v
return self
def __add__(self, *maps):
self.__iadd__(*maps)
return self
def __iadd__(self, *maps):
'''
>>> b=Bag(a=1,b=2)
>>> b+=Bag(a=1,b=1,c=0)
>>> assert b('a','b','c')==(2,3,0)
>>> b=Bag(a='1',b='2')
>>> b+=Bag(a='1',b='1',c='0')
>>> assert b('a','b','c')==('11','21','0')
'''
# todo error for empty maps[0]
zero = type(list(maps[0].values())[0])()
for mapp in maps:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__.setdefault(k, zero)
self.__dict__[k] += v
return self
def __iter__(self):
return self.iterkeys()
def iterkeys(self):
return iter(self.__dict__.keys())
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
def iteritems(self):
return self.__dict__.iteritems()
def get(self, key, dflt=None):
return self.__dict__.get(key, dflt)
def __str__(self):
return 'Bag(' + pf(self.__dict__) + ')'
def __repr__(self):
return self.__str__()
ureg = UnitRegistry()
# interactive use: from pint import UnitRegistry as ureg; ur=ureg();
# qq=ur.Quantity
qq = ureg.Quantity
def notequalpatch(self, o):
return not self.__eq__(o)
setattr(qq, '__ne__', notequalpatch)
assert qq(1, 'mm') == qq(1, 'mm')
assert not qq(1, 'mm') != qq(1, 'mm')
class Qnty(qq):
@classmethod
def fromstring(cls, s):
'''
>>> Qnty.fromstring('25.4mm')
<Quantity(25.4, 'millimeter')>
'''
if ' ' in s:
qnty, unit = s.split()
else:
m = re.match(r'([\d\.\-]+)(\w+)', s)
if m:
qnty, unit = m.groups()
else:
raise Exception('unsupported Qnty format [%s]' % (s))
if '.' in qnty:
qnty = float(qnty)
else:
qnty = int(qnty)
unit = {
'pt': 'printers_point',
'in': 'inch',
}.get(unit, unit)
return Qnty(qnty, unit)
def __hash__(self):
return hash(repr(self))
def playQnty():
# pagewidth=Qnty(page.cropbox[2]-page.cropbox[0],'printers_point')
a = Qnty.fromstring('2in')
b = Qnty.fromstring('1in')
print(Qnty(a - b, 'printers_point'))
print(Qnty.fromstring('72pt'))
# cumColWidths=[sum(columnWidths[0:i],Qnty(0,columnWidths[0].units)) for i
# in range(len(columnWidths))]
print(Qnty(0, a.units))
# maxh=max([Qnty.fromstring(c.attrib.get('h',c.attrib.get('minH'))) for c
# in cells])
print(max(a, b))
s = set()
s.update([a, b])
assert len(s) == 1
def nth(n):
'''
>>> nth(2)
'2nd'
>>> nth(21)
'21st'
>>> nth('22')
'22nd'
>>> nth(23)
'23rd'
>>> nth(24)
'24th'
>>> nth(12)
'12th'
'''
n = str(n)
suffix = 'th'
if n[-1] == '1' and n[-2:] != '11':
suffix = 'st'
elif n[-1] == '2' and n[-2:] != '12':
suffix = 'nd'
elif n[-1] == '3' and n[-2:] != '13':
suffix = 'rd'
return n + suffix
def skip(s, substr):
'''
>>> skip('0123456789','45')
'6789'
'''
idx = s.index(substr)
return s[idx + len(substr):]
def until(s, substr):
'''
>>> until('0123456789','45')
'0123'
'''
try:
idx = s.index(substr)
return s[:idx]
except ValueError:
return s
def ensure_dir(folder):
'''ensure that directory exists'''
if not exists(folder):
os.makedirs(folder)
def now(format=None):
dt = datetime.now()
if format is None:
return dt.isoformat()
return dt.strftime(format)
def readImgSize(fname, dirName):
from PIL import Image
with open(pathjoin(dirName,fname), 'rb') as fh:
img = Image.open(fh)
imgw, imgh = img.size
return imgw, imgh
def asciiOnly(s):
if s:
s=''.join(c for c in s if ord(c)<127)
return s
if __name__ == "__main__":
args = sys.argv[1:]
if any('T' in arg for arg in args):
verbose = any('v' in arg for arg in args)
import doctest
doctest.testmod(verbose=verbose)
| agpl-3.0 | -4,866,279,262,803,321,000 | 25.178571 | 107 | 0.512005 | false | 3.367792 | false | false | false |
sbailey/redrock | py/redrock/fitz.py | 1 | 7113 | """
redrock.fitz
============
Functions for fitting minima of chi^2 results.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.constants
import scipy.special
from . import constants
from .rebin import rebin_template
from .zscan import calc_zchi2_one, spectral_data
from .zwarning import ZWarningMask as ZW
from .utils import transmission_Lyman
def get_dv(z, zref):
"""Returns velocity difference in km/s for two redshifts
Args:
z (float): redshift for comparison.
zref (float): reference redshift.
Returns:
(float): the velocity difference.
"""
c = (scipy.constants.speed_of_light/1000.) #- km/s
dv = c * (z - zref) / (1.0 + zref)
return dv
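# Illustrative sketch (assumption): a 1 percent redshift offset at zref=1
# corresponds to roughly 1499 km/s.
def _get_dv_example():
    dv = get_dv(z=1.01, zref=1.0)
    assert abs(dv - 1498.96) < 0.01
    return dv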
def find_minima(x):
"""Return indices of local minima of x, including edges.
    The returned indices are ordered by increasing value of x, i.e. the
    deepest minimum comes first.
Note:
this is somewhat conservative in the case of repeated values:
find_minima([1,1,1,2,2,2]) -> [0,1,2,4,5]
Args:
x (array-like): The data array.
Returns:
(array): The indices.
"""
x = np.asarray(x)
ii = np.where(np.r_[True, x[1:]<=x[:-1]] & np.r_[x[:-1]<=x[1:], True])[0]
jj = np.argsort(x[ii])
return ii[jj]
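# Illustrative sketch (assumption): for x = [3, 1, 2, 0, 2] the dips at index 3
# (value 0) and index 1 (value 1) are returned, best value first.
def _find_minima_example():
    idx = find_minima([3, 1, 2, 0, 2])
    assert list(idx) == [3, 1]
    return idx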
def minfit(x, y):
"""Fits y = y0 + ((x-x0)/xerr)**2
See redrock.zwarning.ZWarningMask.BAD_MINFIT for zwarn failure flags
Args:
x (array): x values.
y (array): y values.
Returns:
(tuple): (x0, xerr, y0, zwarn) where zwarn=0 is good fit.
"""
if len(x) < 3:
return (-1,-1,-1,ZW.BAD_MINFIT)
try:
#- y = a x^2 + b x + c
a,b,c = np.polyfit(x,y,2)
except np.linalg.LinAlgError:
return (-1,-1,-1,ZW.BAD_MINFIT)
if a == 0.0:
return (-1,-1,-1,ZW.BAD_MINFIT)
#- recast as y = y0 + ((x-x0)/xerr)^2
x0 = -b / (2*a)
y0 = -(b**2) / (4*a) + c
zwarn = 0
if (x0 <= np.min(x)) or (np.max(x) <= x0):
zwarn |= ZW.BAD_MINFIT
if (y0<=0.):
zwarn |= ZW.BAD_MINFIT
if a > 0.0:
xerr = 1 / np.sqrt(a)
else:
xerr = 1 / np.sqrt(-a)
zwarn |= ZW.BAD_MINFIT
return (x0, xerr, y0, zwarn)
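# Illustrative sketch (assumption): three points on the parabola
# y = 1 + 3*(x-1)^2 recover x0=1, y0=1 and xerr=1/sqrt(3) with zwarn=0.
def _minfit_example():
    x0, xerr, y0, zwarn = minfit(np.array([0., 1., 2.]), np.array([4., 1., 4.]))
    assert zwarn == 0
    assert abs(x0 - 1.0) < 1e-6 and abs(y0 - 1.0) < 1e-6
    assert abs(xerr - 1.0 / np.sqrt(3.0)) < 1e-6
    return x0, xerr, y0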
def fitz(zchi2, redshifts, spectra, template, nminima=3, archetype=None):
"""Refines redshift measurement around up to nminima minima.
TODO:
if there are fewer than nminima minima, consider padding.
Args:
zchi2 (array): chi^2 values for each redshift.
redshifts (array): the redshift values.
spectra (list): list of Spectrum objects at different wavelengths
grids.
template (Template): the template for this fit.
nminima (int): the number of minima to consider.
Returns:
Table: the fit parameters for the minima.
"""
assert len(zchi2) == len(redshifts)
nbasis = template.nbasis
# Build dictionary of wavelength grids
dwave = { s.wavehash:s.wave for s in spectra }
if not archetype is None:
# TODO: set this as a parameter
deg_legendre = 3
wave = np.concatenate([ w for w in dwave.values() ])
wave_min = wave.min()
wave_max = wave.max()
legendre = { hs:np.array([scipy.special.legendre(i)( (w-wave_min)/(wave_max-wave_min)*2.-1. ) for i in range(deg_legendre)]) for hs, w in dwave.items() }
(weights, flux, wflux) = spectral_data(spectra)
results = list()
for imin in find_minima(zchi2):
if len(results) == nminima:
break
        #- Skip this minimum if it is within constants.max_velo_diff km/s of a
        #- previously found minimum (dv is in km/s)
zprev = np.array([tmp['z'] for tmp in results])
dv = get_dv(z=redshifts[imin],zref=zprev)
if np.any(np.abs(dv) < constants.max_velo_diff):
continue
#- Sample more finely around the minimum
ilo = max(0, imin-1)
ihi = min(imin+1, len(zchi2)-1)
zz = np.linspace(redshifts[ilo], redshifts[ihi], 15)
nz = len(zz)
zzchi2 = np.zeros(nz, dtype=np.float64)
zzcoeff = np.zeros((nz, nbasis), dtype=np.float64)
for i, z in enumerate(zz):
binned = rebin_template(template, z, dwave)
for k in list(dwave.keys()):
T = transmission_Lyman(z,dwave[k])
for vect in range(binned[k].shape[1]):
binned[k][:,vect] *= T
zzchi2[i], zzcoeff[i] = calc_zchi2_one(spectra, weights, flux,
wflux, binned)
#- fit parabola to 3 points around minimum
i = min(max(np.argmin(zzchi2),1), len(zz)-2)
zmin, sigma, chi2min, zwarn = minfit(zz[i-1:i+2], zzchi2[i-1:i+2])
try:
binned = rebin_template(template, zmin, dwave)
for k in list(dwave.keys()):
T = transmission_Lyman(zmin,dwave[k])
for vect in range(binned[k].shape[1]):
binned[k][:,vect] *= T
coeff = calc_zchi2_one(spectra, weights, flux, wflux,
binned)[1]
except ValueError as err:
if zmin<redshifts[0] or redshifts[-1]<zmin:
#- beyond redshift range can be invalid for template
coeff = np.zeros(template.nbasis)
zwarn |= ZW.Z_FITLIMIT
zwarn |= ZW.BAD_MINFIT
else:
#- Unknown problem; re-raise error
raise err
zbest = zmin
zerr = sigma
#- Initial minimum or best fit too close to edge of redshift range
if zbest < redshifts[1] or zbest > redshifts[-2]:
zwarn |= ZW.Z_FITLIMIT
if zmin < redshifts[1] or zmin > redshifts[-2]:
zwarn |= ZW.Z_FITLIMIT
#- parabola minimum outside fit range; replace with min of scan
if zbest < zz[0] or zbest > zz[-1]:
zwarn |= ZW.BAD_MINFIT
            imin = np.where(zzchi2 == np.min(zzchi2))[0][0]  # best point of the coarse scan
zbest = zz[imin]
chi2min = zzchi2[imin]
#- Skip this better defined minimum if it is within
#- constants.max_velo_diff km/s of a previous one
zprev = np.array([tmp['z'] for tmp in results])
dv = get_dv(z=zbest, zref=zprev)
if np.any(np.abs(dv) < constants.max_velo_diff):
continue
if archetype is None:
results.append(dict(z=zbest, zerr=zerr, zwarn=zwarn,
chi2=chi2min, zz=zz, zzchi2=zzchi2,
coeff=coeff))
else:
chi2min, coeff, fulltype = archetype.get_best_archetype(spectra,weights,flux,wflux,dwave,zbest,legendre)
results.append(dict(z=zbest, zerr=zerr, zwarn=zwarn,
chi2=chi2min, zz=zz, zzchi2=zzchi2,
coeff=coeff, fulltype=fulltype))
#- Sort results by chi2min; detailed fits may have changed order
ii = np.argsort([tmp['chi2'] for tmp in results])
results = [results[i] for i in ii]
#- Convert list of dicts -> Table
from astropy.table import Table
results = Table(results)
assert len(results) > 0
return results
| bsd-3-clause | -6,433,627,876,131,985,000 | 28.392562 | 161 | 0.566568 | false | 3.139011 | false | false | false |
potsmaster/cinder | cinder/volume/drivers/dothill/dothill_client.py | 1 | 12318 | # Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from hashlib import md5
import math
import time
from lxml import etree
from oslo_log import log as logging
import requests
import six
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._login = login
self._password = password
self._base_url = "%s://%s/api" % (protocol, host)
self._session_key = None
self.ssl_verify = ssl_verify
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
tree = etree.XML(xml)
if tree.findtext(".//PROPERTY[@name='response-type']") == "success":
self._session_key = tree.findtext(".//PROPERTY[@name='response']")
def login(self):
"""Authenticates the service on the device."""
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify)
except requests.exceptions.RequestException:
raise exception.DotHillConnectionError
self._get_auth_token(xml.text.encode('utf8'))
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
Raises a DotHillRequestError error if the return code is not 0.
"""
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code and return_code != '0':
raise exception.DotHillRequestError(
message=tree.findtext(".//PROPERTY[@name='response']"))
elif not return_code:
raise exception.DotHillRequestError(message="No status found")
def _build_request_url(self, path, *args, **kargs):
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
def _request(self, path, *args, **kargs):
"""Performs an HTTP request on the device.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers, verify=self.ssl_verify)
tree = etree.XML(xml.text.encode('utf8'))
except Exception:
raise exception.DotHillConnectionError
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify)
return True
except Exception:
return False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
self._request("/create/volume", name, **path_dict)
return None
def delete_volume(self, name):
self._request("/delete/volumes", name)
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
self._request("/create/snapshots", snap_name, volumes=volume_name)
def delete_snapshot(self, snap_name):
self._request("/delete/snapshot", "cleanup", snap_name)
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
return int(math.ceil(float(size) * 512 / (10 ** 9)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
self._request("/create/host", hostname, id=host)
lun = self._get_first_available_lun_for_host(host)
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def copy_volume(self, src_name, dest_name, same_bknd, dest_bknd_name):
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
if same_bknd == 0:
return
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
raise exception.DotHillRequestError
break
time.sleep(1)
count += 1
time.sleep(5)
def _check_host(self, host):
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (32 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
index = len(hostname)
if index > 15:
index = 15
return hostname[:index]
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name):
tree = self._request("/show/vdisks", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
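# Illustrative sketch (assumption): typical driver-side use of the client
# above; the address, credentials and backend names are placeholders.
def _dothill_client_example():
    client = DotHillClient('192.0.2.10', 'manage', '!manage',
                           protocol='https', ssl_verify=False)
    client.login()
    try:
        stats = client.backend_stats('A', 'virtual')
        client.create_volume('v0001', '10GB', 'A', 'virtual')
        return stats
    finally:
        client.logout()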
| apache-2.0 | 2,781,379,234,421,409,300 | 35.443787 | 79 | 0.551469 | false | 4.115603 | false | false | false |
cbitstech/Purple-Robot-Django | management/commands/extractors/builtin_rawlocationprobeeventlog.py | 1 | 2943 | # pylint: disable=line-too-long
import datetime
import psycopg2
import pytz
CREATE_PROBE_TABLE_SQL = 'CREATE TABLE builtin_rawlocationprobeeventlog(id SERIAL PRIMARY KEY, user_id TEXT, guid TEXT, timestamp BIGINT, utc_logged TIMESTAMP, provider_status TEXT, log_event TEXT, satellites BIGINT);'
CREATE_PROBE_USER_ID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(user_id);'
CREATE_PROBE_GUID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(guid);'
CREATE_PROBE_UTC_LOGGED_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(utc_logged);'
def exists(connection_str, user_id, reading):
conn = psycopg2.connect(connection_str)
if probe_table_exists(conn) is False:
conn.close()
return False
cursor = conn.cursor()
cursor.execute('SELECT id FROM builtin_rawlocationprobeeventlog WHERE (user_id = %s AND guid = %s);', (user_id, reading['GUID']))
row_exists = (cursor.rowcount > 0)
cursor.close()
conn.close()
return row_exists
def probe_table_exists(conn):
cursor = conn.cursor()
cursor.execute('SELECT table_name FROM information_schema.tables WHERE (table_schema = \'public\' AND table_name = \'builtin_rawlocationprobeeventlog\')')
table_exists = (cursor.rowcount > 0)
cursor.close()
return table_exists
def insert(connection_str, user_id, reading, check_exists=True):
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
if check_exists and probe_table_exists(conn) is False:
cursor.execute(CREATE_PROBE_TABLE_SQL)
cursor.execute(CREATE_PROBE_USER_ID_INDEX)
cursor.execute(CREATE_PROBE_GUID_INDEX)
cursor.execute(CREATE_PROBE_UTC_LOGGED_INDEX)
conn.commit()
reading_cmd = 'INSERT INTO builtin_rawlocationprobeeventlog(user_id, ' + \
'guid, ' + \
'timestamp, ' + \
'utc_logged, ' + \
'provider_status, ' + \
'log_event, ' + \
'satellites) VALUES (%s, %s, %s, %s, %s, %s, %s) RETURNING id;'
provider_status = None
satellites = None
if 'PROVIDER_STATUS' in reading:
provider_status = reading['PROVIDER_STATUS']
if 'satellites' in reading:
satellites = reading['satellites']
cursor.execute(reading_cmd, (user_id,
reading['GUID'],
reading['TIMESTAMP'],
datetime.datetime.fromtimestamp(reading['TIMESTAMP'], tz=pytz.utc),
provider_status,
reading['LOG_EVENT'],
satellites))
conn.commit()
cursor.close()
conn.close()
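# Illustrative sketch (assumption): storing one event-log reading; the
# connection string, user id and reading values below are placeholders.
def example_insert():
    connection_str = 'dbname=purple_robot user=pr password=pr host=localhost'
    reading = {
        'GUID': '00000000-0000-0000-0000-000000000001',
        'TIMESTAMP': 1462000000,
        'LOG_EVENT': 'gps_fix_acquired',
        'PROVIDER_STATUS': 'AVAILABLE',
        'satellites': 7,
    }
    if not exists(connection_str, 'example-user', reading):
        insert(connection_str, 'example-user', reading)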
| gpl-3.0 | -4,866,252,767,588,838,000 | 34.890244 | 218 | 0.576283 | false | 4.048143 | false | false | false |
cloudnull/eventlet_wsgi | example_app/app.py | 1 | 3150 | # =============================================================================
# Copyright [2014] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
# This is an example application
# =============================================================================
import datetime
import os
import flask
import ewsgi
from cloudlib import parse_ini
from cloudlib import logger
CONFIG = parse_ini.ConfigurationSetup()
try:
CONFIG.load_config(name='example', path=os.getcwd())
# Load Default Configuration
default_config = CONFIG.config_args(section='default')
# Set the application name
APPNAME = default_config.get('appname', 'example')
# Store network Configuration
network_config = CONFIG.config_args(section='network')
# Store SSL configuration
ssl_config = CONFIG.config_args(section='ssl')
# Enable or disable DEBUG mode
DEBUG = default_config.get('debug', False)
except IOError:
# If the configuration file is not present, set the two bits we need
DEBUG = True
APPNAME = 'example'
# Load Logging
LOG = logger.getLogger(APPNAME)
# Load the flask APP
APP = flask.Flask(APPNAME)
# Enable general debugging
if DEBUG is True:
APP.debug = True
LOG.debug(APP.logger)
# Enable Application Threading
APP.threaded = True
# Enforce strict slashes in URI's
APP.url_map.strict_slashes = False
# Add default handling for "file not found" (assumes ewsgi.not_found is a
# 404 handler callable).
APP.register_error_handler(404, ewsgi.not_found)
# Load the BLUEPRINT handler
BLUEPRINT = flask.Blueprint
blueprints = []
# Each Blueprint is essentially a route. It has a name and is stored as an
# object whose route() method is used as a decorator below.
hello_world = BLUEPRINT('hello', APPNAME)
test_path = BLUEPRINT('test_path', __name__)
# The decorator object is appended to the "blueprints" list and will be
# used later to register ALL blueprints.
blueprints.append(hello_world)
blueprints.append(test_path)
# This decorator loads the route and provides the allowed methods
# available from within the decorator
@hello_world.route('/hello', methods=['GET'])
def _hello_world():
"""Return 200 response on GET '/hello'."""
LOG.debug('hello world')
return 'hello world. The time is [ %s ]' % datetime.datetime.utcnow(), 200
@test_path.route('/test', methods=['GET'])
def _test_path():
"""Return 200 response on GET '/test'."""
state = {
'Application': APPNAME,
'time': datetime.datetime.utcnow(),
'request': {
'method': flask.request.method,
'path': flask.request.path
}
}
LOG.debug(state)
return flask.jsonify({'response': state}, indent=2), 200
# Register all blueprints as found in are `list` of blueprints
for blueprint in blueprints:
APP.register_blueprint(blueprint=blueprint)
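# Illustrative sketch (assumption): the project presumably serves this app with
# its eventlet WSGI server; the built-in Flask development server shown here is
# only a quick local check.
if __name__ == '__main__':
    APP.run(host='0.0.0.0', port=5000, debug=DEBUG)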
| gpl-3.0 | -1,176,473,469,991,870,000 | 27.378378 | 79 | 0.653651 | false | 4.096229 | true | false | false |
Codepoints/unidump | unidump/__init__.py | 1 | 1861 | #!/usr/bin/env python3
"""
hexdump(1) for Unicode data
"""
from typing import IO
from unidump.output import sanitize_char, print_line, fill_and_print
from unidump.env import Env
VERSION = '1.1.3'
def unidump(inbytes: IO[bytes], env: Env) -> None:
"""take a list of bytes and print their Unicode codepoints
>>> import io
>>> import sys
>>> from unidump.env import Env
>>> _env = Env(linelength=4, output=sys.stdout)
>>> unidump(io.BytesIO(b'\\x01\\xF0\\x9F\\x99\\xB8ABC'), _env)
0 0001 1F678 0041 0042 .\U0001F678AB
7 0043 C
>>> unidump(io.BytesIO(b'\\xD7'), _env)
0 ?D7? X
>>> _env.encoding = 'latin1'
>>> unidump(io.BytesIO(b'\\xD7'), _env)
0 00D7 \u00D7
"""
byteoffset = 0
bytebuffer = b''
current_line = [0, [], '']
byte = inbytes.read(1)
while byte:
byteoffset += 1
bytebuffer += byte
try:
char = bytebuffer.decode(env.encoding)
except UnicodeDecodeError:
next_byte = inbytes.read(1)
if not next_byte or len(bytebuffer) >= 4:
for i, data in enumerate(bytebuffer):
current_line = (
fill_and_print(current_line, byteoffset - 4 + i,
'?{:02X}?'.format(data), 'X', env)
)
bytebuffer = b''
byte = next_byte
continue
else:
current_line = (
fill_and_print(current_line, byteoffset - len(bytebuffer),
'{:04X}'.format(ord(char)), sanitize_char(char),
env)
)
bytebuffer = b''
byte = inbytes.read(1)
print_line(current_line, env)
| mit | 131,718,491,574,732,180 | 27.630769 | 79 | 0.487372 | false | 3.670611 | false | false | false |
burjorjee/evolve-parities | evolveparities.py | 1 | 5098 | from contextlib import closing
from matplotlib.pyplot import plot, figure, hold, axis, ylabel, xlabel, savefig, title
from numpy import sort, logical_xor, transpose, logical_not
from numpy.numarray.functions import cumsum, zeros
from numpy.random import rand, shuffle
from numpy import mod, floor
import time
import cloud
from durus.file_storage import FileStorage
from durus.connection import Connection
def bitFreqVisualizer(effectiveAttrIndices, bitFreqs, gen):
f = figure(1)
n = len(bitFreqs)
hold(False)
plot(range(n), bitFreqs,'b.', markersize=10)
hold(True)
plot(effectiveAttrIndices, bitFreqs[effectiveAttrIndices],'r.', markersize=10)
axis([0, n-1, 0, 1])
title("Generation = %s" % (gen,))
ylabel('Frequency of the Bit 1')
xlabel('Locus')
f.canvas.draw()
f.show()
def showExperimentTimeStamps():
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
return conn.get_root().keys()
def neap_uga(m, n, gens, probMutation, effectiveAttrIndices, probMisclassification, bitFreqVisualizer=None):
""" neap = "noisy effective attribute parity"
"""
pop = rand(m,n)<0.5
bitFreqHist= zeros((n,gens+1))
for t in range(gens+1):
print "Generation %s" % t
bitFreqs = pop.astype('float').sum(axis=0)/m
bitFreqHist[:,t] = transpose(bitFreqs)
if bitFreqVisualizer:
bitFreqVisualizer(bitFreqs,t)
fitnessVals = mod(pop[:, effectiveAttrIndices].astype('byte').sum(axis=1) +
(rand(m) < probMisclassification).astype('byte'),2)
totalFitness = sum (fitnessVals)
cumNormFitnessVals = cumsum(fitnessVals).astype('float')/totalFitness
parentIndices = zeros(2*m, dtype='int16')
markers = sort(rand(2*m))
ctr = 0
for idx in xrange(2*m):
while markers[idx]>cumNormFitnessVals[ctr]:
ctr += 1
parentIndices[idx] = ctr
shuffle(parentIndices)
crossoverMasks = rand(m, n) < 0.5
newPop = zeros((m, n), dtype='bool')
newPop[crossoverMasks] = pop[parentIndices[:m], :][crossoverMasks]
newPop[logical_not(crossoverMasks)] = pop[parentIndices[m:], :][logical_not(crossoverMasks)]
mutationMasks = rand(m, n)<probMutation
pop = logical_xor(newPop,mutationMasks)
return bitFreqHist[0, :], bitFreqHist[-1, :]
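# Illustrative sketch (assumption): a small run of the GA above, tracking
# parity over the first three loci with 10% label noise; sizes are arbitrary.
def demo_neap_uga():
    firstLocusFreqs, lastLocusFreqs = neap_uga(m=200, n=20, gens=50,
            probMutation=0.005,
            effectiveAttrIndices=range(3),
            probMisclassification=0.10)
    print "First-locus 1-frequency by generation:", firstLocusFreqs[:5]
    return firstLocusFreqs, lastLocusFreqs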
def f(gens):
k = 7
n= k + 1
effectiveAttrIndices = range(k)
probMutation = 0.004
probMisclassification = 0.20
popSize = 1500
jid = cloud.call(neap_uga, **dict(m=popSize,
n=n,
gens=gens,
probMutation=probMutation,
effectiveAttrIndices=effectiveAttrIndices,
probMisclassification=probMisclassification))
print "Kicked off trial %s" % jid
return jid
def cloud_result(jid):
result = cloud.result(jid)
print "Retrieved results for trial %s" % jid
return result
def run_trials():
numTrials = 3000
gens = 1000
from multiprocessing.pool import ThreadPool as Pool
pool = Pool(50)
jids = pool.map(f,[gens]*numTrials)
print "Done spawning trials. Retrieving results..."
results = pool.map(cloud_result, jids)
firstLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
lastLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
print "Done retrieving results. Press Enter to serialize..."
raw_input()
for i, result in enumerate(results):
firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists)
conn.commit()
pool.close()
pool.join()
def render_results(timestamp=None):
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
db = conn.get_root()
if not timestamp:
timestamp = sorted(db.keys())[-1]
firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp]
print "Done deserializing results. Plotting..."
x = [(2, 'First', firstLocusFreqsHists, "effective"),
(3, 'Last', lastLocusFreqsHists, "non-effective")]
for i, pos, freqsHists, filename in x :
freqsHists = freqsHists[:,:801]
f = figure(i)
hold(False)
plot(transpose(freqsHists), color='grey')
hold(True)
maxGens = freqsHists.shape[1]-1
plot([0, maxGens], [.05,.05], 'k--')
plot([0, maxGens], [.95,.95], 'k--')
axis([0, maxGens, 0, 1])
xlabel('Generation')
ylabel('1-Frequency of the '+pos+' Locus')
f.canvas.draw()
f.show()
savefig(filename+'.png', format='png', dpi=200)
if __name__ == "__main__":
cloud.start_simulator()
run_trials()
render_results()
print "Done plotting results. Press Enter to end..."
raw_input()
| gpl-3.0 | -5,851,822,647,906,978,000 | 32.539474 | 108 | 0.620832 | false | 3.451591 | false | false | false |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/funding_v30.py | 1 | 16706 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.amount_v30 import AmountV30 # noqa: F401,E501
from orcid_api_v3.models.created_date_v30 import CreatedDateV30 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30 import ExternalIDsV30 # noqa: F401,E501
from orcid_api_v3.models.funding_contributors_v30 import FundingContributorsV30 # noqa: F401,E501
from orcid_api_v3.models.funding_title_v30 import FundingTitleV30 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30 import FuzzyDateV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.organization_defined_funding_sub_type_v30 import OrganizationDefinedFundingSubTypeV30 # noqa: F401,E501
from orcid_api_v3.models.organization_v30 import OrganizationV30 # noqa: F401,E501
from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501
from orcid_api_v3.models.url_v30 import UrlV30 # noqa: F401,E501
class FundingV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30',
'last_modified_date': 'LastModifiedDateV30',
'source': 'SourceV30',
'put_code': 'int',
'path': 'str',
'type': 'str',
'organization_defined_type': 'OrganizationDefinedFundingSubTypeV30',
'title': 'FundingTitleV30',
'short_description': 'str',
'amount': 'AmountV30',
'url': 'UrlV30',
'start_date': 'FuzzyDateV30',
'end_date': 'FuzzyDateV30',
'external_ids': 'ExternalIDsV30',
'contributors': 'FundingContributorsV30',
'organization': 'OrganizationV30',
'visibility': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'path': 'path',
'type': 'type',
'organization_defined_type': 'organization-defined-type',
'title': 'title',
'short_description': 'short-description',
'amount': 'amount',
'url': 'url',
'start_date': 'start-date',
'end_date': 'end-date',
'external_ids': 'external-ids',
'contributors': 'contributors',
'organization': 'organization',
'visibility': 'visibility'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, type=None, organization_defined_type=None, title=None, short_description=None, amount=None, url=None, start_date=None, end_date=None, external_ids=None, contributors=None, organization=None, visibility=None): # noqa: E501
"""FundingV30 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._path = None
self._type = None
self._organization_defined_type = None
self._title = None
self._short_description = None
self._amount = None
self._url = None
self._start_date = None
self._end_date = None
self._external_ids = None
self._contributors = None
self._organization = None
self._visibility = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if path is not None:
self.path = path
if type is not None:
self.type = type
if organization_defined_type is not None:
self.organization_defined_type = organization_defined_type
if title is not None:
self.title = title
if short_description is not None:
self.short_description = short_description
if amount is not None:
self.amount = amount
if url is not None:
self.url = url
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if external_ids is not None:
self.external_ids = external_ids
if contributors is not None:
self.contributors = contributors
if organization is not None:
self.organization = organization
if visibility is not None:
self.visibility = visibility
@property
def created_date(self):
"""Gets the created_date of this FundingV30. # noqa: E501
:return: The created_date of this FundingV30. # noqa: E501
:rtype: CreatedDateV30
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this FundingV30.
:param created_date: The created_date of this FundingV30. # noqa: E501
:type: CreatedDateV30
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this FundingV30. # noqa: E501
:return: The last_modified_date of this FundingV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this FundingV30.
:param last_modified_date: The last_modified_date of this FundingV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this FundingV30. # noqa: E501
:return: The source of this FundingV30. # noqa: E501
:rtype: SourceV30
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this FundingV30.
:param source: The source of this FundingV30. # noqa: E501
:type: SourceV30
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this FundingV30. # noqa: E501
:return: The put_code of this FundingV30. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this FundingV30.
:param put_code: The put_code of this FundingV30. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def path(self):
"""Gets the path of this FundingV30. # noqa: E501
:return: The path of this FundingV30. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this FundingV30.
:param path: The path of this FundingV30. # noqa: E501
:type: str
"""
self._path = path
@property
def type(self):
"""Gets the type of this FundingV30. # noqa: E501
:return: The type of this FundingV30. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this FundingV30.
:param type: The type of this FundingV30. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["GRANT", "CONTRACT", "AWARD", "SALARY_AWARD", "grant", "contract", "award",
"salary-award"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def organization_defined_type(self):
"""Gets the organization_defined_type of this FundingV30. # noqa: E501
:return: The organization_defined_type of this FundingV30. # noqa: E501
:rtype: OrganizationDefinedFundingSubTypeV30
"""
return self._organization_defined_type
@organization_defined_type.setter
def organization_defined_type(self, organization_defined_type):
"""Sets the organization_defined_type of this FundingV30.
:param organization_defined_type: The organization_defined_type of this FundingV30. # noqa: E501
:type: OrganizationDefinedFundingSubTypeV30
"""
self._organization_defined_type = organization_defined_type
@property
def title(self):
"""Gets the title of this FundingV30. # noqa: E501
:return: The title of this FundingV30. # noqa: E501
:rtype: FundingTitleV30
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this FundingV30.
:param title: The title of this FundingV30. # noqa: E501
:type: FundingTitleV30
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def short_description(self):
"""Gets the short_description of this FundingV30. # noqa: E501
:return: The short_description of this FundingV30. # noqa: E501
:rtype: str
"""
return self._short_description
@short_description.setter
def short_description(self, short_description):
"""Sets the short_description of this FundingV30.
:param short_description: The short_description of this FundingV30. # noqa: E501
:type: str
"""
self._short_description = short_description
@property
def amount(self):
"""Gets the amount of this FundingV30. # noqa: E501
:return: The amount of this FundingV30. # noqa: E501
:rtype: AmountV30
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this FundingV30.
:param amount: The amount of this FundingV30. # noqa: E501
:type: AmountV30
"""
self._amount = amount
@property
def url(self):
"""Gets the url of this FundingV30. # noqa: E501
:return: The url of this FundingV30. # noqa: E501
:rtype: UrlV30
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this FundingV30.
:param url: The url of this FundingV30. # noqa: E501
:type: UrlV30
"""
self._url = url
@property
def start_date(self):
"""Gets the start_date of this FundingV30. # noqa: E501
:return: The start_date of this FundingV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this FundingV30.
:param start_date: The start_date of this FundingV30. # noqa: E501
:type: FuzzyDateV30
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this FundingV30. # noqa: E501
:return: The end_date of this FundingV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this FundingV30.
:param end_date: The end_date of this FundingV30. # noqa: E501
:type: FuzzyDateV30
"""
self._end_date = end_date
@property
def external_ids(self):
"""Gets the external_ids of this FundingV30. # noqa: E501
:return: The external_ids of this FundingV30. # noqa: E501
:rtype: ExternalIDsV30
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this FundingV30.
:param external_ids: The external_ids of this FundingV30. # noqa: E501
:type: ExternalIDsV30
"""
self._external_ids = external_ids
@property
def contributors(self):
"""Gets the contributors of this FundingV30. # noqa: E501
:return: The contributors of this FundingV30. # noqa: E501
:rtype: FundingContributorsV30
"""
return self._contributors
@contributors.setter
def contributors(self, contributors):
"""Sets the contributors of this FundingV30.
:param contributors: The contributors of this FundingV30. # noqa: E501
:type: FundingContributorsV30
"""
self._contributors = contributors
@property
def organization(self):
"""Gets the organization of this FundingV30. # noqa: E501
:return: The organization of this FundingV30. # noqa: E501
:rtype: OrganizationV30
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this FundingV30.
:param organization: The organization of this FundingV30. # noqa: E501
:type: OrganizationV30
"""
if organization is None:
raise ValueError("Invalid value for `organization`, must not be `None`") # noqa: E501
self._organization = organization
@property
def visibility(self):
"""Gets the visibility of this FundingV30. # noqa: E501
:return: The visibility of this FundingV30. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this FundingV30.
:param visibility: The visibility of this FundingV30. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE", "public", "private",
"limited", "registered-only"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FundingV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FundingV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
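# Illustrative note added for clarity (not generated code): the visibility
# setter above only accepts members of allowed_values, e.g. assigning
# "public" succeeds while an unknown string raises ValueError, and to_dict()
# then serialises every attribute declared in swagger_types.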
| mit | 2,249,127,809,126,188,500 | 28.939068 | 328 | 0.590028 | false | 3.777929 | false | false | false |
jelly/calibre | src/calibre/utils/resources.py | 1 | 3853 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import __builtin__, sys, os
from calibre import config_dir
class PathResolver(object):
def __init__(self):
self.locations = [sys.resources_location]
self.cache = {}
def suitable(path):
try:
return os.path.exists(path) and os.path.isdir(path) and \
os.listdir(path)
except:
pass
return False
self.default_path = sys.resources_location
dev_path = os.environ.get('CALIBRE_DEVELOP_FROM', None)
self.using_develop_from = False
if dev_path is not None:
dev_path = os.path.join(os.path.abspath(
os.path.dirname(dev_path)), 'resources')
if suitable(dev_path):
self.locations.insert(0, dev_path)
self.default_path = dev_path
self.using_develop_from = True
user_path = os.path.join(config_dir, 'resources')
self.user_path = None
if suitable(user_path):
self.locations.insert(0, user_path)
self.user_path = user_path
def __call__(self, path, allow_user_override=True):
path = path.replace(os.sep, '/')
key = (path, allow_user_override)
ans = self.cache.get(key, None)
if ans is None:
for base in self.locations:
if not allow_user_override and base == self.user_path:
continue
fpath = os.path.join(base, *path.split('/'))
if os.path.exists(fpath):
ans = fpath
break
if ans is None:
ans = os.path.join(self.default_path, *path.split('/'))
self.cache[key] = ans
return ans
_resolver = PathResolver()
def get_path(path, data=False, allow_user_override=True):
fpath = _resolver(path, allow_user_override=allow_user_override)
if data:
with open(fpath, 'rb') as f:
return f.read()
return fpath
def get_image_path(path, data=False, allow_user_override=True):
if not path:
return get_path('images', allow_user_override=allow_user_override)
return get_path('images/'+path, data=data, allow_user_override=allow_user_override)
def js_name_to_path(name, ext='.coffee'):
path = (u'/'.join(name.split('.'))) + ext
d = os.path.dirname
base = d(d(os.path.abspath(__file__)))
return os.path.join(base, path)
def _compile_coffeescript(name):
from calibre.utils.serve_coffee import compile_coffeescript
src = js_name_to_path(name)
with open(src, 'rb') as f:
cs, errors = compile_coffeescript(f.read(), src)
if errors:
for line in errors:
print (line)
raise Exception('Failed to compile coffeescript'
': %s'%src)
return cs
def compiled_coffeescript(name, dynamic=False):
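    # Explanatory note (added): with dynamic=False this simply returns the
    # precompiled JavaScript stored in compiled_coffeescript.zip; with
    # dynamic=True it compares the sha1 of the .coffee source with the hash
    # recorded in the zip's JSON comment and recompiles only when the hash is
    # missing or the source has changed.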
import zipfile
zipf = get_path('compiled_coffeescript.zip', allow_user_override=False)
with zipfile.ZipFile(zipf, 'r') as zf:
if dynamic:
import json
existing_hash = json.loads(zf.comment or '{}').get(name + '.js')
if existing_hash is not None:
import hashlib
with open(js_name_to_path(name), 'rb') as f:
if existing_hash == hashlib.sha1(f.read()).hexdigest():
return zf.read(name + '.js')
return _compile_coffeescript(name)
else:
return zf.read(name+'.js')
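# --- Illustrative usage sketch added for clarity; not part of the original
# module, and the image name below is hypothetical.
if __name__ == '__main__':
    # get_path() resolution order: the user's config_dir/resources override
    # (skipped when allow_user_override=False), then CALIBRE_DEVELOP_FROM,
    # then the bundled resources directory.
    print(get_path('images'))                  # directory of image resources
    print(get_image_path('hypothetical.png'))  # resolves under images/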
__builtin__.__dict__['P'] = get_path
__builtin__.__dict__['I'] = get_image_path
| gpl-3.0 | 5,682,038,873,633,076,000 | 30.842975 | 87 | 0.562938 | false | 3.659069 | false | false | false |
thaines/rfam | bin/prman_AlfParser.py | 1 | 9166 | import pyparsing as pp
import re
import copy
class prman_AlfParser:
def __init__(self):
self.keywords = ['Job', 'Task', 'RemoteCmd']
def parseFile(self, fileText):
commands = self.__parseCommandStructure(fileText, 0, isStart = True)
#print(commands)
textureCmds, Cmds, frames = self.extractCommandHierarchy(commands)
return [textureCmds, Cmds, frames]
def printCommands(self, cmds, currentIndent = 0):
if isinstance(cmds, list):
for e in cmds:
self.printCommands(e, currentIndent + 1)
print('---------------------')
else:
tabs = ''
for i in range(currentIndent):
tabs += '\t'
print(tabs + repr(cmds))
def __matchBracket(self, str):
if str[0] != '{':
return None
num_open = 0
for i, c in enumerate(str):
if c == '{':
num_open += 1
elif c == '}':
num_open -= 1
if num_open == 0:
return str[1:i]
return None
def leadingSpace(self, text):
return len(text) - len(text.lstrip())
def removingLeadingNewLines(self, text):
return text.lstrip('\n')
def determineCommandLength(self, text):
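        # Explanatory note (added): returns the number of characters from the
        # start of `text` up to and including the next line whose indentation
        # matches the first line (typically the line closing the current Alfred
        # block), so the caller can slice out one complete command.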
if text[0] == '\n':
raise ValueError('Determine command length should never take newline as first char!')
text = copy.deepcopy(text)
lines = text.split('\n')
lengths = [len(l) for l in lines]
currentIndent = self.leadingSpace(lines[0])
extent = len(lines[0])
for i, l in enumerate(lines[1:]):
if self.leadingSpace(l) != currentIndent:
extent += lengths[i + 1] + 1
else:
extent += lengths[i + 1] + 1
return extent
return extent
def extractAllArgs(self, text):
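        # Explanatory note (added): scans `text` for "-name value" pairs, where
        # a value is either a single token (read up to the next space) or a
        # balanced {...} block that may contain nested braces; returns the list
        # of argument names, the matching raw values, and an always-empty
        # placeholder string.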
currentIndent = 0
parsingBracket = False
parsingSimple = False
args = []
argNames = []
resultText = ''
currentBracketText = ''
i = 0
while i < len(text):
if parsingBracket:
#process indents
if text[i] == '}':
currentIndent -= 1
currentBracketText += text[i]
if currentIndent == 0:
args.append(currentBracketText[1:-1])
currentBracketText = ''
parsingBracket = False
currentIndent = 0
elif text[i] == '{':
currentBracketText += text[i]
currentIndent += 1
else:
currentBracketText += text[i]
elif parsingSimple:
if text[i] == ' ':
args.append(currentBracketText )
currentBracketText = ''
parsingSimple = False
else:
currentBracketText += text[i]
else:
if text[i] == '-':
counter = 1
argName = ''
while True:
if text[i + counter] == ' ':
argNames.append(argName)
if text[i + counter + 1] == '{':
currentIndent = 0
parsingBracket = True
i = i + counter
else:
parsingSimple = True
i = i + counter
break
else:
argName += text[i + counter]
counter += 1
i += 1
return argNames, args, resultText
def parseOptions(self, text):
optsNames, opts, textWithoutOpts = self.extractAllArgs(text)
result = {}
for i in range(len(optsNames)):
result[optsNames[i]] = opts[i]
return result
def parseJob(self, text):
newJob = self.parseOptions(text)
newJob['type'] = 'job'
return newJob
def parseRemoteCmd(self, text):
#grab the actual command
i = len(text) - 1
actualCommand = ''
while i > 0:
if text[i] == '}':
break
else:
i -= 1
while i > 0:
if text[i] == '{':
actualCommand = text[i] + actualCommand
break
else:
actualCommand = text[i] + actualCommand
i -=1
newCmd = self.parseOptions(text[:i])
newCmd['type'] = 'remoteCommand'
newCmd['command'] = actualCommand[1:-1]
return newCmd
def parseTask(self, text):
#parse Task Name
taskName = ''
start = text.find('{') + 1
for i in range(start, len(text)):
if text[i] == '}':
break
else:
taskName += text[i]
text = text[i+1:]
newTask = self.parseOptions(text)
newTask['type'] = 'task'
newTask['taskName'] = taskName
return newTask
def __parseCommandStructure(self, text, indentLevel, isStart = False):
structure = []
text = copy.deepcopy(text)
if isStart:
text = text[17:]
starts = [text.find(k) for k in self.keywords]
for i in range(len(starts)):
if starts[i] < 0:
starts[i] = 111111111111111111
lowestStartIdx = starts.index(min(starts))
#move back until new line
startIdx = starts[lowestStartIdx]
if startIdx == 111111111111111111:
return None
while startIdx > 0:
if text[startIdx - 1] == '\t':
startIdx -= 1
else:
break
if lowestStartIdx == 0: #Job
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseJob(text[startIdx+3:startIdx+length])
elif lowestStartIdx == 1: #Task
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseTask(text[startIdx+4:startIdx+length])
elif lowestStartIdx == 2: #RemoteCmd
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseRemoteCmd(text[startIdx+9:startIdx+length])
try: #why does hasattr not work here?
#print('Attempting to parse subtasks')
newItem['subtasks'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['subtasks']), indentLevel+1)
except:
pass
try:
newItem['cmds'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['cmds']), indentLevel+1)
except:
pass
structure.append(newItem)
nextCommands = self.__parseCommandStructure(text[startIdx+length:], indentLevel)
if nextCommands:
for c in nextCommands:
structure.append(c)
return structure
def extractCommandsForFrame(self, task):
frames = []
cmds = {}
for t in task['subtasks']:
subcmds = []
#extract frame index
frameLinearIdx = int(t['taskName'].replace('Frame', ''))
frames.append(frameLinearIdx)
for t_sub in t['subtasks']:
try:
for c in t_sub['cmds']:
subcmds.append(c)
except:
pass
if subcmds:
cmds[str(frameLinearIdx)] = subcmds
return cmds, frames
def extractCommandsForTexture(self, task):
cmds = []
for t in task['subtasks']:
try:
for c in t['cmds']:
cmds.append(c)
except:
pass
return cmds
def extractCommandHierarchy(self, jobs):
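        # Explanatory note (added): walks each parsed Job and separates texture
        # conversion commands (subtasks named 'Job Textures') from per-frame
        # render commands, returning the texture commands, a dict of commands
        # keyed by frame number, and the frame numbers that were found.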
textureCommands = []
commands = {}
for j in jobs:
for t in j['subtasks']:
#get all texture conversion tasks
if t['taskName'] == 'Job Textures':
try:
newCommands = self.extractCommandsForTexture(t)
#textureCommands.append(newCommands)
for c in newCommands:
textureCommands.append(c)
except:
pass
#get commands for all frames
else:
newCommands, frames = self.extractCommandsForFrame(t)
commands.update(newCommands)
return textureCommands, commands, frames
def main():
with open('data/blue/shots/spool.alf', 'r') as myfile:
data = myfile.read()
parser = prman_AlfParser()
textureCmds, Cmds, frames = parser.parseFile(data)
print('Frames: ', frames)
if __name__ == "__main__":
main()
| gpl-3.0 | -8,844,686,984,876,143,000 | 32.452555 | 128 | 0.47818 | false | 4.636318 | false | false | false |
rainysia/dotfiles | doc/python/test/selenium_localchromeff_remoteIE.py | 1 | 1961 | #!/usr/bin/env python
# coding=utf-8
#chrome localhost
'''
import os
from selenium import webdriver
chromedriver = "/home/softs/selenium/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
driver.get("http://baidu.com")
driver.quit()
'''
#firefox(iceweasel) localhost
'''
import os
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://www.baidu.com')
browser.save_screenshot('screen.png')
browser.quit()
'''
#remote chrome
#remote IE
import os
# For Chinese
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
ie_desired_cap = {'os': 'Windows', 'os_version': '2008', 'browser': 'IE', 'browser_version': '9.0', 'resolution' : '1024x768'}
tommy_remote_url = 'http://192.168.85.123:4444/wd/hub'
derek_remote_url = 'http://192.168.87.72:18181/wd/hub'
# command_executor = 'http://USERNAME:[email protected]:80/wd/hub'
driver = webdriver.Remote(
command_executor=derek_remote_url,
desired_capabilities=ie_desired_cap)
#google, name=q
driver.get("http://www.baidu.com")
eg_title = "百度" # the title contains Chinese, so we need: import sys; reload(sys); sys.setdefaultencoding('utf-8')
print driver.title
#print help(driver)
try:
    if eg_title not in driver.title:
        raise Exception("Unable to load %s page!" % eg_title)
elem = driver.find_element_by_name("wd")
elem.send_keys("domain")
elem.submit()
    #two ways to wait, explicit & implicit
    #WebDriverWait.until(condition-that-finds-the-element) #explicit
#driver.manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS) #implicit
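    # Illustrative sketch (added comment, not executed): the line above is the
    # Java API; with the Python bindings an explicit wait would look like
    #   from selenium.webdriver.support.ui import WebDriverWait
    #   from selenium.webdriver.support import expected_conditions as EC
    #   from selenium.webdriver.common.by import By
    #   WebDriverWait(driver, 10).until(
    #       EC.presence_of_element_located((By.NAME, "wd")))
    # while the implicit wait is simply driver.implicitly_wait(10), as shown
    # (commented out) in the finally block below.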
print driver.title
sleep(10)
print '12345\n'
except Exception, e:
raise e
finally:
#driver.implicitly_wait(10)
#driver.set_script_timeout(10)
driver.quit()
| mit | -1,273,232,541,183,339,300 | 25.310811 | 126 | 0.717514 | false | 3.140323 | false | false | false |
CanalTP/navitia | source/jormungandr/jormungandr/scenarios/tests/journey_compare_tests.py | 1 | 43791 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from copy import deepcopy
from jormungandr.scenarios import journey_filter as jf
from jormungandr.scenarios.utils import DepartureJourneySorter, ArrivalJourneySorter
import navitiacommon.response_pb2 as response_pb2
from jormungandr.scenarios.new_default import sort_journeys
from jormungandr.utils import str_to_time_stamp
import random
import itertools
import functools
def empty_journeys_test():
response = response_pb2.Response()
sort_journeys(response, 'arrival_time', True)
assert not response.journeys
def different_arrival_times_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0758")
journey2.duration = 2 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 2 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0758")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
def different_departure_times_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.departure_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.departure_date_time = str_to_time_stamp("20140422T0758")
journey2.duration = 2 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 2 * 60
sort_journeys(response, 'departure_time', True)
assert response.journeys[0].departure_date_time == str_to_time_stamp("20140422T0758")
assert response.journeys[1].departure_date_time == str_to_time_stamp("20140422T0800")
def different_duration_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 3 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 3 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 3 * 60
assert response.journeys[1].duration == 5 * 60
def different_nb_transfers_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 25 * 60
journey1.nb_transfers = 1
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey1.sections[1].type = response_pb2.TRANSFER
journey1.sections[1].duration = 3 * 60
journey1.sections[2].type = response_pb2.WAITING
journey1.sections[2].duration = 2 * 60
journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[3].duration = 15 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 25 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 25 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 25 * 60
assert response.journeys[1].duration == 25 * 60
assert response.journeys[0].nb_transfers == 0
assert response.journeys[1].nb_transfers == 1
def different_duration_non_pt_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 25 * 60
journey1.nb_transfers = 1
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey1.sections[1].type = response_pb2.TRANSFER
journey1.sections[1].duration = 3 * 60
journey1.sections[2].type = response_pb2.WAITING
journey1.sections[2].duration = 2 * 60
journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[3].duration = 15 * 60
journey1.sections[4].type = response_pb2.STREET_NETWORK
journey1.sections[4].duration = 10 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 25 * 60
journey2.nb_transfers = 1
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 5 * 60
journey2.sections[1].type = response_pb2.TRANSFER
journey2.sections[1].duration = 3 * 60
journey2.sections[2].type = response_pb2.WAITING
journey2.sections[2].duration = 2 * 60
journey2.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[3].duration = 15 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 25 * 60
assert response.journeys[1].duration == 25 * 60
assert response.journeys[0].nb_transfers == 1
assert response.journeys[1].nb_transfers == 1
# We want to have journey2 in first, this is the one with 4 sections
assert len(response.journeys[0].sections) == 4
assert len(response.journeys[1].sections) == 5
def create_dummy_journey():
journey = response_pb2.Journey()
journey.arrival_date_time = str_to_time_stamp("20140422T0800")
journey.duration = 25 * 60
journey.nb_transfers = 1
s = journey.sections.add()
s.type = response_pb2.PUBLIC_TRANSPORT
s.origin.uri = "stop_point_1"
s.destination.uri = "stop_point_2"
s.vehicle_journey.uri = "vj_toto"
s.duration = 5 * 60
s = journey.sections.add()
s.type = response_pb2.TRANSFER
s.duration = 3 * 60
s = journey.sections.add()
s.type = response_pb2.WAITING
s.duration = 2 * 60
s = journey.sections.add()
s.type = response_pb2.PUBLIC_TRANSPORT
s.origin.uri = "stop_point_3"
s.destination.uri = "stop_point_4"
s.duration = 15 * 60
s = journey.sections.add()
s.type = response_pb2.STREET_NETWORK
s.duration = 10 * 60
return journey
def journey_pairs_gen(list_responses):
return itertools.combinations(jf.get_qualified_journeys(list_responses), 2)
def test_get_qualified_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.tags.append("a_tag")
journey2 = responses[0].journeys.add()
journey2.tags.append("to_delete")
journey3 = responses[0].journeys.add()
journey3.tags.append("another_tag")
journey3.tags.append("to_delete")
for qualified in jf.get_qualified_journeys(responses):
assert qualified.tags[0] == 'a_tag'
def test_num_qualifed_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.tags.append("a_tag")
journey2 = responses[0].journeys.add()
journey2.tags.append("to_delete")
journey3 = responses[0].journeys.add()
journey3.tags.append("another_tag")
assert jf.nb_qualifed_journeys(responses) == 2
def test_similar_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
journey2 = responses[0].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[0].uris.vehicle_journey = 'bob'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert len(list(jf.get_qualified_journeys(responses))) == 1
def test_similar_journeys_test2():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bob'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert len(list(jf.get_qualified_journeys(responses))) == 1
def test_similar_journeys_test3():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bobette'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_different_transfer():
"""
If 2 journeys take the same vjs but with a different number of sections,
one should be filtered
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections.add()
journey1.duration = 42
journey1.sections[-1].uris.vehicle_journey = 'bobette'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bobette'
jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_different_waiting_durations():
"""
If 2 journeys take the same vj, same number of sections but with different waiting durations,
    filter the one with the smaller waiting duration
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 600
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 600
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 25
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 175
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {})
assert 'to_delete' not in journey2.tags
assert 'to_delete' in journey1.tags
def test_similar_journeys_multi_trasfer_and_different_waiting_durations():
"""
    If 2 journeys take the same vj, same number of sections and several waiting sections with different durations,
    find the "min waiting duration" of each journey and
    keep the journey which has the larger "min waiting duration"
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 1000
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 10
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 190
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'boby'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 1000
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 20
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 180
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 100
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 100
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'boby'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_with_and_without_waiting_section():
"""
If 2 journeys take the same vj, one with a waiting section and another without,
    filter the one with a transfer but without a waiting section
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 600
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 600
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_walking_bike():
"""
    If we have 2 direct paths, one walking and one by bike, we should
not filter any journey
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 42
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 42
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Bike
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' not in journey2.tags
def test_similar_journeys_car_park():
"""
We have to consider a journey with
CAR / PARK / WALK to be equal to CAR / PARK
"""
responses = [response_pb2.Response()]
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Car
journey1.sections.add()
journey1.sections[-1].type = response_pb2.PARK
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Car
journey2.sections.add()
journey2.sections[-1].type = response_pb2.PARK
assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_similar_journeys_bss_park():
"""
We have to consider a journey with
    WALK / GET A BIKE / BSS to be equal to GET A BIKE / BSS
"""
responses = [response_pb2.Response()]
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey1.sections.add()
journey1.sections[-1].type = response_pb2.BSS_RENT
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Bss
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.BSS_RENT
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Bss
assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_similar_journeys_crowfly_rs():
"""
We have to consider a journey with
CROWFLY WALK to be different than CROWFLY Ridesharing
"""
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.CROW_FLY
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.CROW_FLY
journey2.sections[-1].street_network.mode = response_pb2.Ridesharing
assert not jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_departure_sort():
"""
we want to sort by departure hour, then by duration
"""
j1 = response_pb2.Journey()
j1.departure_date_time = str_to_time_stamp('20151005T071000')
j1.arrival_date_time = str_to_time_stamp('20151005T081900')
j1.duration = j1.arrival_date_time - j1.departure_date_time
j1.nb_transfers = 0
j2 = response_pb2.Journey()
j2.departure_date_time = str_to_time_stamp('20151005T072200')
j2.arrival_date_time = str_to_time_stamp('20151005T083500')
j2.duration = j2.arrival_date_time - j2.departure_date_time
j2.nb_transfers = 0
j3 = response_pb2.Journey()
j3.departure_date_time = str_to_time_stamp('20151005T074500')
j3.arrival_date_time = str_to_time_stamp('20151005T091200')
j3.duration = j3.arrival_date_time - j3.departure_date_time
j3.nb_transfers = 0
j4 = response_pb2.Journey()
j4.departure_date_time = str_to_time_stamp('20151005T074500')
j4.arrival_date_time = str_to_time_stamp('20151005T091100')
j4.duration = j4.arrival_date_time - j4.departure_date_time
j4.nb_transfers = 0
j5 = response_pb2.Journey()
j5.departure_date_time = str_to_time_stamp('20151005T074500')
j5.arrival_date_time = str_to_time_stamp('20151005T090800')
j5.duration = j5.arrival_date_time - j5.departure_date_time
j5.nb_transfers = 0
result = [j1, j2, j3, j4, j5]
random.shuffle(result)
comparator = DepartureJourneySorter(True)
result.sort(key=functools.cmp_to_key(comparator))
assert result[0] == j1
assert result[1] == j2
assert result[2] == j5
assert result[3] == j4
assert result[4] == j3
def test_arrival_sort():
"""
we want to sort by arrival hour, then by duration
"""
j1 = response_pb2.Journey()
j1.departure_date_time = str_to_time_stamp('20151005T071000')
j1.arrival_date_time = str_to_time_stamp('20151005T081900')
j1.duration = j1.arrival_date_time - j1.departure_date_time
j1.nb_transfers = 0
j2 = response_pb2.Journey()
j2.departure_date_time = str_to_time_stamp('20151005T072200')
j2.arrival_date_time = str_to_time_stamp('20151005T083500')
j2.duration = j2.arrival_date_time - j2.departure_date_time
j2.nb_transfers = 0
j3 = response_pb2.Journey()
j3.departure_date_time = str_to_time_stamp('20151005T074500')
j3.arrival_date_time = str_to_time_stamp('20151005T091200')
j3.duration = j3.arrival_date_time - j3.departure_date_time
j3.nb_transfers = 0
j4 = response_pb2.Journey()
j4.departure_date_time = str_to_time_stamp('20151005T075000')
j4.arrival_date_time = str_to_time_stamp('20151005T091200')
j4.duration = j4.arrival_date_time - j4.departure_date_time
j4.nb_transfers = 0
j5 = response_pb2.Journey()
j5.departure_date_time = str_to_time_stamp('20151005T075500')
j5.arrival_date_time = str_to_time_stamp('20151005T091200')
j5.duration = j5.arrival_date_time - j5.departure_date_time
j5.nb_transfers = 0
result = [j1, j2, j3, j4, j5]
random.shuffle(result)
comparator = ArrivalJourneySorter(True)
result.sort(key=functools.cmp_to_key(comparator))
assert result[0] == j1
assert result[1] == j2
assert result[2] == j5
assert result[3] == j4
assert result[4] == j3
def test_heavy_journey_walking():
"""
we don't filter any journey with walking
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
def test_heavy_journey_bike():
"""
    the first time the duration of the biking section is greater than the min value, so we keep the journey
    on the second test the duration is less than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.durations.bike = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
journey.durations.bike = journey.sections[-1].duration = 5
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_filter_wrapper():
"""
Testing that filter_wrapper is fine (see filter_wrapper doc)
"""
class LoveHateFilter(jf.SingleJourneyFilter):
message = 'i_dont_like_you'
def __init__(self, love=True):
self.love = love
def filter_func(self, journey):
return self.love
ref_journey = response_pb2.Journey()
# first we test when debug-mode deactivated (each time both OK-filter and KO-filter)
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=False))
assert not wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
# test using without debug mode (should be deactivated)
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=False))
assert not wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
# test when debug-mode is activated
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=False))
assert wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' in j.tags
def test_heavy_journey_car():
"""
    the first time the duration of the car section is greater than the min value, so we keep the journey
    on the second test the duration is less than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.durations.car = journey.sections[-1].duration = 25
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
journey.durations.car = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_heavy_journey_taxi():
"""
    the first time the duration of the taxi section is greater than the min value, so we keep the journey
    on the second test the duration is less than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.durations.taxi = journey.sections[-1].duration = 25
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20)
assert f.filter_func(journey)
journey.durations.taxi = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_heavy_journey_bss():
"""
    we should not remove any bss journey since it is already in competition with the walking journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.BSS_RENT
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.BSS_PUT_BACK
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
journey.durations.bike = 5
journey.durations.walking = 10
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
def test_activate_deactivate_min_bike():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : Bike
B->C : public transport
C->D : Bike
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 7
journey.durations.bike = 12
f = jf.FilterTooShortHeavyJourneys(min_bike=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike'])
assert f.filter_func(journey)
# case 6: request with bike only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.bike = 29
f = jf.FilterTooShortHeavyJourneys(min_bike=17, orig_modes=['bike'], dest_modes=['bike'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
# case 9: request with bike in origin_mode and bike, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.bike = 12
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike'], dest_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_activate_deactivate_min_car():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : car
B->C : public transport
C->D : car
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.sections[-1].duration = 7
journey.durations.car = 12
f = jf.FilterTooShortHeavyJourneys(min_car=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car'])
assert f.filter_func(journey)
# case 6: request with car only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.car = 29
f = jf.FilterTooShortHeavyJourneys(min_car=17, orig_modes=['car'], dest_modes=['car'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking'])
assert not f.filter_func(journey)
# case 9: request with bike in origin_mode and bike, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.car = 12
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car'], dest_modes=['car', 'walking'])
assert not f.filter_func(journey)
def test_activate_deactivate_min_taxi():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : taxi
B->C : public transport
C->D : taxi
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.sections[-1].duration = 7
journey.durations.taxi = 12
f = jf.FilterTooShortHeavyJourneys(min_taxi=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi'])
assert f.filter_func(journey)
# case 6: request with taxi only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.taxi = 29
f = jf.FilterTooShortHeavyJourneys(min_taxi=17, orig_modes=['taxi'], dest_modes=['taxi'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
# case 9: request with bike in origin_mode and bike, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.taxi = 12
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi'], dest_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
def test_filter_direct_path_mode_car():
# is_dp and not is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["bike"])
assert not f.filter_func(journey)
# is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["car"])
assert f.filter_func(journey)
# is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["taxi", "surf", "car", "bike"])
assert f.filter_func(journey)
# not is_dp and not is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
f = jf.FilterDirectPathMode(["bike"])
assert f.filter_func(journey)
# not is_dp and not is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
f = jf.FilterDirectPathMode(["car"])
assert f.filter_func(journey)
def test_heavy_journey_ridesharing():
"""
    the first time the duration of the ridesharing section is greater than the min value, so we keep the journey
    on the second test the duration is less than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Ridesharing
journey.durations.ridesharing = journey.sections[-1].duration = 25
    # Ridesharing duration is greater than the min_ridesharing value, so we keep the ridesharing journey
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing', 'walking'])
assert f.filter_func(journey)
    # Ridesharing duration is less than the min_ridesharing value but there is no walking option
    # In this case we keep the ridesharing journey
journey.durations.ridesharing = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing'])
assert f.filter_func(journey)
    # Ridesharing duration is less than the min_ridesharing value and there is also a walking option
    # In this case we reject the ridesharing journey
journey.durations.ridesharing = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing', 'walking'])
assert not f.filter_func(journey)
| agpl-3.0 | -7,305,518,788,665,118,000 | 34.982744 | 115 | 0.689343 | false | 2.944329 | true | false | false |
ami/lob-python | lob/api_requestor.py | 1 | 2714 | import requests
import lob
import json
import resource
from lob import error
from version import VERSION
def _is_file_like(obj):
"""
Checks if an object is file-like enough to be sent to requests.
In particular, file, StringIO and cStringIO objects are file-like.
Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
"""
return hasattr(obj, 'read') and hasattr(obj, 'seek')
class APIRequestor(object):
def __init__(self, key=None):
self.api_key = key or lob.api_key
def parse_response(self, resp):
payload = json.loads(resp.content)
if resp.status_code == 200:
return payload
elif resp.status_code == 401:
raise error.AuthenticationError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
elif resp.status_code in [404, 422]:
raise error.InvalidRequestError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
else:
#pragma: no cover
raise error.APIError(payload['errors'][0]['message'], resp.content, resp.status_code, resp) # pragma: no cover
def request(self, method, url, params=None):
headers = {
'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
}
if hasattr(lob, 'api_version'):
headers['Lob-Version'] = lob.api_version
if method == 'get':
return self.parse_response(
requests.get(lob.api_base + url, auth=(self.api_key, ''), params=params, headers=headers)
)
elif method == 'delete':
return self.parse_response(
requests.delete(lob.api_base + url, auth=(self.api_key, ''), headers=headers)
)
elif method == 'post':
data = {}
files = params.pop('files', {})
explodedParams = {}
for k,v in params.iteritems():
if isinstance(v, dict) and not isinstance(v, resource.LobObject):
for k2,v2 in v.iteritems():
explodedParams[k + '[' + k2 + ']'] = v2
else:
explodedParams[k] = v
for k,v in explodedParams.iteritems():
if _is_file_like(v):
files[k] = v
else:
if isinstance(v, resource.LobObject):
data[k] = v.id
else:
data[k] = v
return self.parse_response(
requests.post(lob.api_base + url, auth=(self.api_key, ''), data=data, files=files, headers=headers)
)
| mit | 2,569,158,319,488,001,500 | 34.710526 | 122 | 0.542373 | false | 4.062874 | false | false | false |
castedo/celauth | celauth/providers.py | 1 | 4151 |
import urlparse
from openid.consumer import consumer
from openid.extensions import sreg, ax
from celauth import OpenIDCase
from celauth.dj.celauth.openid_store import DjangoOpenIDStore
class OpenIDChoices(object):
def __init__(self, data):
self.data = data
def ids(self, id_prefix=''):
return [id_prefix + x[0] for x in self.data]
def texts(self):
return [x[1] for x in self.data]
def urls_by_id(self, id_prefix=''):
return dict( (id_prefix + x[0], x[2]) for x in self.data )
OPENID_PROVIDERS = OpenIDChoices([
('google', 'Google', 'https://www.google.com/accounts/o8/id'),
('yahoo', 'Yahoo!', 'https://me.yahoo.com/'),
('aol', 'AOL', 'https://openid.aol.com/'),
('stackexchange', 'StackExchange', 'https://openid.stackexchange.com/'),
('launchpad', 'Launchpad', 'https://login.launchpad.net/'),
('intuit', 'Intuit', 'https://openid.intuit.com/openid/xrds'),
])
class TestOpenIDHelper:
def __init__(self, real):
self.case = None
self.real = real
def initial_response(self, request, user_url, return_url):
urlp = urlparse.urlparse(user_url)
if urlp.netloc not in ('example.com', 'example.org', 'example.net'):
return self.real.initial_response(request, user_url, return_url)
if urlp.fragment:
email = urlp.fragment + '@' + urlp.netloc
urlp = list(urlp)
urlp[5] = '' # remove fragment
user_url = urlparse.ParseResult(*urlp).geturl()
else:
email = None
self.case = OpenIDCase(user_url, user_url, email)
return return_url
def make_case(self, request):
if not self.case:
return self.real.make_case(request)
ret = self.case
self.case = None
return ret
EMAIL_AX_TYPE_URI = 'http://axschema.org/contact/email'
class LiveOpenIDHelper:
def _openid_consumer(self, request):
openid_store = DjangoOpenIDStore()
return consumer.Consumer(request.session, openid_store)
def initial_response(self, request, user_url, return_url):
oc = self._openid_consumer(request)
openid_request = oc.begin(user_url)
if openid_request.endpoint.supportsType(ax.AXMessage.ns_uri):
ax_request = ax.FetchRequest()
ax_request.add(ax.AttrInfo(EMAIL_AX_TYPE_URI,
alias='email',
required=True,
))
openid_request.addExtension(ax_request)
else:
sreg_request = sreg.SRegRequest(required=['email'],
optional=[],
)
openid_request.addExtension(sreg_request)
realm = request.build_absolute_uri('/')
if openid_request.shouldSendRedirect():
return openid_request.redirectURL(realm, return_url)
else:
return openid_request.htmlMarkup(realm, return_url)
def make_case(self, request):
oc = self._openid_consumer(request)
current_url = request.build_absolute_uri()
query_params = dict(request.REQUEST.items())
response = oc.complete(query_params, current_url)
if response.status == consumer.CANCEL:
return "OpenID sign in cancelled"
if response.status == consumer.SUCCESS:
email = None
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
if sreg_response:
email = sreg_response.get('email', None)
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response:
email = ax_response.getSingle(EMAIL_AX_TYPE_URI, email)
return OpenIDCase(response.identity_url, response.getDisplayIdentifier(), email)
return response.message or "Internal openid library error" #should throw exception
facade = LiveOpenIDHelper()
def enable_test_openids():
global facade
facade = TestOpenIDHelper(facade)
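# Illustrative sketch (not part of the original module): a test suite can swap in the
# fake helper and "sign in" with example-domain identifiers instead of a live provider:
#
#   enable_test_openids()
#   facade.initial_response(request, 'http://example.com#alice', '/return/')
#   case = facade.make_case(request)   # OpenIDCase whose email is 'alice@example.com'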
| mit | 7,795,188,678,148,681,000 | 36.736364 | 92 | 0.589737 | false | 3.960878 | false | false | false |
mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/site/notify/SiteActivationNotification.py | 1 | 1716 | ##
##
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 09/10/14 #3623 randerso Manually created, do not regenerate
#
##
class SiteActivationNotification(object):
def __init__(self):
self.type = None
self.status = None
self.primarySite = None
self.modifiedSite = None
self.runMode = None
self.serverName = None
self.pluginName = None
def getType(self):
return self.type
def setType(self, type):
self.type = type
def getStatus(self):
return self.status
def setStatus(self, status):
self.status = status
    def getPrimarySite(self):
        return self.primarySite
    def setPrimarySite(self, primarySite):
        self.primarySite = primarySite
def getModifiedSite(self):
return self.modifiedSite
def setModifiedSite(self, modifiedSite):
self.modifiedSite = modifiedSite
def getRunMode(self):
return self.runMode
def setRunMode(self, runMode):
self.runMode = runMode
def getServerName(self):
return self.serverName
def setServerName(self, serverName):
self.serverName = serverName
def getPluginName(self):
return self.pluginName
def setPluginName(self, pluginName):
self.pluginName = pluginName
def __str__(self):
return self.pluginName.upper() + ":" \
+ self.status + ":" \
+ self.type + " " \
+ self.modifiedSite.upper() + " on " \
+ self.serverName + ":" \
+ self.runMode
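    # Illustrative note (not part of the original class): __str__ renders as
    #   "<PLUGINNAME>:<status>:<type> <MODIFIEDSITE> on <serverName>:<runMode>"
    # with the plugin name and modified site upper-cased.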
| bsd-3-clause | 1,471,667,875,984,787,200 | 23.169014 | 85 | 0.556527 | false | 4.13494 | false | false | false |
reybalgs/PyRecipe-4-U | models/recipemodel.py | 1 | 3188 | ###############################################################################
#
# recipemodel.py
#
# Provides the class model for a recipe. The class model is passed around in
# the application proper.
#
###############################################################################
import simplejson as json
class RecipeModel():
def export_recipe(self):
"""
This function exports the current recipe object as a JSON-encoded
recipe (.rcpe) file.
Actually just returns a JSON-encoded string
"""
# Dump the object into a JSON-formatted string
json_recipe = json.dumps({"name":self.name,"course":self.course,
"serving_size":self.servingSize,"ingredients":self.ingredients,
"instructions":self.instructions,"images":self.images},
separators=(',',':'))
# Return the string
return json_recipe
def import_recipe(self, raw_json):
"""
Parses a JSON-encoded .rcpe file and then sets it to itself.
The string containing the [contents] of the JSON file is passed into
this function.
"""
# Put the decoded JSON string into a "raw" recipe object
raw_recipe = json.loads(raw_json)
print raw_recipe # print it for now
self.name = raw_recipe['name']
self.course = raw_recipe['course']
self.servingSize = raw_recipe['serving_size']
self.ingredients = raw_recipe['ingredients']
self.instructions = raw_recipe['instructions']
self.images = raw_recipe['images']
def print_recipe_information(self):
"""
A useful debugging function that prints the entirety of the recipe
"""
# Print basic information
print '\nName: ' + self.name
print 'Course: ' + self.course
print 'Serving Size: ' + str(self.servingSize)
# Print the ingredients
print '\nIngredients:'
if len(self.ingredients) == 0:
print 'No ingredients.'
else:
for ingredient in self.ingredients:
print(ingredient['name'] + str(ingredient['quantity']) +
ingredient['unit'])
# Print the instructions
print '\nInstructions:'
if len(self.instructions) == 0:
print 'No instructions.'
else:
for instruction in self.instructions:
print instruction
# Print the filepaths of the images
print '\nImage paths:'
if len(self.images) == 0:
print 'No images.'
else:
for filePath in self.images:
print filePath
def get_recipe(self, recipe):
"""
Assigns a given recipe to this recipe.
"""
self.name = recipe.name
self.course = recipe.course
self.servingSize = recipe.servingSize
self.ingredients = recipe.ingredients
self.instructions = recipe.instructions
def __init__(self):
self.name = 'noname'
self.course = 'none'
self.servingSize = 0
self.ingredients = []
self.instructions = []
self.images = []
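# Illustrative sketch (not part of the original module): export_recipe and import_recipe
# are symmetric, so a round trip through the JSON string preserves the recipe fields:
#
#   original = RecipeModel()
#   original.name = 'Adobo'
#   payload = original.export_recipe()   # JSON text destined for a .rcpe file
#   restored = RecipeModel()
#   restored.import_recipe(payload)      # restored.name == 'Adobo'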
| gpl-3.0 | 5,876,512,158,699,858,000 | 31.865979 | 79 | 0.553011 | false | 4.483826 | false | false | false |
rbuffat/pyidf | tests/test_controllerwatercoil.py | 1 | 2641 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.controllers import ControllerWaterCoil
log = logging.getLogger(__name__)
class TestControllerWaterCoil(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_controllerwatercoil(self):
pyidf.validation_level = ValidationLevel.error
obj = ControllerWaterCoil()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_control_variable = "Temperature"
obj.control_variable = var_control_variable
# alpha
var_action = "Normal"
obj.action = var_action
# alpha
var_actuator_variable = "Flow"
obj.actuator_variable = var_actuator_variable
# node
var_sensor_node_name = "node|Sensor Node Name"
obj.sensor_node_name = var_sensor_node_name
# node
var_actuator_node_name = "node|Actuator Node Name"
obj.actuator_node_name = var_actuator_node_name
# real
var_controller_convergence_tolerance = 7.7
obj.controller_convergence_tolerance = var_controller_convergence_tolerance
# real
var_maximum_actuated_flow = 8.8
obj.maximum_actuated_flow = var_maximum_actuated_flow
# real
var_minimum_actuated_flow = 9.9
obj.minimum_actuated_flow = var_minimum_actuated_flow
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.controllerwatercoils[0].name, var_name)
self.assertEqual(idf2.controllerwatercoils[0].control_variable, var_control_variable)
self.assertEqual(idf2.controllerwatercoils[0].action, var_action)
self.assertEqual(idf2.controllerwatercoils[0].actuator_variable, var_actuator_variable)
self.assertEqual(idf2.controllerwatercoils[0].sensor_node_name, var_sensor_node_name)
self.assertEqual(idf2.controllerwatercoils[0].actuator_node_name, var_actuator_node_name)
self.assertAlmostEqual(idf2.controllerwatercoils[0].controller_convergence_tolerance, var_controller_convergence_tolerance)
self.assertAlmostEqual(idf2.controllerwatercoils[0].maximum_actuated_flow, var_maximum_actuated_flow)
self.assertAlmostEqual(idf2.controllerwatercoils[0].minimum_actuated_flow, var_minimum_actuated_flow) | apache-2.0 | 5,792,204,171,159,146,000 | 36.742857 | 131 | 0.677395 | false | 3.683403 | false | false | false |
macosforge/ccs-calendarserver | txdav/caldav/datastore/scheduling/ischedule/remoteservers.py | 1 | 6936 | ##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.filepath import CachingFilePath as FilePath
from twext.python.log import Logger
from twistedcaldav.config import config, fullServerPath
from twistedcaldav import xmlutil
"""
XML based iSchedule configuration file handling. This is for handling of remote servers. The localservers.py module
handles servers that are local (podded).
"""
__all__ = [
"IScheduleServers",
]
log = Logger()
class IScheduleServers(object):
_fileInfo = None
_xmlFile = None
_servers = None
_domainMap = None
def __init__(self):
if IScheduleServers._servers is None:
self._loadConfig()
def _loadConfig(self):
if config.Scheduling.iSchedule.RemoteServers:
if IScheduleServers._servers is None:
IScheduleServers._xmlFile = FilePath(
fullServerPath(
config.ConfigRoot,
config.Scheduling.iSchedule.RemoteServers,
)
)
if IScheduleServers._xmlFile.exists():
IScheduleServers._xmlFile.restat()
fileInfo = (IScheduleServers._xmlFile.getmtime(), IScheduleServers._xmlFile.getsize())
if fileInfo != IScheduleServers._fileInfo:
parser = IScheduleServersParser(IScheduleServers._xmlFile)
IScheduleServers._servers = parser.servers
self._mapDomains()
IScheduleServers._fileInfo = fileInfo
else:
IScheduleServers._servers = ()
IScheduleServers._domainMap = {}
else:
IScheduleServers._servers = ()
IScheduleServers._domainMap = {}
def _mapDomains(self):
IScheduleServers._domainMap = {}
for server in IScheduleServers._servers:
for domain in server.domains:
IScheduleServers._domainMap[domain] = server
def mapDomain(self, domain):
"""
Map a calendar user address domain to a suitable server that can
handle server-to-server requests for that user.
"""
return IScheduleServers._domainMap.get(domain)
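    # Illustrative sketch (not part of the original class), assuming the configured
    # remote-servers XML lists "example.org" inside a server's <domains> block:
    #
    #   servers = IScheduleServers()
    #   record = servers.mapDomain("example.org")   # IScheduleServerRecord or None
    #   if record is not None:
    #       ssl, host, port, path = record.details()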
ELEMENT_SERVERS = "servers"
ELEMENT_SERVER = "server"
ELEMENT_URI = "uri"
ELEMENT_AUTHENTICATION = "authentication"
ATTRIBUTE_TYPE = "type"
ATTRIBUTE_BASICAUTH = "basic"
ELEMENT_USER = "user"
ELEMENT_PASSWORD = "password"
ELEMENT_ALLOW_REQUESTS_FROM = "allow-requests-from"
ELEMENT_ALLOW_REQUESTS_TO = "allow-requests-to"
ELEMENT_DOMAINS = "domains"
ELEMENT_DOMAIN = "domain"
ELEMENT_CLIENT_HOSTS = "hosts"
ELEMENT_HOST = "host"
class IScheduleServersParser(object):
"""
Server-to-server configuration file parser.
"""
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.xmlFile)
def __init__(self, xmlFile):
self.servers = []
# Read in XML
_ignore_etree, servers_node = xmlutil.readXML(xmlFile.path, ELEMENT_SERVERS)
self._parseXML(servers_node)
def _parseXML(self, node):
"""
Parse the XML root node from the server-to-server configuration document.
@param node: the L{Node} to parse.
"""
for child in node:
if child.tag == ELEMENT_SERVER:
self.servers.append(IScheduleServerRecord())
self.servers[-1].parseXML(child)
class IScheduleServerRecord (object):
"""
Contains server-to-server details.
"""
def __init__(self, uri=None, rewriteCUAddresses=True, moreHeaders=[], podding=False):
"""
@param recordType: record type for directory entry.
"""
self.uri = ""
self.authentication = None
self.allow_from = False
self.allow_to = True
self.domains = []
self.client_hosts = []
self.rewriteCUAddresses = rewriteCUAddresses
self.moreHeaders = moreHeaders
self._podding = podding
if uri:
self.uri = uri
self._parseDetails()
def details(self):
return (self.ssl, self.host, self.port, self.path,)
def podding(self):
return self._podding
def redirect(self, location):
"""
Permanent redirect for the lifetime of this record.
"""
self.uri = location
self._parseDetails()
def parseXML(self, node):
for child in node:
if child.tag == ELEMENT_URI:
self.uri = child.text
elif child.tag == ELEMENT_AUTHENTICATION:
self._parseAuthentication(child)
elif child.tag == ELEMENT_ALLOW_REQUESTS_FROM:
self.allow_from = True
elif child.tag == ELEMENT_ALLOW_REQUESTS_TO:
self.allow_to = True
elif child.tag == ELEMENT_DOMAINS:
self._parseList(child, ELEMENT_DOMAIN, self.domains)
elif child.tag == ELEMENT_CLIENT_HOSTS:
self._parseList(child, ELEMENT_HOST, self.client_hosts)
else:
raise RuntimeError("[{}] Unknown attribute: {}".format(self.__class__, child.tag,))
self._parseDetails()
def _parseList(self, node, element_name, appendto):
for child in node:
if child.tag == element_name:
appendto.append(child.text)
def _parseAuthentication(self, node):
if node.get(ATTRIBUTE_TYPE) != ATTRIBUTE_BASICAUTH:
return
for child in node:
if child.tag == ELEMENT_USER:
user = child.text
elif child.tag == ELEMENT_PASSWORD:
password = child.text
self.authentication = ("basic", user, password,)
def _parseDetails(self):
# Extract scheme, host, port and path
if self.uri.startswith("http://"):
self.ssl = False
rest = self.uri[7:]
elif self.uri.startswith("https://"):
self.ssl = True
rest = self.uri[8:]
splits = rest.split("/", 1)
hostport = splits[0].split(":")
self.host = hostport[0]
if len(hostport) > 1:
self.port = int(hostport[1])
else:
self.port = {False: 80, True: 443}[self.ssl]
self.path = "/"
if len(splits) > 1:
self.path += splits[1]
| apache-2.0 | -7,475,544,926,716,643,000 | 30.527273 | 115 | 0.598039 | false | 4.20109 | true | false | false |
spencerlyon2/pygments | pygments/lexers/data.py | 2 | 17895 | # -*- coding: utf-8 -*-
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
    Lexers for data file formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
include, bygroups
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Punctuation, Literal
__all__ = ['YamlLexer', 'JsonLexer']
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
.. versionadded:: 0.11
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
        # the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors': [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Label),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[^\n\r\f\v]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
],
        # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class JsonLexer(RegexLexer):
"""
For JSON data structures.
.. versionadded:: 1.5
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = ['application/json']
flags = re.DOTALL
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'}', Punctuation, '#pop'),
],
        # a json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r']', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
        # the root of a json document should be a value
'root': [
include('value'),
],
}
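# Illustrative sketch (not part of the original module): both lexers plug into the
# standard pygments pipeline, e.g.
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('{"a": [1, 2.5, true]}', JsonLexer(), TerminalFormatter()))
#   print(highlight('key: [1, 2]\n', YamlLexer(), TerminalFormatter()))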
| bsd-2-clause | -6,239,185,721,659,897,000 | 34.157171 | 83 | 0.486784 | false | 4.250594 | false | false | false |
Xeralux/tensorflow | tensorflow/python/keras/_impl/keras/engine/training.py | 1 | 72917 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics as metrics_module
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras.engine import training_arrays
from tensorflow.python.keras._impl.keras.engine import training_eager
from tensorflow.python.keras._impl.keras.engine import training_generator
from tensorflow.python.keras._impl.keras.engine import training_utils
from tensorflow.python.keras._impl.keras.engine.base_layer import Layer
from tensorflow.python.keras._impl.keras.engine.network import Network
from tensorflow.python.keras._impl.keras.utils.generic_utils import slice_arrays
from tensorflow.python.layers.base import _DeferredTensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
    def __init__(self):
      super(MyModel, self).__init__()
      self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
    def __init__(self):
      super(MyModel, self).__init__()
      self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
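  A minimal training sketch for such a subclassed model (the data below is
  arbitrary illustrative input, not part of the API contract):

  ```python
  import numpy as np

  model.compile(optimizer=tf.train.AdamOptimizer(0.001),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
  model.fit(np.random.random((32, 10)),
            np.random.randint(5, size=(32, 1)),
            epochs=2, batch_size=8)
  ```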
"""
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
            to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
**kwargs: These arguments are passed to `tf.Session.run`.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
loss = loss or {}
if context.executing_eagerly() and not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise ValueError('Only TF native optimizers are supported in Eager mode.')
self.optimizer = optimizers.get(optimizer)
self.loss = loss
self.metrics = metrics or []
self.loss_weights = loss_weights
if context.executing_eagerly() and sample_weight_mode is not None:
raise ValueError('sample_weight_mode is not supported in Eager mode.')
self.sample_weight_mode = sample_weight_mode
if context.executing_eagerly() and weighted_metrics is not None:
raise ValueError('weighted_metrics is not supported in Eager mode.')
self.weighted_metrics = weighted_metrics
if context.executing_eagerly() and target_tensors is not None:
raise ValueError('target_tensors is not supported in Eager mode.')
self.target_tensors = target_tensors
if not self.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name + '" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name + '" during training.')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
'it should have one entry per model outputs. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [training_utils.weighted_masked_objective(fn)
for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
if not context.executing_eagerly():
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError(
'When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' + str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list or a dict.')
self.loss_weights_list = loss_weights_list
# initialization for Eager mode execution
if context.executing_eagerly():
if target_tensors is not None:
raise ValueError('target_tensors are not currently supported in Eager '
'mode.')
self.total_loss = None
self.metrics_tensors = []
self.metrics_names = ['loss']
for i in range(len(self.outputs)):
if len(self.outputs) > 1:
self.metrics_names.append(self.output_names[i] + '_loss')
self.nested_metrics = training_utils.collect_metrics(metrics,
self.output_names)
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
self._feed_sample_weight_modes.append(None)
self.sample_weights = []
self.targets = []
for i in range(len(self.outputs)):
self._feed_output_names.append(self.output_names[i])
self._collected_trainable_weights = self.trainable_weights
return
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors not in (None, []):
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' + str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError(
'Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
else:
raise TypeError('Expected `target_tensors` to be '
'a list or dict, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = K.int_shape(self.outputs[i])
name = self.output_names[i]
if target_tensors not in (None, []):
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError(
'Unknown entry in '
'sample_weight_mode dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError(
'Output "' + name + '" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + 'sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' + str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(array_ops.placeholder_with_default(
[[1.]], shape=[None, None], name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(array_ops.placeholder_with_default(
[1.], shape=[None], name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_target_weighing_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
# Prepare metrics.
self.weighted_metrics = weighted_metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# List of same size as output_names.
# contains tuples (metrics for output, names of metrics).
nested_metrics = training_utils.collect_metrics(metrics, self.output_names)
nested_weighted_metrics = training_utils.collect_metrics(weighted_metrics,
self.output_names)
self.metrics_updates = []
self.stateful_metric_names = []
with K.name_scope('metrics'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weights = sample_weights[i]
output_metrics = nested_metrics[i]
output_weighted_metrics = nested_weighted_metrics[i]
def handle_metrics(metrics, weights=None):
metric_name_prefix = 'weighted_' if weights is not None else ''
for metric in metrics:
if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
# custom handling of accuracy/crossentropy
# (because of class mode duality)
output_shape = self.outputs[i].get_shape().as_list()
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy/crossentropy
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.binary_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.binary_crossentropy
elif self.loss_functions[
i] == losses.sparse_categorical_crossentropy:
# case: categorical accuracy/crossentropy with sparse targets
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.sparse_categorical_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.sparse_categorical_crossentropy
else:
# case: categorical accuracy/crossentropy
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.categorical_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.categorical_crossentropy
if metric in ('accuracy', 'acc'):
suffix = 'acc'
elif metric in ('crossentropy', 'ce'):
suffix = 'ce'
weighted_metric_fn = training_utils.weighted_masked_objective(
metric_fn)
metric_name = metric_name_prefix + suffix
else:
metric_fn = metrics_module.get(metric)
weighted_metric_fn = training_utils.weighted_masked_objective(
metric_fn)
# Get metric name as string
if hasattr(metric_fn, 'name'):
metric_name = metric_fn.name
else:
metric_name = metric_fn.__name__
metric_name = metric_name_prefix + metric_name
with K.name_scope(metric_name):
metric_result = weighted_metric_fn(
y_true, y_pred, weights=weights, mask=masks[i])
# Append to self.metrics_names, self.metric_tensors,
# self.stateful_metric_names
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[i], metric_name)
# Dedupe name
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_result)
# Keep track of state updates created by
# stateful metrics (i.e. metrics layers).
if isinstance(metric_fn, Layer):
self.stateful_metric_names.append(metric_name)
self.metrics_updates += metric_fn.updates
handle_metrics(output_metrics)
handle_metrics(output_weighted_metrics, weights=weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_target_weighing_indices:
self._feed_sample_weights.append(self.sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
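  # Illustrative sketch (not part of the original method): for a two-output graph
  # network whose outputs are named, say, 'main' and 'aux', the dict forms handled
  # above could be exercised as follows (names and values are hypothetical):
  #
  #   model.compile(optimizer='rmsprop',
  #                 loss={'main': 'binary_crossentropy', 'aux': 'mse'},
  #                 loss_weights={'main': 1., 'aux': 0.2},
  #                 metrics={'main': 'accuracy'})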
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.warning(
UserWarning(
'Discrepancy between trainable weights and collected trainable'
' weights, did you set `model.trainable` without calling'
' `model.compile` after ?'))
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
# Training updates
updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self._feed_inputs)
# Stateful metrics updates
updates += self.metrics_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates + self.metrics_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None):
"""Runs validation checks on input and target data passed by the user.
Also standardizes the data to lists of arrays, in order.
Also builds and compiles the model on the fly if it is a subclassed model
that has never been called before (and thus has no inputs/outputs).
This is a purely internal method, subject to refactoring at any time.
Args:
x: An array or list of arrays, to be used as input data. If the model
has known, named inputs, this could also be a dict mapping input names
to the corresponding array.
y: An array or list of arrays, to be used as target data. If the model
has known, named outputs, this could also be a dict mapping output names
to the corresponding array.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
Returns:
A tuple of 3 lists: input arrays, target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
# First, we build/compile the model on the fly if necessary.
all_inputs = []
if not self.built:
# We need to use `x` to set the model inputs.
# We type-check that `x` and `y` are either single arrays
# or lists of arrays.
if isinstance(x, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs += list(x)
elif isinstance(x, dict):
raise ValueError('Please do not pass a dictionary as model inputs.')
else:
if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs.append(x)
# Build the model using the retrieved inputs (value or symbolic).
# If values, then in symbolic-mode placeholders will be created
# to match the value shapes.
if not self.inputs:
self._set_inputs(x)
if y is not None:
if not self.optimizer:
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
if not self._is_compiled:
# On-the-fly compilation of the model.
# We need to use `y` to set the model targets.
if isinstance(y, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
elif isinstance(y, dict):
raise ValueError('Please do not pass a dictionary as model targets.')
else:
if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
# Typecheck that all inputs are *either* value *or* symbolic.
# TODO(fchollet): this check could be removed in Eager mode?
if y is not None:
if isinstance(y, (list, tuple)):
all_inputs += list(y)
else:
all_inputs.append(y)
if any(tensor_util.is_tensor(v) for v in all_inputs):
if not all(tensor_util.is_tensor(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy arrays and '
'TensorFlow tensors. '
'You passed: x=' + str(x) + '; y=' + str(y))
if context.executing_eagerly():
target_tensors = None
else:
# Handle target tensors if any passed.
if not isinstance(y, (list, tuple)):
y = [y]
target_tensors = [v for v in y if tensor_util.is_tensor(v)]
self.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors)
    # If `x` and `y` were all symbolic, then the model should not be fed any
# inputs and targets.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
if any(tensor_util.is_tensor(v) for v in all_inputs):
return [], [], []
# What follows is input validation and standardization to list format,
# in the case where all inputs are value arrays.
if context.executing_eagerly():
# In eager mode, do not do shape validation.
feed_input_names = self.input_names
feed_input_shapes = None
elif not self._is_graph_network:
# Case: symbolic-mode subclassed network. Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
x = training_utils.standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
if y is not None:
if context.executing_eagerly():
feed_output_names = self.output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO(fchollet): consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
elif not self._is_graph_network:
feed_output_names = self._feed_output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO(fchollet): consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
else:
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._feed_sample_weight_modes
feed_output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
feed_output_shapes.append(output_shape[:-1] + (1,))
elif (not hasattr(loss_fn, '__name__') or
getattr(losses, loss_fn.__name__, None) is None):
# If `loss_fn` is not a function (e.g. callable class)
# or if it not in the `losses` module, then
# it is a user-defined loss and we make no assumptions
# about it.
feed_output_shapes.append(None)
else:
feed_output_shapes.append(output_shape)
# Standardize the outputs.
y = training_utils.standardize_input_data(
y,
feed_output_names,
feed_output_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = training_utils.standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = training_utils.standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
training_utils.standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
training_utils.check_array_lengths(x, y, sample_weights)
if self._is_graph_network and not context.executing_eagerly():
# Additional checks to avoid users mistakenly using improper loss fns.
training_utils.check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
else:
y = []
sample_weights = []
if self.stateful and batch_size:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def _set_inputs(self, inputs, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, or data tensors.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data: we create placeholders matching the shape of the Numpy
arrays. We expect Numpy data to be fed for these placeholders
when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
"""
if self.__class__.__name__ == 'Sequential':
# Note: we can't test whether the model is `Sequential` via `isinstance`
# since `Sequential` depends on `Model`.
if isinstance(inputs, list):
assert len(inputs) == 1
inputs = inputs[0]
self.build(input_shape=(None,) + inputs.shape[1:])
elif context.executing_eagerly():
self._eager_set_inputs(inputs)
else:
self._symbolic_set_inputs(inputs, training=training)
def _set_scope(self, scope=None):
"""Modify the Layer scope creation logic to create ResourceVariables."""
super(Model, self)._set_scope(scope=scope)
# Subclassed Models create ResourceVariables by default. This makes it
# easier to use Models in an eager/graph agnostic way (since eager execution
# always uses ResourceVariables).
if not self._is_graph_network:
self._scope.set_use_resource(True)
def _eager_set_inputs(self, inputs):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
We assume the number and ndim of outputs do not change over different calls.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
Raises:
ValueError: If the model's inputs are already set.
"""
assert context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of model inputs/outputs as DeferredTensors,
# to keep track of number of inputs and outputs and their ndim.
if isinstance(inputs, (list, tuple)):
dummy_output_values = self.call(
[ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs])
dummy_input_values = list(inputs)
else:
dummy_output_values = self.call(
ops.convert_to_tensor(inputs, dtype=K.floatx()))
dummy_input_values = [inputs]
if isinstance(dummy_output_values, (list, tuple)):
dummy_output_values = list(dummy_output_values)
else:
dummy_output_values = [dummy_output_values]
self.outputs = [
_DeferredTensor(shape=(None for _ in v.shape),
dtype=v.dtype) for v in dummy_output_values]
self.inputs = [
_DeferredTensor(shape=(None for _ in v.shape),
dtype=v.dtype) for v in dummy_input_values]
self.input_names = [
'input_%d' % (i + 1) for i in range(len(dummy_input_values))]
self.output_names = [
'output_%d' % (i + 1) for i in range(len(dummy_output_values))]
self.built = True
def _symbolic_set_inputs(self, inputs, outputs=None, training=None):
"""Set model's inputs and output specs based.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
outputs: None, a data tensor, or a list of data tensors. If None, the
outputs will be determined by invoking self.call(), otherwise the
provided value will be used.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If the model's inputs are already set.
"""
assert not context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of symbolic model inputs (either by using the tensor
# provided, or by creating a placeholder if Numpy data was provided).
self.inputs = []
self.input_names = []
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
if isinstance(inputs, (list, tuple)):
inputs = list(inputs)
else:
inputs = [inputs]
for i, v in enumerate(inputs):
name = 'input_%d' % (i + 1)
self.input_names.append(name)
if isinstance(v, list):
v = np.asarray(v)
if v.ndim == 1:
v = np.expand_dims(v, 1)
if isinstance(v, (np.ndarray)):
# We fix the placeholder shape except the batch size.
# This is suboptimal, but it is the best we can do with the info
# we have. The user should call `model._set_inputs(placeholders)`
# to specify custom placeholders if the need arises.
shape = (None,) + v.shape[1:]
placeholder = K.placeholder(shape=shape, name=name)
self.inputs.append(placeholder)
self._feed_inputs.append(placeholder)
self._feed_input_names.append(name)
self._feed_input_shapes.append(shape)
else:
# Assumed tensor - TODO(fchollet) additional type check?
self.inputs.append(v)
if K.is_placeholder(v):
self._feed_inputs.append(v)
self._feed_input_names.append(name)
self._feed_input_shapes.append(K.int_shape(v))
if outputs is None:
# Obtain symbolic outputs by calling the model.
if len(self.inputs) == 1:
if self._expects_training_arg:
outputs = self.call(self.inputs[0], training=training)
else:
outputs = self.call(self.inputs[0])
else:
if self._expects_training_arg:
outputs = self.call(self.inputs, training=training)
else:
outputs = self.call(self.inputs)
if isinstance(outputs, (list, tuple)):
outputs = list(outputs)
else:
outputs = [outputs]
self.outputs = outputs
self.output_names = [
'output_%d' % (i + 1) for i in range(len(self.outputs))]
self.built = True
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Numpy array of training data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
TensorFlow data tensors.
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
TensorFlow data tensors.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling.
validation_data: tuple `(x_val, y_val)` or tuple
`(x_val, y_val, val_sample_weights)` on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# TODO(fchollet): this method may be creating reference cycles, which would
# lead to accumulating garbage in memory when called in a loop. Investigate.
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
if x is None and y is None and steps_per_epoch is None:
raise ValueError('If fitting from data tensors, '
'you should specify the `steps_per_epoch` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size)
# Prepare validation data.
if validation_data:
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' % len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size)
elif validation_split and 0. < validation_split < 1.:
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (slice_arrays(
sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
if context.executing_eagerly():
return training_eager.fit_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
else:
return training_arrays.fit_loop(
self, x, y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
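  # --- Editor's illustrative sketch (not part of the original source) ---
  # A minimal fit() call, assuming in-memory Numpy arrays named x_train/y_train
  # and `keras` referring to tf.keras:
  #
  #   model = keras.Sequential([
  #       keras.layers.Dense(64, activation='relu', input_shape=(20,)),
  #       keras.layers.Dense(1)])
  #   model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
  #   history = model.fit(x_train, y_train, batch_size=32, epochs=10,
  #                       validation_split=0.2)
  #   # history.history maps loss/metric names (and their val_ variants)
  #   # to per-epoch lists of values.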
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Numpy array of test data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
TensorFlow data tensors.
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
TensorFlow data tensors.
batch_size: Integer or `None`.
Number of samples per evaluation step.
If unspecified, `batch_size` will default to 32.
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and y is None and steps is None:
raise ValueError('If evaluating from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
batch_size=batch_size)
if context.executing_eagerly():
return training_eager.test_loop(
self, inputs=x, targets=y, sample_weights=sample_weights,
batch_size=batch_size, verbose=verbose, steps=steps)
else:
return training_arrays.test_loop(
self, inputs=x, targets=y, sample_weights=sample_weights,
batch_size=batch_size, verbose=verbose, steps=steps)
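  # Editor's illustrative sketch (not part of the original source): evaluating
  # on held-out arrays; x_test/y_test are assumed names.
  #
  #   results = model.evaluate(x_test, y_test, batch_size=128)
  #   # `results` is a scalar loss, or [loss, metric1, ...];
  #   # model.metrics_names gives the matching labels.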
def predict(self, x, batch_size=None, verbose=0, steps=None):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: The input data, as a Numpy array
(or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and steps is None:
raise ValueError('If predicting from data tensors, '
'you should specify the `steps` '
'argument.')
x, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
return training_eager.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
else:
return training_arrays.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
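  # Editor's illustrative sketch (not part of the original source); x_test is
  # an assumed Numpy array matching the model's input shape.
  #
  #   preds = model.predict(x_test, batch_size=128)   # Numpy array of predictions
  #   one = model.predict(x_test[:1])                 # single-sample batch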
def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight)
if context.executing_eagerly():
outputs = training_eager.train_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
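  # Editor's illustrative sketch (not part of the original source): a manual
  # training loop over a user-supplied iterator; batch_iterator() is a
  # hypothetical generator yielding (x_batch, y_batch) Numpy pairs.
  #
  #   for epoch in range(num_epochs):
  #       for x_batch, y_batch in batch_iterator():
  #           loss = model.train_on_batch(x_batch, y_batch)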
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight)
if context.executing_eagerly():
outputs = training_eager.test_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input samples, as a Numpy array.
Returns:
Numpy array(s) of predictions.
"""
x, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
inputs = [ops.convert_to_tensor(val, dtype=K.floatx()) for val in x]
return self(inputs) # pylint: disable=not-callable
if not context.executing_eagerly():
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`)
object in order to avoid duplicate data
when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single batch.
Therefore, all arrays in this tuple must have the same length (equal
to the size of this batch). Different batches may have different
sizes.
For example, the last batch of the epoch is commonly smaller than
the
others, if the size of the dataset is not divisible by the batch
size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of samples of your dataset
divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`fit_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
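  # Editor's illustrative sketch (not part of the original source): feeding
  # fit_generator from a keras.utils.Sequence, which preserves batch ordering
  # and lets use_multiprocessing=True visit each batch exactly once per epoch.
  # x_train/y_train are assumed Numpy arrays.
  #
  #   class NumpySequence(keras.utils.Sequence):
  #       def __init__(self, x, y, batch_size):
  #           self.x, self.y, self.batch_size = x, y, batch_size
  #       def __len__(self):
  #           return int(np.ceil(len(self.x) / float(self.batch_size)))
  #       def __getitem__(self, idx):
  #           sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
  #           return self.x[sl], self.y[sl]
  #
  #   model.fit_generator(NumpySequence(x_train, y_train, 32), epochs=5,
  #                       workers=4, use_multiprocessing=True)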
def evaluate_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: maximum size for the generator queue
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
  ValueError: in case of invalid arguments, or if the generator
    yields data in an invalid format.
"""
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`evaluate_generator` is not yet enabled for '
'unbuilt Model subclasses')
return training_generator.evaluate_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def predict_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`predict_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.predict_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
| apache-2.0 | -4,597,388,592,989,543,400 | 41.344367 | 99 | 0.614041 | false | 4.3372 | true | false | false |
annahs/atmos_research | WHI_long_term_2min_data_to_db.py | 1 | 8596 | import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import calendar
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import dates
start = datetime(2009,7,15,4) #2009 - 20090628 2010 - 20100610 2012 - 20100405
end = datetime(2009,8,17) #2009 - 20090816 2010 - 20100726 2012 - 20100601
timestep = 6.#1./30 #hours
sample_min = 117 #117 for all 2009-2012
sample_max = 123 #123 for all 2009-2012
yag_min = 3.8 #3.8 for all 2009-2012
yag_max = 6 #6 for all 2009-2012
BC_VED_min = 70
BC_VED_max = 220
min_scat_pkht = 20
mass_min = ((BC_VED_min/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
mass_max = ((BC_VED_max/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
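#quick sanity check of the VED-to-mass conversion above (editor's note, not original code):
#VED is in nm, so VED/10**7 is the diameter in cm; (pi/6)*d**3 is the particle volume in cm^3,
#and multiplying by the assumed rBC density of 1.8 g/cm^3 and 10**15 fg/g gives mass in femtograms:
# 70 nm  -> (7.0e-6)**3 * (pi/6) * 1.8 * 1e15 ~ 0.32 fg
# 220 nm -> (2.2e-5)**3 * (pi/6) * 1.8 * 1e15 ~ 10.0 fg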
lag_threshold_2009 = 0.1
lag_threshold_2010 = 0.25
lag_threshold_2012 = 1.5
print 'mass limits', mass_min, mass_max
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def check_spike_times(particle_start_time,particle_end_time):
cursor.execute('''SELECT count(*)
FROM whi_spike_times_2009to2012
WHERE (spike_start_UTC <= %s AND spike_end_UTC > %s)
OR (spike_start_UTC <= %s AND spike_end_UTC > %s)
''',
(particle_start_time,particle_start_time,particle_end_time,particle_end_time))
spike_count = cursor.fetchall()[0][0]
return spike_count
def get_hysplit_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_hysplit_hourly_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
hy_id_list = cursor.fetchall()
if hy_id_list == []:
hy_id = None
else:
hy_id = hy_id_list[0][0]
return hy_id
def get_met_info(particle_start_time):
cursor.execute('''SELECT id,pressure_Pa,room_temp_C
FROM whi_sampling_conditions
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
met_list = cursor.fetchall()
if met_list == []:
met_list = [[np.nan,np.nan,np.nan]]
return met_list[0]
def get_gc_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_gc_hourly_bc_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
gc_id_list = cursor.fetchall()
if gc_id_list == []:
gc_id = None
else:
gc_id = gc_id_list[0][0]
return gc_id
def get_sample_factor(UNIX_start):
date_time = datetime.utcfromtimestamp(UNIX_start)
sample_factors_2012 = [
[datetime(2012,4,4,19,43,4), datetime(2012,4,5,13,47,9), 3.0],
[datetime(2012,4,5,13,47,9), datetime(2012,4,10,3,3,25), 1.0],
[datetime(2012,4,10,3,3,25), datetime(2012,5,16,6,9,13), 3.0],
[datetime(2012,5,16,6,9,13), datetime(2012,6,7,18,14,39), 10.0],
]
if date_time.year in [2009,2010]:
sample_factor = 1.0
if date_time.year == 2012:
for date_range in sample_factors_2012:
start_date = date_range[0]
end_date = date_range[1]
range_sample_factor = date_range[2]
if start_date<= date_time < end_date:
sample_factor = range_sample_factor
return sample_factor
def lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos):
	#lag between incandescence and scattering peak positions, 0.2 us per digitizer point
	long_lags = 0
	short_lags = 0
	lag_time = (BB_incand_pk_pos-BB_scat_pk_pos)*0.2 #us
	if not (-10 < lag_time < 10):
		#reject physically implausible lags
		lag_time = np.nan
	else:
		#flag long vs short lags using the year-specific thresholds
		#(`start` is the module-level interval start time)
		if start.year == 2009 and lag_time > lag_threshold_2009:
			long_lags = 1
		elif start.year == 2010 and lag_time > lag_threshold_2010:
			long_lags = 1
		elif start.year == 2012 and lag_time > lag_threshold_2012:
			long_lags = 1
		else:
			short_lags = 1
	return [lag_time,long_lags,short_lags]
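#worked example (editor's note, not original code): incandescence peak at point 105 and
#scattering peak at point 100 gives lag_time = (105-100)*0.2 = 1.0 us, which counts as a
#long lag for 2009/2010 (thresholds 0.1/0.25 us) and a short lag for 2012 (threshold 1.5 us)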
#query to add binned rBC mass concentration data to the whi_sp2_2min_data table
add_data = ('''INSERT INTO whi_sp2_2min_data
(UNIX_UTC_start_time,UNIX_UTC_end_time,number_particles,rBC_mass_conc,rBC_mass_conc_err,volume_air_sampled,sampling_duration,mean_lag_time,sample_factor,hysplit_hourly_id,whi_sampling_cond_id,gc_hourly_id)
VALUES (%(UNIX_UTC_start_time)s,%(UNIX_UTC_end_time)s,%(number_particles)s,%(rBC_mass_conc)s,%(rBC_mass_conc_err)s,%(volume_air_sampled)s,%(sampling_duration)s,%(mean_lag_time)s,%(sample_factor)s,%(hysplit_hourly_id)s,%(whi_sampling_cond_id)s,%(gc_hourly_id)s)'''
)
#
multiple_records = []
i=1
while start <= end:
long_lags = 0
short_lags = 0
if (4 <= start.hour < 16):
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = UNIX_start + timestep*3600.0
print start, UNIX_start+60
print datetime.utcfromtimestamp(UNIX_end)
#filter on hk data here
cursor.execute('''(SELECT
mn.UNIX_UTC_ts_int_start,
mn.UNIX_UTC_ts_int_end,
mn.rBC_mass_fg_BBHG,
mn.rBC_mass_fg_BBHG_err,
mn.BB_incand_pk_pos,
mn.BB_scat_pk_pos,
mn.BB_scat_pkht,
hk.sample_flow,
mn.BB_incand_HG
FROM whi_sp2_particle_data mn
FORCE INDEX (hourly_binning)
JOIN whi_hk_data hk on mn.HK_id = hk.id
WHERE
mn.UNIX_UTC_ts_int_start >= %s
AND mn.UNIX_UTC_ts_int_end < %s
AND hk.sample_flow >= %s
AND hk.sample_flow < %s
AND hk.yag_power >= %s
AND hk.yag_power < %s)''',
(UNIX_start,UNIX_end,sample_min,sample_max,yag_min,yag_max))
ind_data = cursor.fetchall()
data={
'rBC_mass_fg':[],
'rBC_mass_fg_err':[],
'lag_time':[]
}
total_sample_vol = 0
for row in ind_data:
ind_start_time = float(row[0])
ind_end_time = float(row[1])
bbhg_mass_corr11 = float(row[2])
bbhg_mass_corr_err = float(row[3])
BB_incand_pk_pos = float(row[4])
BB_scat_pk_pos = float(row[5])
BB_scat_pk_ht = float(row[6])
sample_flow = float(row[7]) #in vccm
incand_pkht = float(row[8])
#filter spike times here
if check_spike_times(ind_start_time,ind_end_time):
print 'spike'
continue
#skip the long interval
if (ind_end_time - ind_start_time) > 540:
print 'long interval'
continue
#skip if no sample flow
if sample_flow == None:
print 'no flow'
continue
#get sampling conditions id and met conditions
met_data = get_met_info(UNIX_start)
met_id = met_data[0]
pressure = met_data[1]
temperature = met_data[2]+273.15
correction_factor_for_STP = (273*pressure)/(101325*temperature)
sample_vol = (sample_flow*(ind_end_time-ind_start_time)/60)*correction_factor_for_STP #/60 b/c sccm and time in secs
total_sample_vol = total_sample_vol + sample_vol
bbhg_mass_corr = 0.01244+0.0172*incand_pkht
if (mass_min <= bbhg_mass_corr < mass_max):
#get sample factor
sample_factor = get_sample_factor(UNIX_start)
data['rBC_mass_fg'].append(bbhg_mass_corr*sample_factor)
data['rBC_mass_fg_err'].append(bbhg_mass_corr_err)
#only calc lag time if there is a scattering signal
if BB_scat_pk_ht > min_scat_pkht:
lags = lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos)
data['lag_time'].append(lags[0])
long_lags += lags[1]
short_lags += lags[2]
tot_rBC_mass_fg = sum(data['rBC_mass_fg'])
tot_rBC_mass_uncer = sum(data['rBC_mass_fg_err'])
rBC_number = len(data['rBC_mass_fg'])
mean_lag = float(np.mean(data['lag_time']))
if np.isnan(mean_lag):
mean_lag = None
#get hysplit_id
hysplit_id = None #get_hysplit_id(UNIX_start)
#get GC id
gc_id = None #get_gc_id(UNIX_start)
if total_sample_vol != 0:
mass_conc = (tot_rBC_mass_fg/total_sample_vol)
mass_conc_uncer = (tot_rBC_mass_uncer/total_sample_vol)
#add to db
single_record = {
'UNIX_UTC_start_time' :UNIX_start,
'UNIX_UTC_end_time' :UNIX_end,
'number_particles' :rBC_number,
'rBC_mass_conc' :mass_conc,
'rBC_mass_conc_err' :mass_conc_uncer,
'volume_air_sampled' :total_sample_vol,
'sampling_duration' :(total_sample_vol/2),
'mean_lag_time' :mean_lag,
'number_long_lag' :long_lags,
'number_short_lag' :short_lags,
'sample_factor' :sample_factor,
'hysplit_hourly_id' :hysplit_id,
'whi_sampling_cond_id' :met_id,
'gc_hourly_id' :gc_id,
}
multiple_records.append((single_record))
#bulk insert to db table
if i%1 == 0:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
#increment count
i+= 1
start += timedelta(hours = timestep)
#bulk insert of remaining records to db
if multiple_records != []:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
cnx.close()
| mit | 6,663,575,853,630,564,000 | 28.040541 | 268 | 0.640181 | false | 2.56291 | false | false | false |
topix-hackademy/social-listener | application/twitter/tweets/collector.py | 1 | 3236 | from application.mongo import Connection
from application.twitter.interface import TwitterInterface
from application.twitter.tweets.fetcher import TweetsFetcher
from application.processmanager import ProcessManager
from application.utils.helpers import what_time_is_it
import logging
class TweetCollector(TwitterInterface):
def __init__(self, user, *args, **kwargs):
"""
Twitter Collector. This class is used to retrieve tweets from a specific user
"""
super(TweetCollector, self).__init__(*args, **kwargs)
self.user = user
self.process_name = "Tweets Collector: <%s>" % user
self.fetcherInstance = TweetsFetcher(self.auth, self.user, self.process_name)
def __str__(self):
"""
String representation
:return:
"""
return "Tweet Collector for user <{user}>".format(user=self.user)
def start(self, process_manager):
"""
Start async job for user's tweets
:param process_manager: Process manager instance
:return:
"""
try:
process_manager.create_process(target=self.fetcher,
name=self.process_name,
ptype='twitter_collector')
except Exception:
raise Exception('Error Creating new Process')
def fetcher(self):
"""
Tweets loader
:return:
"""
for page in self.fetcherInstance.get_tweets():
for tweet in page:
try:
if not Connection.Instance().db.twitter.find_one({'user': tweet.user.screen_name,
'source': 'collector',
'data.id': tweet.id}):
Connection.Instance().db.twitter.insert_one({
'source': 'collector',
'data': {
'created_at': tweet.created_at,
'favorite_count': tweet.favorite_count,
'geo': tweet.geo,
'id': tweet.id,
'source': tweet.source,
'in_reply_to_screen_name': tweet.in_reply_to_screen_name,
'in_reply_to_status_id': tweet.in_reply_to_status_id,
'in_reply_to_user_id': tweet.in_reply_to_user_id,
'retweet_count': tweet.retweet_count,
'retweeted': tweet.retweeted,
'text': tweet.text,
'entities': tweet.entities
},
'user': tweet.user.screen_name,
'created': what_time_is_it()
})
except Exception as genericException:
logging.error("MongoDB Insert Error in collector: %s" % genericException)
import multiprocessing
ProcessManager.terminate_process(multiprocessing.current_process().pid, True)
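# Editor's illustrative sketch (not part of the original module): typical wiring,
# where the credentials forwarded to TwitterInterface and the ProcessManager
# instance are assumptions about the surrounding application.
#
#   collector = TweetCollector('some_screen_name', twitter_credentials)
#   collector.start(process_manager)   # spawns "Tweets Collector: <some_screen_name>"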
| mit | -6,503,210,330,713,277,000 | 43.328767 | 101 | 0.491656 | false | 5.048362 | false | false | false |
baris/pushmanager | testing/testdb.py | 1 | 4248 | #!/usr/bin/python
from datetime import datetime, timedelta
import os
import sqlite3
import tempfile
import time
from core import db
def create_temp_db_file():
fd, db_file_path = tempfile.mkstemp(suffix="pushmanager.db")
os.close(fd)
return db_file_path
def get_temp_db_uri(dbfile=None):
if not dbfile:
dbfile = create_temp_db_file()
return "sqlite:///" + dbfile
def make_test_db(dbfile=None):
if not dbfile:
dbfile = create_temp_db_file()
testsql = open(
os.path.join(
os.path.dirname(__file__),
"testdb.sql"
)
).read()
test_db = sqlite3.connect(dbfile)
test_db.cursor().executescript(testsql)
test_db.commit()
test_db.close()
return dbfile
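# Editor's illustrative sketch (not part of the original module): building a
# throwaway SQLite database seeded from testdb.sql for a test run.
#
#   db_file = make_test_db()            # temp file populated from testdb.sql
#   db_uri = get_temp_db_uri(db_file)   # "sqlite:///<path>" for core.db setup
#   # ... run tests against db_uri, then os.remove(db_file) to clean up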
class FakeDataMixin(object):
now = time.time()
yesterday = time.mktime((datetime.now() - timedelta(days=1)).timetuple())
push_data = [
[10, 'OnePush', 'bmetin', 'deploy-1', 'abc', 'live', yesterday, now, 'regular', ''],
[11, 'TwoPush', 'troscoe', 'deploy-2', 'def', 'accepting', now, now, 'regular', ''],
[12, 'RedPush', 'heyjoe', 'deploy-3', 'ghi', 'accepting', now, now, 'regular', ''],
[13, 'BluePush', 'humpty', 'deploy-4', 'jkl', 'accepting', now, now, 'regular', ''],
]
push_keys = [
'id', 'title', 'user', 'branch', 'revision', 'state',
'created', 'modified', 'pushtype', 'extra_pings'
]
fake_revision = "0"*40
request_data = [
[10, 'keysersoze', 'requested', 'keysersoze', 'usual_fix', '', now, now, 'Fix stuff', 'no comment', 12345, '', fake_revision],
[11, 'bmetin', 'requested', 'bmetin', 'fix1', '', now, now, 'Fixing more stuff', 'yes comment', 234, '', fake_revision],
[12, 'testuser1', 'requested', 'testuser2', 'fix1', 'search', now, now, 'Fixing1', 'no comment', 123, '', fake_revision],
[13, 'testuser2', 'requested', 'testuser2', 'fix2', 'search', now, now, 'Fixing2', 'yes comment', 456, '', fake_revision],
]
request_keys = [
'id', 'user', 'state', 'repo', 'branch', 'tags', 'created', 'modified',
'title', 'comments', 'reviewid', 'description', 'revision'
]
def on_db_return(self, success, db_results):
assert success
def make_push_dict(self, data):
return dict(zip(self.push_keys, data))
def make_request_dict(self, data):
return dict(zip(self.request_keys, data))
def insert_pushes(self):
push_queries = []
for pd in self.push_data:
push_queries.append(db.push_pushes.insert(self.make_push_dict(pd)))
db.execute_transaction_cb(push_queries, self.on_db_return)
def insert_requests(self):
request_queries = []
for rd in self.request_data:
request_queries.append(db.push_requests.insert(self.make_request_dict(rd)))
db.execute_transaction_cb(request_queries, self.on_db_return)
def insert_pushcontent(self, requestid, pushid):
db.execute_cb(
db.push_pushcontents.insert({'request': requestid, 'push': pushid}),
self.on_db_return
)
def get_push_for_request(self, requestid):
pushid = [None]
def on_select_return(success, db_results):
assert success
_, pushid[0] = db_results.fetchone()
# check if we have a push in with request
first_pushcontent_query = db.push_pushcontents.select(
db.push_pushcontents.c.request == requestid
)
db.execute_cb(first_pushcontent_query, on_select_return)
return pushid[0]
def get_pushes(self):
pushes = [None]
def on_select_return(success, db_results):
assert success
pushes[0] = db_results.fetchall()
db.execute_cb(db.push_pushes.select(), on_select_return)
return pushes[0]
def get_requests(self):
requests = [None]
def on_select_return(success, db_results):
assert success
requests[0] = db_results.fetchall()
db.execute_cb(db.push_requests.select(), on_select_return)
return requests[0]
def get_requests_by_user(self, user):
return [req for req in self.get_requests() if req['user'] == user]
| apache-2.0 | -2,265,830,634,000,755,700 | 32.714286 | 134 | 0.591102 | false | 3.360759 | true | false | false |