repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 distinct values) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 distinct values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 distinct value)
---|---|---|---|---|---|---|---|---|---|---|
lesina/Hack70 | src/core/models.py | 1 | 1253 | # coding: utf-8
from __future__ import unicode_literals

from django.db import models
from django.contrib.auth.models import AbstractUser, AbstractBaseUser

from courses.models import Course


class User(AbstractUser):
    avatar = models.ImageField(u'фото профиля', upload_to='avatars', blank=False, default=u'avatars/default-avatar.jpg')
    first_name = models.CharField(u'имя', max_length=30, blank=False)
    last_name = models.CharField(u'фамилия', max_length=30, blank=False)
    email = models.EmailField(u'e-mail', blank=False, unique=True)
    permission = models.IntegerField(default=0)  # 0 - student, 1 - prep and so on
    USER_TYPE = ((None, u'Ваша должность'), (u'Студент', u'Студент'), (u'Преподаватель', u'Преподаватель'))
    user_type = models.CharField(u'должность', choices=USER_TYPE, max_length=25, blank=False)
    courses = models.ManyToManyField(Course, related_name='members')

    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'

    def get_absolute_url(self):
        from django.urls import reverse
        return reverse('core:user', kwargs={'slug': self.username})
| gpl-3.0 | -6,417,249,370,478,180,000 | 37.2 | 119 | 0.709424 | false |
certik/sfepy | sfepy/fem/equations.py | 1 | 10524 | from sfepy.base.base import *
from parseEq import create_bnf
from materials import Materials
from sfepy.terms import Terms, Term, term_table, DataCaches, cache_table
"""
Note:
- create found materials, variables from configuration/input file data
... no - user should be able to create objects even if they are not
used in equations
"""
def parse_terms( regions, desc, itps ):
"""
Parse equation given by 'desc' into terms. Assign each term its region.
"""
# Parse.
term_descs = []
bnf = create_bnf( term_descs, itps )
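# create_bnf keeps a reference to term_descs; parseString below fills it
# with the parsed term descriptions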
try:
bnf.parseString( desc )
except:
print 'cannot parse:\n', desc
raise
# Construct terms.
terms = OneTypeList( Term )
for td in term_descs:
## print td
## pause()
try:
constructor = term_table[td.name]
except:
msg = "term '%s' is not in %s" % (td.name,
sorted( term_table.keys() ))
raise ValueError( msg )
region = regions[td.region]
arg_names = []
arg_steps = {}
arg_derivatives = {}
for arg in td.args:
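# a parsed argument is either a (name, step) pair, or a longer tuple whose
# second element is the (name, step) pair and whose third is the derivative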
if len( arg ) == 2:
name, step = arg
derivative = None
else:
name, step = arg[1]
derivative = arg[2]
arg_names.append( name )
arg_steps[name] = step
arg_derivatives[name] = derivative
term = constructor( region, td.name, td.sign )
term.arg_names = arg_names
term.arg_steps = arg_steps
term.arg_derivatives = arg_derivatives
term.integral_name = td.integral
terms.append( term )
return terms
def setup_term_args( terms, variables, materials, user = None ):
"""terms ... can be both Terms or Term class
- checks term argument existence in variables, materials, user
- checks equality of field and term subdomain lists (igs)"""
terms.classify_args( variables )
for term in terms:
igs = term.char_fun.igs
vns = term.get_variable_names()
for name in vns:
if name not in variables.names:
output( 'variable "%s" not found' % name )
raise IndexError
field = variables[name].field
if not set( igs ).issubset( set( field.aps.igs ) ):
output( ('%s: incompatible regions: (term, field)'
+ ' (%s(%s) in %s(%s)') %\
(term.name, igs, name, field.igs(), field.name) )
raise ValueError
mns = term.get_material_names()
for name in mns:
if name not in materials.names:
output( 'material "%s" not found' % name )
raise IndexError
mat = materials[name]
if not set( igs ).issubset( set( mat.igs ) ):
output( ('%s: incompatible regions: (term, material)'
+ ' (%s(%s) in %s(%s)') %\
(term.name, igs, name, mat.igs, mat.name) )
raise ValueError
if user is None:
return
uns = terms.get_user_names()
uks = user.keys()
for name in uns:
if name not in uks:
output( 'user data "%s" not found' % name )
raise IndexError
# print '********* ok'
##
# 24.07.2006, c
def build_args( term, variables, materials, **kwargs ):
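# start from the caller's kwargs and add the term's variables and materials, keyed by name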
args = kwargs
vns = term.get_variable_names()
for vn in vns:
args[vn] = variables[vn]
mns = term.get_material_names()
for mn in mns:
args[mn] = materials[mn]
return args
##
# 21.07.2006, c
class Equations( Container ):
##
# c: 18.04.2006, r: 20.02.2008
def from_conf( conf ):
objs = OneTypeList( Equation )
conf = copy( conf )
tps = conf.pop( 'namespaces', {} )
itps = invert_dict( tps, True )
ii = 0
for name, desc in conf.iteritems():
output( 'equation "%s":' % name )
output( desc )
eq = Equation( name = name,
desc = desc,
itps = itps )
objs.append( eq )
ii += 1
obj = Equations( objs, itps = itps )
return obj
from_conf = staticmethod( from_conf )
def setup_terms( self, regions, variables, materials, caches = None,
user = None ):
"""Parse equations and create term instances.
Grabs references to materials and variables."""
if caches is None:
self.caches = DataCaches()
else:
self.caches = caches
self.materials = materials
self.variables = variables
for eq in self:
eq.setup_terms( regions, variables, materials, self.caches, user )
##
# c: ??, r: 26.02.2008
def describe_geometry( self, geometries, variables, integrals ):
output( 'describing geometries...' )
tt = time.clock()
for eq in self:
eq.describe_geometry( geometries, variables, integrals )
output( '...done in %.2f s' % (time.clock() - tt) )
##
# 24.08.2006, c
# 24.04.2007
def get_term_geometries( self ):
tgs = set()
for eq in self:
tgs.update( eq.get_term_geometries() )
return tgs
##
# 16.11.2007, c
def get_term_integral_names( self ):
i_names = set()
for eq in self:
i_names.update( eq.get_term_integral_names() )
return i_names
def get_variable_names( self ):
"""Return the list of names of all variables used in equations."""
vns = set()
for eq in self:
for term in eq.terms:
vns.update( term.get_variable_names() )
return list( vns )
##
# 27.02.2007, c
def invalidate_term_caches( self ):
for cache in self.caches.itervalues():
cache.clear()
##
# c: 07.05.2008, r: 07.05.2008
def reset_term_caches( self ):
for cache in self.caches.itervalues():
cache.reset()
##
# 02.03.2007, c
def set_cache_mode( self, cache_override ):
for cache in self.caches.itervalues():
cache.set_mode( cache_override )
def time_update( self, ts ):
for eq in self:
for term in eq.terms:
term.time_update( ts )
##
# c: 02.04.2008, r: 02.04.2008
def init_time( self, ts ):
for cache in self.caches.itervalues():
cache.init_time( ts )
##
# 08.06.2007, c
def advance( self, ts ):
for cache in self.caches.itervalues():
cache.advance( ts.step + 1 )
##
# 21.07.2006, c
class Equation( Struct ):
##
# 25.07.2006, c
# 28.08.2006
# 12.02.2007
def from_desc( name, desc, term_prefixes = None ):
if term_prefixes is None: term_prefixes = {}
obj = Equation( name = name, desc = desc,
itps = invert_dict( term_prefixes, True ) )
return obj
from_desc = staticmethod( from_desc )
##
# 21.07.2006, c
# 25.07.2006
# 01.08.2006
# 11.08.2006
# 12.02.2007
# 27.02.2007
def parse_terms( self, regions ):
terms = parse_terms( regions, self.desc, self.itps )
self.terms = Terms( terms )
##
# 21.07.2006, c
# 24.07.2006
# 22.08.2006
# 25.08.2006
# 27.11.2006
# 20.02.2007
def setup_term_args( self, variables, materials, user = None ):
"""- checks term argument existence in variables, materials, user
- checks compatibility of field and term subdomain lists (igs)"""
setup_term_args( self.terms, variables, materials, user )
##
# 29.11.2006, c
# 27.02.2007
# 08.06.2007
# 11.06.2007
def assign_term_caches( self, caches ):
"""
History sizes for a particular cache instance are taken as maximum
of history_sizes requirements of all terms using the instance.
"""
for term in self.terms:
if not hasattr( term, 'use_caches' ): continue
## print term.name
for name, arg_lists in term.use_caches.iteritems():
## print term.arg_names
## print name, arg_lists
for args in arg_lists:
# Order should be handled in terms...
args = copy( args )
if type( args[-1] ) == dict:
history_sizes = args.pop()
else:
history_sizes = None
ans = [term.get_arg_name( arg, full = True ) for arg in args]
cname = '_'.join( [name] + ans )
## print term.name, name, arg_lists, args, self.name, cname
## print history_sizes
## debug()
if caches.has_key( cname ):
caches[cname].merge_history_sizes( history_sizes )
else:
## print 'new'
try:
constructor = cache_table[name]
except:
raise RuntimeError, 'cache not found! %s in %s'\
% (name, sorted( cache_table.keys() ))
cache = constructor( cname, ans, history_sizes )
caches.insert_cache( cache )
caches.insert_term( cname, term.name, ans )
term.caches = caches
def setup_terms( self, regions, variables, materials, caches,
user = None ):
"""Parse equation and create term instances."""
self.parse_terms( regions )
self.setup_term_args( variables, materials, user )
self.assign_term_caches( caches )
##
#
def describe_geometry( self, geometries, variables, integrals ):
for term in self.terms:
term.describe_geometry( geometries, variables, integrals )
##
# 24.04.2007, c
def get_term_geometries( self ):
tgs = set()
for term in self.terms:
for tg in term.get_geometry():
tgs.add( tg )
return tgs
##
# 16.11.2007, c
def get_term_integral_names( self ):
i_names = set()
for term in self.terms:
i_names.add( term.integral_name )
return i_names
| bsd-3-clause | 3,473,490,920,846,410,000 | 29.682216 | 81 | 0.515013 | false |
averagehuman/django-instance | instance/cli/controller.py | 1 | 2384 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import shutil
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
import logbook
from django.utils.text import slugify
from django.core.management import call_command
from instance.conf import settings, SITE_STORAGE_ROOT_VARIABLE
from instance.utils import pathjoin, pathexists, text_type, mkdir, dirname
log = logbook.Logger(__name__)
class UIError(Exception):
pass
class PathExists(UIError):
def __str__(self):
return "path exists '%s'" % self.args[0]
class PathDoesNotExist(UIError):
def __str__(self):
return "invalid path '%s'" % self.args[0]
class Writer(object):
def __init__(self, stream=None, encoding='utf-8'):
self.stream = stream or BytesIO()
self.encoding = encoding
def write(self, text=''):
self.stream.write(text.encode(self.encoding))
self.stream.write('\n')
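# usage sketch (not part of the original code): w = Writer(); w.write('line')
# buffers the utf-8 encoded line in an in-memory BytesIO; pass an open binary
# stream to send output elsewhere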
class SiteController(object):
def __init__(self, site=None):
from instance.models import DjangoSite
self._site = site
self._cls = DjangoSite
def exists(self, uid):
return self._cls.objects.filter(uid__iexact=uid).count() > 0
def init_site(self, uid, name, fqdn, title=None):
cls = self._cls
try:
site = cls.objects.get(uid__iexact=uid)
except cls.DoesNotExist:
site = cls.objects.create(
uid=uid, name=name, title=title or name,
fqdn=fqdn,
)
log.info("created site '%s'" % site)
settings.init_site(site)
self.set_default(site.uid)
def set_default(self, uid):
self._cls.objects.set_default(uid)
def remove_site(self, uid):
try:
self._cls.objects.get(uid__iexact=uid).delete()
except self._cls.DoesNotExist:
raise UIError("invalid site uid: '%s'" % uid)
@property
def site(self):
if self._site is None:
self._site = settings.get_current_site()
return self._site
def itersites(self):
for obj in self._cls.objects.all():
default = ' *' if obj.is_default else ''
yield obj.pk, obj.uid, obj.name, obj.fqdn, default
def call_django_command(self, cmd, *args, **kwargs):
return call_command(cmd, *args, **kwargs)
| bsd-3-clause | -8,852,039,985,815,156,000 | 25.488889 | 74 | 0.60948 | false |
xarg/rview | encoder/tasks.py | 1 | 3778 | import os
from os import listdir
from os.path import join
import time
import traceback
import logging
from invoke import run, task
import dateutil.parser
import cv2
logging.basicConfig()
logger = logging.getLogger("tasks")
MEDIA_PATH = join(os.path.dirname(__file__), "static")
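# mencoder reads the JPEG frame paths listed in frames.txt (mf://@frames.txt)
# and writes them to <out>.avi at the requested fps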
ENCONDE_CMD = "mencoder -nosound -ovc lavc -lavcopts vcodec=mpeg4:aspect=16/9:vbitrate=8000000 -vf scale=1920:1080 %(options)s -o %(out)s.avi -mf type=jpeg:fps=%(fps)d mf://@frames.txt"
def sort_by_date(filename):
datestr, ext = os.path.splitext(os.path.basename(filename))
return dateutil.parser.parse(datestr)
def detect(path):
img = cv2.imread(path)
cascade = cv2.CascadeClassifier("./haarcascade_frontalface_alt.xml")
rects = cascade.detectMultiScale(img, 1.1, 6, cv2.cv.CV_HAAR_SCALE_IMAGE, (20, 20))
if len(rects) == 0:
return [], img
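# detectMultiScale returns boxes as (x, y, w, h); convert them to (x1, y1, x2, y2) corners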
rects[:, 2:] += rects[:, :2]
return rects, img
def blur(rects, img, dst):
result_image = img.copy()
for x1, y1, x2, y2 in rects:
# Get the origin co-ordinates and the length and width till where the face extends
# get the rectangle img around all the faces
#cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
sub_face = img[y1:y2, x1:x2]
# apply a gaussian blur on this new recangle image
sub_face = cv2.medianBlur(sub_face, 53)
# merge this blurry rectangle to our final image
result_image[y1:y1+sub_face.shape[0], x1:x1+sub_face.shape[1]] = sub_face
cv2.imwrite(dst, result_image);
def modify(src, dst):
""" Given a source and a destination image path try to blur using opencv"""
run("cp %s %s" % (src, dst))
#rects, img = detect(src)
#blur(rects, img, dst)
@task
def encode():
""" Create a timelapse """
stream_path = join(MEDIA_PATH, "streams")
if not os.path.exists(stream_path):
os.makedirs(stream_path)
# where the modified pictures are
modified_path = join(MEDIA_PATH, "_modified")
if not os.path.exists(modified_path):
os.makedirs(modified_path)
while True:
try:
for stream in listdir(MEDIA_PATH):
# create stream path if it doesn't exist yet
if stream != "streams" and stream != '_modified':
modified_stream = join(modified_path, stream)
if not os.path.exists(modified_stream):
os.makedirs(modified_stream)
modified_images = listdir(modified_stream)
image_dir = join(MEDIA_PATH, stream)
for image in listdir(image_dir):
original_image_path = join(image_dir, image)
modified_image_path = join(modified_stream, image)
if image not in modified_images:
# modify image with copy
modify(original_image_path, modified_image_path)
photos = listdir(modified_stream)
photos = [join(modified_stream, p) for p in photos]
photos = sorted(photos, key=sort_by_date)
with open("frames.txt", "w+") as fd:
fd.write("\n".join(photos))
output_path = join(stream_path, stream)
options = ""
if stream == 'iphone4s':
options = "-flip"
encode_cmd = ENCONDE_CMD % {
'fps': 24,
'out': output_path,
'options': options
}
run(encode_cmd)
except Exception as e:
traceback.print_exc()
finally:
time.sleep(90)
| mit | -881,124,830,737,236,500 | 34.980952 | 185 | 0.560614 | false |
giannitedesco/kapital | monop/simplestrategy.py | 1 | 3912 | from strategy import Strategy
import gobject
class SimpleStrategy(Strategy):
def __init__(self):
super(SimpleStrategy, self).__init__()
def do_raise_cash(self, target, hand):
raised = 0
(monopolies, crap) = self.split_hand(hand)
# first try mortgage properties that are not
# part of monopolies
for e in crap:
if raised >= target or e.mortgaged or e.houses > 0:
continue
self.mortgage(e.estateid)
raised += e.mortgageprice
if raised >= target:
return raised
# now try mortgage undeveloped monopolies
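# sum(..., []) flattens the per-group lists into a single list of estates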
monoplist = sum(monopolies, [])
for e in monoplist:
if raised >= target or e.mortgaged or e.houses > 0:
continue
self.mortgage(e.estateid)
raised += e.mortgageprice
if raised >= target:
return raised
# now to sell houses, sell entire rows at once
# just to keep it simple
for g in monopolies:
if True in map(lambda x:x.mortgaged,g):
continue
if raised >= target:
break
for e in g:
if e.houses <= 0:
continue
self.sell_house(e.estateid)
# FIXME
e.houses -= 1
raised += e.sellhouseprice
# shouldn't really be possible, we're bust
return raised
def raise_cash(self, p, target):
hand = self.hand(p)
for e in hand:
self.msg('I own: [%d]: %s\n'%(e.estateid, e.name),
[e.mortgaged and 'red' or 'dark green'])
self.msg('must raise %d bucks!\n'%target)
raised = self.do_raise_cash(target, hand)
if raised < target:
self.msg('only raised %d bucks\n'%raised,
['bold','red'])
return False
self.msg('raised %d bucks\n'%raised, ['bold','dark green'])
return True
def handle_debt(self, p):
self.msg('handle debts\n')
e = self.s.estates[p.location]
due = self.due(p, e)
if due <= 0:
self.msg('not sure what to do\n')
due = 100
self.raise_cash(p, due)
def handle_purchase(self, p):
e = self.s.estates[p.location]
self.msg('price is %d, i gots %d\n'%(e.price, p.money))
if e.price > p.money:
can_afford = self.raise_cash(p, e.price - p.money)
else:
can_afford = True
if can_afford:
self.msg('BUYING IT, THEY HATIN\n', ['dark green'])
return True
else:
self.msg('CANNOT AFFORD, AUCTION\n', ['red'])
return False
def remain_in_jail(self, i):
# decide whether to pay, use card, or what
if i.money < 50:
self.raise_cash(i, 50 - i.money)
self.msg('BUYING OUT OF JAIL\n', ['red'])
return False
def pay_asset_tax(self, p):
self.msg('got %d bucks\n'%p.money)
e = self.s.estates[p.location]
pc = e.taxpercentage and e.taxpercentage or 10
fixed = e.tax and e.tax or 200
money = p.money
for e in self.hand(p):
self.msg('I own: %s (%d + %d)\n'%(e.name,
e.mortgageprice, e.houses * e.sellhouseprice),
[e.mortgaged and 'red' or 'dark green'])
if not e.mortgaged:
money += e.mortgageprice
money += e.houses * e.sellhouseprice
money = float(pc) * float(money) / 100.0
self.msg('fixed price is %d, assets is %d\n'%(fixed, money))
if money < fixed:
self.msg('PAYING PERCENTAGE\n', ['dark green'])
return True
else:
self.msg('PAYING FIXED\n', ['red'])
return False
def manage_estates(self, p):
money = p.money
hand = self.hand(p)
# unmortgage properties
reserve = 200
for e in hand:
if not e.mortgaged:
continue
if money < e.unmortgageprice + reserve:
continue
self.unmortgage(e.estateid)
money -= e.unmortgageprice
# buy houses
(monopolies, misc) = self.split_hand(hand)
for m in monopolies:
tc = sum(map(lambda x:x.houseprice, m))
if money < reserve + tc:
continue
if m[0].houses < 5:
self.msg('monopoly: buying a level on %s for %d\n'%\
(self.s.groups[m[0].group].name, tc),
['bold', 'dark blue'])
for e in m:
if e.houses >= 5:
continue
self.msg(' - %r\n'%e, ['bold', 'dark blue'])
self.buy_house(e.estateid)
e.houses += 1
money -= tc
gobject.type_register(SimpleStrategy)
| gpl-3.0 | -6,952,979,743,303,447,000 | 23.759494 | 62 | 0.634458 | false |
vlahm/simplefc | simplefc/commands/delete_set.py | 1 | 1360 | """delete a flashcard set"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import input
from builtins import str
from future import standard_library
standard_library.install_aliases()

from .base import Base

import sqlite3
import os
import sys


class Delete_set(Base):

    def run(self):
        name = str(self.options.get('<setname>'))
        yn = input("Are you sure you want to delete flashcard set '"
                   + name + "'?\n(y/n)\n> ")
        if yn == 'y':
            path = os.path.dirname(os.path.realpath(__file__))
            dbpfp = path + '/../dbpath.txt'
            if not os.path.exists(dbpfp):
                sys.exit("No database file detected. Use "
                         "'simplefc create_set <setname>' to get "
                         "started.")
            dbpathfile = open(dbpfp, 'r')
            dbpath = dbpathfile.read()
            dbpathfile.close()
            conn = sqlite3.connect(dbpath)
            cur = conn.cursor()
            cur.execute('drop table ' + name + ';')
            conn.commit()
            conn.close()
            print("deleted set '" + name + "'")
        elif yn == 'n':
            print("took no action")
        else:
            print("required input either 'y' or 'n'.")
| gpl-3.0 | -6,615,158,346,313,196,000 | 31.380952 | 68 | 0.544853 | false |
Kvoti/ditto | ditto/comments/api.py | 1 | 1717 | from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django_comments.models import Comment
from rest_framework import serializers, generics, filters
class CommentSerializer(serializers.ModelSerializer):
user = serializers.SlugRelatedField(slug_field='username', read_only=True)
content_type = serializers.SlugRelatedField(
slug_field='model',
queryset=ContentType.objects.all())
class Meta:
model = Comment
fields = (
'id',
'content_type',
'object_pk',
'user',
'comment',
'submit_date',
)
read_only_fields = ('user', 'id', 'submit_date')
# TODO validate object_pk
# def validate():
# pass
class CommentList(generics.ListCreateAPIView):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('content_type__model', 'object_pk')
def create(self, *args, **kwargs):
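# after saving the new comment, respond with the refreshed comment list for the same object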
response = super(CommentList, self).create(*args, **kwargs)
all_comments = Comment.objects.for_model(
self.comment.content_object).order_by('-submit_date')
serializer = CommentSerializer(
all_comments, many=True)
response.data = serializer.data
return response
def perform_create(self, serializer):
self.comment = serializer.save(
user=self.request.user,
site=Site.objects.get_current(),
)
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^$', CommentList.as_view(), name="create_comment"),
)
| bsd-3-clause | -8,549,623,033,277,502,000 | 30.218182 | 78 | 0.635411 | false |
Bergurth/anagram8 | newAnagram2.py | 1 | 6381 | import sys
import re
from substring import *
from itertools import combinations
from collections import defaultdict
if len(sys.argv) < 2: #check for whether input specified or not
print "No jumbled word specified.Please enter a jumbled word."
sys.exit(0)
input_string = sys.argv[1]
uni_input = unicode(input_string,'utf8')
output_file = sys.argv[2]
# dictionary part
try:
inputFile = open("islenska2-20081208.txt","r")
except Exception, e:
print "Exception while reading the dictionary file . Following are the details of the exception :\n\r"+ str(e)
sys.exit(0)
#words = inputFile.read().strip().split("\r\n") #This would be faster, but we cannot use it since the input file format could vary.
fileContents = words = inputFile.read().strip() #Stripping whitespace
wordsOrig = re.split("[\s\n\r]+",fileContents) #Splitting the file into word tokens based on spaces/new lines/carriage returns
words1 = []
words = []
for word in wordsOrig:
words1.append( word.decode('iso-8859-1').encode('utf-8'))
for word in words1:
uniWord = word.decode('utf8')
words.append(uniWord)
# words is now a list with unicode objects from the dictionary
# the following function checks if the input string contains all the letters of the candidate
def hasAllLetters(candidateWord, inputString):
for char in candidateWord:
if char not in inputString:
return False
return True
#identifies repeat letters
def repeatLetters(word):
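# map each character's ordinal value to the number of times it occurs in word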
repeats = {}
for char in word:
if ord(char) in repeats:
repeats[ord(char)] += 1
else:
repeats[ord(char)] = 1
return repeats
def notToManyRepeats(candidateWord,inputString):
input_repeats = repeatLetters(inputString)
word_repeats = repeatLetters(candidateWord)
for key in word_repeats:
if word_repeats[key] > input_repeats[key]:
return False
return True
sentanceLength = len(uni_input)
# possible sentences part
# main function
letterWords = []
for word in words:
if hasAllLetters(word,uni_input):
letterWords.append(word)
possibleWords = []
wordLengths = {}
lengthWords = defaultdict(list)
i = 0
for word in letterWords:
if notToManyRepeats(word, uni_input):
possibleWords.append(word)
wordLengths[i]=len(word)
lengthWords[len(word)].append(word)
i += 1
#print word
# wordLengths is a dictionary mapping each word's index in possibleWords to that word's length
# lengthWords is a dictionary of lists with the key being the length of words and the value a list of
# words of that length
"""
for key in wordLengths:
print wordLengths[key]
"""
combos2 = []
#pair for pair in itertools.combinations(li,2) if sum(pair) == 10
for var in combinations(wordLengths, 2):
if var[0] + var[1] == sentanceLength:
combos2.append(var)
combos3 = []
#pair for pair in itertools.combinations(li,2) if sum(pair) == 10
for var in combinations(wordLengths, 3):
if var[0] + var[1] + var[2] == sentanceLength:
combos3.append(var)
combos4 = []
#pair for pair in itertools.combinations(li,2) if sum(pair) == 10
for var in combinations(wordLengths, 4):
if var[0] + var[1] + var[2] + var[3] == sentanceLength:
combos4.append(var)
"""
combos5 = []
#pair for pair in itertools.combinations(li,2) if sum(pair) == 10
for var in combinations(wordLengths, 5):
if var[0] + var[1] + var[2] + var[3] + var[4] == sentanceLength:
combos5.append(var)
"""
#print lengthWords
#print sentanceLength
print combos2
print "###################################################################"
print uni_input
twoWordCombos = []
for combo in combos2:
#print combo[0]
#print combo[1]
for word1 in lengthWords[combo[0]]:
for word2 in lengthWords[combo[1]]:
longWord = word1 + word2
#print longWord
if notToManyRepeats(longWord, uni_input):
twoWordCombos.append(longWord)
threeWordCombos = []
for combo in combos3:
for word1 in lengthWords[combo[0]]:
for word2 in lengthWords[combo[1]]:
for word3 in lengthWords[combo[2]]:
longWord = word1 + word2 + word3
#print longWord
if notToManyRepeats(longWord, uni_input):
threeWordCombos.append(longWord)
fourWordCombos = []
for combo in combos4:
for word1 in lengthWords[combo[0]]:
for word2 in lengthWords[combo[1]]:
for word3 in lengthWords[combo[2]]:
for word4 in lengthWords[combo[3]]:
longWord = word1 + word2 + word3 + word4
if notToManyRepeats(longWord, uni_input):
fourWordCombos.append(longWord)
"""
fiveWordCombos = []
for combo in combos5:
for word1 in lengthWords[combo[0]]:
for word2 in lengthWords[combo[1]]:
for word3 in lengthWords[combo[2]]:
for word4 in lengthWords[combo[3]]:
for word5 in lengthWords[combo[4]]:
longWord = word1 + word2 + word3 + word4
if notToManyRepeats(longWord, uni_input):
fourWordCombos.append(longWord)
"""
f = open(output_file, 'w')
f.write("two word combos -----------------------------------------------------2\n")
print "two word combos -----------------------------------------------------2"
for x in twoWordCombos:
f.write(x.encode('utf8'))
f.write('\n')
print x
f.write("three word combos -----------------------------------------------------3\n")
print "three word combos ---------------------------------------------------3"
for x in threeWordCombos:
f.write(x.encode('utf8'))
f.write('\n')
print x
f.write("four word combos -----------------------------------------------------4\n")
print "four word combos ----------------------------------------------------4"
for x in fourWordCombos:
f.write(x.encode('utf8'))
f.write('\n')
print x
"""
f.write("five word combos -----------------------------------------------------5\n")
print "five word combos ----------------------------------------------------5"
for x in fiveWordCombos:
f.write(x.encode('utf8'))
f.write('\n')
print x
"""
"""
print combos2
print combos3
print combos4
print combos5
"""
| gpl-2.0 | -8,431,472,854,959,819,000 | 27.110132 | 130 | 0.595048 | false |
erudit/eruditorg | eruditorg/apps/public/site_messages/migrations/0002_auto_20190425_1305.py | 1 | 1845 | # Generated by Django 2.0.13 on 2019-04-25 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("site_message", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="TargetSite",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"label",
models.CharField(
help_text="Site cible, par exemple <em>Public</em>, <em>Tableau de bord des revues</em> ou <em>Tableau de bord des bibliothèques</em>.",
max_length=64,
verbose_name="Site cible",
),
),
],
options={
"verbose_name": "Site cible",
"verbose_name_plural": "Sites cibles",
},
),
migrations.AlterField(
model_name="sitemessage",
name="setting",
field=models.CharField(
blank=True,
help_text="Si le site contient un réglage avec ce nom et que ce réglage est à <em>True</em>, le message sera affiché.",
max_length=64,
null=True,
verbose_name="Réglage",
),
),
migrations.AddField(
model_name="sitemessage",
name="target_sites",
field=models.ManyToManyField(
related_name="_sitemessage_target_sites_+",
to="site_message.TargetSite",
verbose_name="Sites cibles",
),
),
]
| gpl-3.0 | -5,972,333,534,437,446,000 | 31.839286 | 173 | 0.449157 | false |
nickgentoo/scikit-learn-graph | skgraph/datasets/load_graph_datasets.py | 1 | 27349 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
from scipy.sparse import csr_matrix
from ioskgraph import load_target
from ..graph import instance_to_graph
from sklearn.datasets.base import Bunch
#TODO import openbabel only if needed
#from obabel import obabel_to_eden
def dispatch(dataset):
if dataset=="CAS":
print "Loading bursi(CAS) dataset"
g_it=load_graphs_bursi()
elif dataset=="GDD":
print "Loading GDD dataset"
g_it=load_graphs_GDD()
elif dataset=="CPDB":
print "Loading CPDB dataset"
g_it=load_graphs_CPDB()
elif dataset=="AIDS":
print "Loading AIDS dataset"
g_it=load_graphs_AIDS()
elif dataset=="NCI1":
print "Loading NCI1 dataset"
g_it=load_graphs_NCI1()
elif dataset=="NCI109":
print "Loading NCI109 dataset"
g_it=load_graphs_NCI109()
elif dataset=="NCI123":
print "Loading NCI123 dataset"
g_it=load_graphs_NCI123()
elif dataset=="NCI_AIDS":
print "Loading NCI_AIDS dataset"
g_it=load_graphs_NCI_AIDS()
elif dataset=="Chemical2":
print "Loading LEUK40OV41LEUK47OV50 dataset"
g_it=load_graphs_LEUK40OV41LEUK47OV50()
elif dataset=="Chemical1":
print "Loading LEUK40LEUK47OV41OV50 dataset"
g_it=load_graphs_LEUK40LEUK47OV41OV50()
elif dataset=="Chemical3":
print "Loading LEUK40LEUK47OV41OV50LEUK40OV41LEUK47OV50 dataset"
g_it=load_graphs_LEUK40LEUK47OV41OV50LEUK40OV41LEUK47OV50()
elif dataset=="Chemical_reduced":
print "Loading LEUK40OV41LEUK47OV50 REDUCED dataset"
g_it=load_graphs_LEUK40OV41LEUK47OV50_reduced()
elif dataset=="MUTAG":
print "Loading MUTAG dataset"
g_it=load_graphs_MUTAG()
elif dataset=="enzymes":
print "Loading enzymes dataset"
g_it=load_graphs_enzymes()
elif dataset=="proteins":
print "Loading proteins dataset"
g_it=load_graphs_proteins()
elif dataset=="synthetic":
print "Loading synthetic dataset"
g_it=load_graphs_synthetic()
elif dataset=="BZR":
print "Loading BZR dataset"
g_it=load_graphs_BZR()
elif dataset=="COX2":
print "Loading COX2 dataset"
g_it=load_graphs_COX2()
elif dataset=="DHFR":
print "Loading DHFR dataset"
g_it=load_graphs_DHFR()
elif dataset=="PROTEINS_full":
print "Loading PROTEINS_full dataset"
g_it=load_graphs_PROTEINS_full()
elif dataset=="LMdata":
print "Loading LMdata dataset"
g_it=load_graphs_LMdata()
else:
print "Unknown dataset name"
return g_it
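# usage sketch (not part of the original module):
# bunch = dispatch("MUTAG")
# print len(bunch.graphs), bunch.labels, bunch.veclabels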
def convert_to_sparse_matrix(km):
# translate dictionary to Compressed Sparse Row matrix
if len(km) == 0:
raise Exception('ERROR: something went wrong, empty feature_dict. Perhaps wrong data format, i.e. do nodes have the "viewpoint" attribute?')
row, col, data = [], [], []
ne = len(km)
for i in range(ne):
for j in range(ne):
if (km[i][j]!=0):
row.append( i )
col.append( j )
data.append(km[i][j])
print len(row),len(col),len(data)
X = csr_matrix( (data,(row,col)), shape = (ne, ne))
return X
def load_graphs_GDD():
"""Load the GDD graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/GDD/GDD_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/GDD/graphs.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
gra=[i for i in g_it]
print 'Loaded GDD graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_MUTAG():
"""Load the MUTAG graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/MUTAG/mutag_188_target.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/MUTAG/mutag_188_data.can'
_target=load_target(input_target_url)
g_it=obabel_to_eden(input = input_data_url,file_type ='smi')
gra=[i for i in g_it]
print 'Loaded MUTAG graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_CPDB():
"""Load the CPDB graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/CPDB/mutagen_labels.tab'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/CPDB/mutagen_smile.can'
_target=load_target(input_target_url)
from obabel import obabel_to_eden
g_it=obabel_to_eden(input = input_data_url,file_type ='smi')
gra=[i for i in g_it]
print 'Loaded CPDB graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_AIDS():
"""Load the AIDS graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/AIDS/CAvsCM.y'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/AIDS/CAvsCM.can'
_target=load_target(input_target_url)
from obabel import obabel_to_eden
g_it=obabel_to_eden(input = input_data_url,file_type ='smi')
gra=[i for i in g_it]
print 'Loaded AIDS graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_NCI1():
"""Load the NCI1 graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/NCI1/NCI1_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/NCI1/NCI1_graphs.gspan'
_target=load_target(input_target_url)
label_dict={}
g_it=instance_to_graph(input = input_data_url)
#g_it=instance_to_graph(input = input_data_url,label_dict=label_dict)
print 'Loaded NCI1 graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
#label_dict=label_dict,
labels=True,
veclabels=False)
def load_graphs_NCI109():
"""Load the NCI109 graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/NCI109/NCI109_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/NCI109/NCI109_graphs.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
print 'Loaded NCI109 graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=False)
def load_graphs_bursi():
"""Load the Bursi graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.bioinf.uni-freiburg.de/~costa/bursi.target'
input_data_url='http://www.bioinf.uni-freiburg.de/~costa/bursi.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
print 'Loaded Bursi graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=False)
def load_graphs_enzymes():
"""Load the ENZYMES graph dataset for (multiclass) graph classification from:
Schomburg, I., Chang, A., Ebeling, C., Gremse, M., Heldt, C., Huhn, G., & Schomburg, D. (2004).
BRENDA, the enzyme database: updates and major new developments.
Nucleic Acids Research, 32, D431–D433. doi:10.1093/nar/gkh081
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/ENZYMES.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/ENZYMES.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded ENZYMES graph dataset for (multiclass) graph classification from:'
print 'Schomburg, I., Chang, A., Ebeling, C., Gremse, M., Heldt, C., Huhn, G., & Schomburg, D. (2004).'
print 'BRENDA, the enzyme database: updates and major new developments.'
print 'Nucleic Acids Research, 32, D431–D433. doi:10.1093/nar/gkh081'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_proteins():
"""Load the PROTEINS graph dataset for graph classification from:
Dobson, P. D., & Doig, A. J. (2003)
Distinguishing enzyme structures from non-enzymes without alignments.
Journal of Molecular Biology, 330, 771–783. doi:10.1016/S0022-2836(03)00628-4
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
print 'Loaded PROTEINS graph dataset for graph classification from:'
print 'Dobson, P. D., & Doig, A. J. (2003)'
print 'Distinguishing enzyme structures from non-enzymes without alignments.'
print 'Journal of Molecular Biology, 330, 771–783. doi:10.1016/S0022-2836(03)00628-4'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_synthetic():
"""Load the SYNTHETIC graph dataset for graph classification from:
Feragen, A., Kasenburg, N., Petersen, J., de Bruijne, M., & Borgwardt, K. M. (2013)
Scalable kernels for graphs with continuous attributes.
In Neural Information Processing Systems (NIPS) 2013 (pp. 216–224).
Retrieved from http://papers.nips.cc/paper/5155-scalable-kernels-for-graphs-with-continuous-attributes.pdf
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/SYNTHETICnew.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/SYNTHETICnew.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
g=[i for i in g_it]
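# give every node a discrete label: its degree, stored as a string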
for i in g:
for n in i.nodes():
i.node[n]['label']=str(i.degree(n))
print 'Loaded SYNTHETIC graph dataset for graph classification from:'
print 'Feragen, A., Kasenburg, N., Petersen, J., de Bruijne, M., & Borgwardt, K. M. (2013)'
print 'Scalable kernels for graphs with continuous attributes.'
print 'In Neural Information Processing Systems (NIPS) 2013 (pp. 216–224).'
return Bunch(graphs=g,
target=_target,
labels=True,
veclabels=True)
def load_graphs_BZR():
"""Load the BZR graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/BZR_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/BZR.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded BZR graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_COX2():
"""Load the COX2 graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/COX2_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/COX2.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded COX2 graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_DHFR():
"""Load the DHFR graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DHFR_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DHFR.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded DHFR graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_PROTEINS_full():
"""Load the PROTEINS_full graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS_full_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS_full.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded PROTEINS_full graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_NCI123():
"""Load the NCI123 graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/Leukemia/leukemia_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/Leukemia/leukemia.smile'
_target=load_target(input_target_url)
g_it=obabel_to_eden(input = input_data_url,file_type ='can')
gra=[i for i in g_it]
print 'Loaded NCI123 graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_NCI_AIDS():
"""Load the NCI antiHIV graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/NCI_AIDS/AIDO99SD_numeric.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/NCI_AIDS/AIDO99SD.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
print 'Loaded NCI antiHIV dataset graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40OV41LEUK47OV50():
#chemical2
"""Load the Chemical2 graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/stream.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40LEUK47OV41OV50():
#chemical1
"""Load the Chemical1 graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_NEW/labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_NEW/stream.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40OV41LEUK47OV50_reduced():
"""Load the Chemical graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/labels_reduced_101.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/stream_reduced_101.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40LEUK47OV41OV50LEUK40OV41LEUK47OV50():
#chemical1
"""Load the Chemical1 graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_CHEMICAL_BIG/labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_CHEMICAL_BIG/stream.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LMdata():
"""Load the LMdata graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/LMdata/labels.txt.standardized'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets//LMdata/graphs.gspan.standardized'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=instance_to_graph(input_data_url,label_dict,counter)
print 'Loaded LMdata graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
| gpl-3.0 | 425,709,170,503,092,800 | 38.222382 | 152 | 0.664191 | false |
Jumpscale/core9 | JumpScale9/tools/nodemgr/Node.py | 1 | 8095 | from js9 import j
TEMPLATE = """
name = ""
clienttype = ""
sshclient = ""
active = false
selected = false
category = ""
description = ""
secretconfig_ = ""
pubconfig = ""
installed = false
zosclient = ""
"""
FormBuilderBaseClass = j.tools.formbuilder.baseclass_get()
JSBASE = j.application.jsbase_get_class()
class MyConfigUI(FormBuilderBaseClass):
def init(self):
# makes sure that this property is not auto populated, not needed when in form_add_items_pre
self.auto_disable.append("clienttype")
self.auto_disable.append("active")
self.auto_disable.append("selected")
def form_add_items_post(self):
self.widget_add_boolean("active", default=False)
self.widget_add_boolean("selected", default=True)
self.widget_add_multichoice("clienttype", [
"ovh", "packetnet", "ovc", "physical", "docker", "container"])
JSConfigBase = j.tools.configmanager.base_class_config
class Node(JSConfigBase):
def __init__(self, instance, data={}, parent=None, interactive=False):
JSConfigBase.__init__(self, instance=instance, data=data,
parent=parent, template=TEMPLATE, ui=MyConfigUI, interactive=interactive)
self._sshclient = None
self._ftpclient = None
self._private = None
@property
def private(self):
"""
returns True when the node sits on a private address, e.g. inside an OVC space
"""
if self._private is None:
self._private = False
if self.config.data["sshclient"] != "":
if self.config.data["addr_priv"]:
self._private = self.sshclient.isprivate
return self._private
@property
def addr(self):
if self.config.data["sshclient"] != "":
self.sshclient
return self.sshclient.addr
@property
def port(self):
if self.config.data["sshclient"] != "":
return self.sshclient.port
@property
def active(self):
return self.config.data["active"]
@active.setter
def active(self, val):
self.config.data = {"active": val}
@property
def clienttype(self):
return self.config.data["clienttype"]
@clienttype.setter
def clienttype(self, val):
self.config.data = {"clienttype": val}
@property
def category(self):
return self.config.data["category"]
@category.setter
def category(self, val):
self.config.data = {"category": val}
@property
def name(self):
return self.config.data["name"]
@name.setter
def name(self, val):
self.config.data = {"name": val}
@property
def description(self):
return self.config.data["description"]
@description.setter
def description(self, val):
self.config.data = {"description": val}
@property
def selected(self):
return self.config.data["selected"]
@selected.setter
def selected(self, val):
self.config.data = {"selected": bool(val)}
@property
def secretconfig(self):
data = self.config.data["secretconfig_"]
data = j.data.serializer.json.loads(data)
return data
@secretconfig.setter
def secretconfig(self, data):
data = j.data.serializer.json.dumps(data)
self.config.data = {"secretconfig_": data}
@property
def pubconfig(self):
data = self.config.data["pubconfig"]
data = j.data.serializer.json.loads(data)
return data
@pubconfig.setter
def pubconfig(self, data):
data = j.data.serializer.json.dumps(data)
self.config.data = {"pubconfig": data}
@property
def isconnected(self):
if self.config.data["sshclient"] != "":
return self.sshclient.isconnected
# if self._connected is None:
# # lets test tcp on 22 if not then 9022 which are our defaults
# test = j.sal.nettools.tcpPortConnectionTest(
# self.addr, self.port, 3)
# if test is False:
# self.logger.debug("could not connect to %s:%s, will try port 9022" %
# (self.addr, self.port))
# if self.port == 22:
# test = j.sal.nettools.tcpPortConnectionTest(
# self.addr, 9022, 1)
# if test:
# self.port = 9022
# if test is False:
# self._connected = False
# else:
# self._connected = True
# self.active = True
# self._sshclient = None
# self._ftpclient = None
# return self._connected
@property
def sftp(self):
if self.isconnected:
return self.executor.sshclient.sftp
else:
raise RuntimeError("node %s cannot be reached, cannot get ftpclient." % self.instance)
@property
def sshclient(self):
if self._sshclient is None:
self.logger.debug("sshclient get")
self._sshclient = j.clients.ssh.get(instance=self.config.data["sshclient"])
self.clienttype = "ssh"
return self._sshclient
@property
def executor(self):
if self.config.data["sshclient"] != "":
return self.sshclient.prefab.executor
@property
def prefab(self):
return j.tools.prefab.get(executor=self.executor, usecache=True)
def clean(self):
cmd = """
rm -f ~/.profile_js
rm -f ~/env.sh
rm -f /etc/jumpscale9.toml
"""
self.executor.execute(cmd)
def test_executor(self):
self.executor.test()
def getActiveCodeDirs(self):
res = []
done = []
repo = j.clients.git.currentDirGitRepo()
if repo is not None:
res.append(j.tools.develop.codedirs.get(repo.type, repo.account, repo.name))
done.append(repo.BASEDIR)
# ddirs = j.tools.develop.codedirs.getActiveCodeDirs(): #TODO: *1 broken
ddirs = j.clients.git.getGitReposListLocal(account="jumpscale") # took predefined list
for key, path in ddirs.items():
self.logger.debug("try to find git dir for:%s" % path)
try:
repo = j.clients.git.get(path)
if path not in done:
res.append(j.tools.develop.codedirs.get(repo.type, repo.account, repo.name))
except Exception as e:
self.logger.error(e)
return res
def sync(self, monitor=False):
if not self.selected:
self.selected = True
ddirs = self.getActiveCodeDirs()
for ddir in ddirs:
dest = "%s/%s/%s/%s" % (
self.executor.dir_paths["CODEDIR"], ddir.type, ddir.account, ddir.name)
source = ddir.path
self.executor.upload(source, dest, dest_prefix='', recursive=True, createdir=True)
self.logger.info("SYNC DONE")
if monitor:
self.monitor()
def portforward(self, remote, local):
self.sshclient.port_forward_local_start(remoteport=remote, localport=local)
def monitor(self):
"""
will sync all active core dirs
"""
if not self.selected:
self.selected = True
# paths = [item.path for item in self.getActiveCodeDirs()]
paths = self.getActiveCodeDirs()
j.tools.develop.sync_active(paths)
def saveToHostfile(self):
j.tools.prefab.local.system.ns.hostfile_set(self.name, self.addr)
def save(self):
self.config.save()
def ssh(self):
cmd = "ssh -A root@%s -p %s" % (self.sshclient.addr_variable, self.sshclient.port_variable)
j.sal.process.executeInteractive(cmd)
def __str__(self):
if self.selected is True:
return "%-14s %-25s:%-4s [%s] *" % (self.name, self.addr, self.port, self.category)
else:
return "%-14s %-25s:%-4s [%s]" % (self.name, self.addr, self.port, self.category)
__repr__ = __str__
| apache-2.0 | -113,120,470,263,446,180 | 29.662879 | 103 | 0.576405 | false |
thebe2/ding | morning/notice_montage.py | 1 | 10104 | # -*- coding: utf-8 -*-
# Scrape the daily trading-notice digest from Securities Daily (ccstock.cn)
# http://www.ccstock.cn/meiribidu/jiaoyitishi/
# Code targets Python 2.7; IDE: PyCharm
import requests
import random
import sys
import time
from bs4 import BeautifulSoup
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import smtplib
import codecs
from datetime import date, timedelta
import re
import ConfigParser
import logging
# Module-level logger
logger = logging.getLogger()
# Default configuration values
DEBUG = True
WEBSITE = "http://www.ccstock.cn/meiribidu/jiaoyitishi"
INTERVAL = 5
# Pool of User-Agent strings for simulated request headers
user_agent_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
def download_get_html(url, charset="utf-8", timeout=10, num_retries=3):
UA = random.choice(user_agent_list)
headers = {
'User-Agent': UA,
'Content-Type': 'text/html; charset=' + charset
}
try:
response = requests.get(url, headers=headers,
timeout=timeout)
        # Set the response encoding
response.encoding = charset
        # Tolerate 404 responses
if response.status_code == 404:
logger.debug('get 404: %s ', url)
return None
else:
logger.debug('get : %s ', url)
return response.text
except:
if num_retries > 0:
time.sleep(10)
            logger.debug('Request failed, will retry in 10s (%d attempts remaining)', num_retries)
return download_get_html(url, charset, timeout, num_retries - 1)
else:
            logger.debug('Retries exhausted, cancelling the request')
return None
# Get the URL of the current day's digest page
def parser_list_page(html_doc, now):
soup = BeautifulSoup(html_doc, 'lxml', from_encoding='utf-8')
    # Only the first <li> tag (the latest entry) is needed
tag = soup.find("div", class_="listMain").find("li")
link_tag = tag.find("a")
span_tag = tag.find("span")
page_url = link_tag['href']
    # Slice out the date portion
date_string = span_tag.string[0:10]
if date_string == now:
return page_url
else:
return None
def parser_item_page(html_doc, now):
soup = BeautifulSoup(html_doc, 'lxml', from_encoding='utf-8')
# title = soup.find("h1").string
newscontent = soup.find("div", id="newscontent")
html = newscontent.prettify()
return html
def get_now():
if DEBUG:
yesterday = date.today() - timedelta(1)
return yesterday.strftime('%Y-%m-%d')
else:
now = time.strftime("%Y-%m-%d", time.localtime())
return now
def write_file(content, now):
fileName = "morning-" + now + '.txt'
# full_path = os.path.join(path, fileName)
f = codecs.open(fileName, 'a', 'utf-8')
f.write(content)
f.close()
def read_file(now):
fileName = "morning-" + now + '.txt'
# full_path = os.path.join(path, fileName)
f = open(fileName)
html = ""
i = 1
fileName2 = "morning-" + now + '-f.txt'
f2 = codecs.open(fileName2, 'a', 'utf-8')
for text in f.readlines():
line = text.decode('utf-8')
newline = transform_number(line, i)
f2.write(newline)
html = html + newline
i = i + 1
f.close()
f2.close()
return html
# Reformat RMB amounts (annotate with larger units)
def transform_yuan(lineText):
    # e.g. "中标金额313,000,000.00元" (a winning-bid amount of 313,000,000.00 yuan)
p = re.compile(u"[\d*,]*\d*[.]?\d*万元")
searchObj = re.findall(p, lineText)
if searchObj:
for x in xrange(0, len(searchObj)):
s1 = searchObj[x]
ns = filter(lambda ch: ch in '0123456789.', s1)
nb = float(ns)
if nb >= 10000:
s2 = str(nb / 10000) + "亿元"
lineText = lineText.replace(s1, s1 + "(" + s2 + ")")
p = re.compile(u"[\d*,]*\d*[.]?\d+元")
searchObj = re.findall(p, lineText)
if searchObj:
for x in xrange(0, len(searchObj)):
s1 = searchObj[x]
ns = filter(lambda ch: ch in '0123456789.', s1)
nb = float(ns)
if nb >= 100000000:
s2 = str(nb / 100000000) + "亿元"
lineText = lineText.replace(s1, s1 + "(" + s2 + ")")
elif nb >= 10000:
s2 = str(nb / 10000) + "万元"
lineText = lineText.replace(s1, s1 + "(" + s2 + ")")
return lineText
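# Illustrative effect of transform_yuan (derived from the code above, not an
# original comment): a match such as "313,000,000.00元" becomes
# "313,000,000.00元(3.13亿元)", and large amounts quoted in 万元 are annotated
# with their 亿元 equivalent.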
# Reformat share counts (annotate with larger units)
def transform_gu(lineText):
p = re.compile(u"[\d*,]*\d+股")
searchObj = re.findall(p, lineText)
if searchObj:
for x in xrange(0, len(searchObj)):
s1 = searchObj[x]
ns = filter(lambda ch: ch in '0123456789', s1)
nb = float(ns)
if nb >= 100000000:
s2 = str(nb / 100000000) + "亿股"
lineText = lineText.replace(s1, s1 + "(" + s2 + ")")
elif nb >= 10000:
s2 = str(nb / 10000) + "万股"
lineText = lineText.replace(s1, s1 + "(" + s2 + ")")
return lineText
def transform_number(lineText, line):
lineText = transform_yuan(lineText)
lineText = transform_gu(lineText)
    # TODO: handle shareholding increase/decrease figures
    # (leading whitespace at the start of the line is not handled)
return lineText
def send_notice_mail(html, now):
cf = get_config_parser()
to_list = cf.get("mailconf", "to_list").split(",")
mail_host = cf.get("mailconf", "mail_host")
mail_username = cf.get("mailconf", "mail_username")
mail_user = cf.get("mailconf", "mail_user")
mail_pass = cf.get("mailconf", "mail_pass")
mail_postfix = cf.get("mailconf", "mail_postfix")
me = "AStockMarketNoticeWatcher" + "<" + \
mail_username + "@" + mail_postfix + ">"
msg = MIMEMultipart()
    subject = now + ' - Daily digest of key secondary-market announcements'
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = me
msg['To'] = ";".join(to_list)
mail_msg = html
    # Email body content
msg.attach(MIMEText(mail_msg, 'html', 'utf-8'))
try:
server = smtplib.SMTP()
server.connect(mail_host)
server.ehlo()
server.starttls()
server.login(mail_user, mail_pass)
server.sendmail(me, to_list, msg.as_string())
server.close()
logger.debug('sent mail successfully')
except smtplib.SMTPException, e:
        # Reference: http://www.cnblogs.com/klchang/p/4635040.html
        logger.debug('Error: unable to send mail %s ', repr(e))
def get_config_parser():
config_file_path = "notice_montage.ini"
cf = ConfigParser.ConfigParser()
cf.read(config_file_path)
return cf
# Parse the configuration file
def init_config():
cf = get_config_parser()
global DEBUG, INTERVAL, WEBSITE
INTERVAL = int(cf.get("timeconf", "interval"))
DEBUG = cf.get("urlconf", "debug") == 'True'
WEBSITE = cf.get("urlconf", "website")
def init_log():
if DEBUG:
        # In debug mode, log to a stream (console)
handler = logging.StreamHandler()
else:
        # In production, log to a file for later reference
handler = logging.FileHandler("notice_montage.log")
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def main(num_retries=3):
now = get_now()
logger.debug("now %s", now)
list_html_doc = download_get_html(WEBSITE)
page_url = parser_list_page(list_html_doc, now)
logger.debug("page URL : %s", page_url)
if page_url == None:
        # Today's digest is not published yet; pause INTERVAL minutes, then retry
if num_retries > 0:
time.sleep(60 * INTERVAL)
main(num_retries - 1)
else:
            logger.debug('Giving up after 3 attempts')
else:
page_html_doc = download_get_html(page_url)
content = parser_item_page(page_html_doc, now)
        # Keep a local copy of the file for reference
write_file(content, now)
html = read_file(now)
        # Send the email notification
send_notice_mail(html, now)
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding("utf-8")
init_config()
init_log()
logger.debug('start notice montage')
main()
logger.debug("notice montage run end")
| mit | 7,004,598,003,179,474,000 | 33.355872 | 123 | 0.596126 | false |
gkc1000/pyscf | pyscf/hessian/thermo.py | 1 | 14002 | #!/usr/bin/env python
#
# This code was copied from the data generation program of Tencent Alchemy
# project (https://github.com/tencent-alchemy).
#
#
# #
# # Copyright 2019 Tencent America LLC. All Rights Reserved.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# #
# # Author: Qiming Sun <[email protected]>
# #
'''
Thermochemistry analysis.
Ref:
psi4/psi4/driver/qcdb/vib.py
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.data import nist
LINDEP_THRESHOLD = 1e-7
def harmonic_analysis(mol, hess, exclude_trans=True, exclude_rot=True,
imaginary_freq=True):
'''Each column is one mode
imaginary_freq (boolean): save imaginary_freq as complex number (if True)
or negative real number (if False)
'''
results = {}
atom_coords = mol.atom_coords()
mass = mol.atom_mass_list(isotope_avg=True)
mass_center = numpy.einsum('z,zx->x', mass, atom_coords) / mass.sum()
atom_coords = atom_coords - mass_center
natm = atom_coords.shape[0]
mass_hess = numpy.einsum('pqxy,p,q->pqxy', hess, mass**-.5, mass**-.5)
h = mass_hess.transpose(0,2,1,3).reshape(natm*3,natm*3)
TR = _get_TR(mass, atom_coords)
TRspace = []
if exclude_trans:
TRspace.append(TR[:3])
if exclude_rot:
TRspace.append(TR[3:])
if TRspace:
TRspace = numpy.vstack(TRspace)
q, r = numpy.linalg.qr(TRspace.T)
P = numpy.eye(natm * 3) - q.dot(q.T)
w, v = numpy.linalg.eigh(P)
bvec = v[:,w > LINDEP_THRESHOLD]
h = reduce(numpy.dot, (bvec.T, h, bvec))
force_const_au, mode = numpy.linalg.eigh(h)
mode = bvec.dot(mode)
else:
force_const_au, mode = numpy.linalg.eigh(h)
freq_au = numpy.lib.scimath.sqrt(force_const_au)
results['freq_error'] = numpy.count_nonzero(freq_au.imag > 0)
if not imaginary_freq and numpy.iscomplexobj(freq_au):
# save imaginary frequency as negative frequency
freq_au = freq_au.real - abs(freq_au.imag)
results['freq_au'] = freq_au
au2hz = (nist.HARTREE2J / (nist.ATOMIC_MASS * nist.BOHR_SI**2))**.5 / (2 * numpy.pi)
results['freq_wavenumber'] = freq_wn = freq_au * au2hz / nist.LIGHT_SPEED_SI * 1e-2
norm_mode = numpy.einsum('z,zri->izr', mass**-.5, mode.reshape(natm,3,-1))
results['norm_mode'] = norm_mode
reduced_mass = 1./numpy.einsum('izr,izr->i', norm_mode, norm_mode)
results['reduced_mass'] = reduced_mass
# https://en.wikipedia.org/wiki/Vibrational_temperature
results['vib_temperature'] = freq_au * au2hz * nist.PLANCK / nist.BOLTZMANN
# force constants
dyne = 1e-2 * nist.HARTREE2J / nist.BOHR_SI**2
results['force_const_au'] = force_const_au
results['force_const_dyne'] = reduced_mass * force_const_au * dyne #cm^-1/a0^2
#TODO: IR intensity
return results
def rotation_const(mass, atom_coords, unit='GHz'):
'''Rotational constants to characterize rotational spectra
Kwargs:
unit (string) : One of GHz, wavenumber
'''
mass_center = numpy.einsum('z,zr->r', mass, atom_coords) / mass.sum()
r = atom_coords - mass_center
im = numpy.einsum('z,zr,zs->rs', mass, r, r)
im = numpy.eye(3) * im.trace() - im
e = numpy.sort(numpy.linalg.eigvalsh(im))
unit_im = nist.ATOMIC_MASS * (nist.BOHR_SI)**2
unit_hz = nist.HBAR / (4 * numpy.pi * unit_im)
with numpy.errstate(divide='ignore'):
if unit.lower() == 'ghz':
e = unit_hz / e * 1e-9
elif unit.lower() == 'wavenumber':
e = unit_hz / e / nist.LIGHT_SPEED_SI * 1e-2
else:
raise RuntimeError('Unsupported unit ' + unit)
return e
def thermo(model, freq, temperature=298.15, pressure=101325):
mol = model.mol
atom_coords = mol.atom_coords()
mass = mol.atom_mass_list(isotope_avg=True)
mass_center = numpy.einsum('z,zx->x', mass, atom_coords) / mass.sum()
atom_coords = atom_coords - mass_center
natm = atom_coords.shape[0]
kB = nist.BOLTZMANN
h = nist.PLANCK
c = nist.LIGHT_SPEED_SI
beta = 1. / (kB * temperature)
R_Eh = kB*nist.AVOGADRO / (nist.HARTREE2J * nist.AVOGADRO)
results = {}
results['temperature'] = (temperature, 'K')
results['pressure'] = (pressure, 'Pa')
E0 = model.e_tot
results['E0'] = (E0, 'Eh')
# Electronic part
results['S_elec' ] = (R_Eh * numpy.log(mol.multiplicity), 'Eh/K')
results['Cv_elec'] = results['Cp_elec'] = (0, 'Eh/K')
results['E_elec' ] = results['H_elec' ] = (E0, 'Eh')
# Translational part. See also https://cccbdb.nist.gov/thermo.asp for the
# partition function q_trans
mass_tot = mass.sum() * nist.ATOMIC_MASS
q_trans = ((2.0 * numpy.pi * mass_tot * kB * temperature / h**2)**1.5
* kB * temperature / pressure)
results['S_trans' ] = (R_Eh * (2.5 + numpy.log(q_trans)), 'Eh/K')
results['Cv_trans'] = (1.5 * R_Eh, 'Eh/K')
results['Cp_trans'] = (2.5 * R_Eh, 'Eh/K')
results['E_trans' ] = (1.5 * R_Eh * temperature, 'Eh')
results['H_trans' ] = (2.5 * R_Eh * temperature, 'Eh')
# Rotational part
rot_const = rotation_const(mass, atom_coords, 'GHz')
results['rot_const'] = (rot_const, 'GHz')
if numpy.all(rot_const > 1e8):
rotor_type = 'ATOM'
elif rot_const[0] > 1e8 and (rot_const[1] - rot_const[2] < 1e-3):
rotor_type = 'LINEAR'
else:
rotor_type = 'REGULAR'
sym_number = rotational_symmetry_number(mol)
results['sym_number'] = (sym_number, '')
# partition function q_rot (https://cccbdb.nist.gov/thermo.asp)
if rotor_type == 'ATOM':
results['S_rot' ] = (0, 'Eh/K')
results['Cv_rot'] = results['Cp_rot'] = (0, 'Eh/K')
results['E_rot' ] = results['H_rot' ] = (0, 'Eh')
elif rotor_type == 'LINEAR':
B = rot_const[1] * 1e9
q_rot = kB * temperature / (sym_number * h * B)
results['S_rot' ] = (R_Eh * (1 + numpy.log(q_rot)), 'Eh/K')
results['Cv_rot'] = results['Cp_rot'] = (R_Eh, 'Eh/K')
results['E_rot' ] = results['H_rot' ] = (R_Eh * temperature, 'Eh')
else:
ABC = rot_const * 1e9
q_rot = ((kB*temperature/h)**1.5 * numpy.pi**.5
/ (sym_number * numpy.prod(ABC)**.5))
results['S_rot' ] = (R_Eh * (1.5 + numpy.log(q_rot)), 'Eh/K')
results['Cv_rot'] = results['Cp_rot'] = (1.5 * R_Eh, 'Eh/K')
results['E_rot' ] = results['H_rot' ] = (1.5 * R_Eh * temperature, 'Eh')
# Vibrational part.
au2hz = (nist.HARTREE2J / (nist.ATOMIC_MASS * nist.BOHR_SI**2))**.5 / (2 * numpy.pi)
idx = freq.real > 0
vib_temperature = freq.real[idx] * au2hz * h / kB
rt = reduced_temperature = vib_temperature / max(1e-14, temperature)
e = numpy.exp(-rt)
ZPE = R_Eh * .5 * vib_temperature.sum()
results['ZPE'] = (ZPE, 'Eh')
results['S_vib' ] = (R_Eh * (rt*e/(1-e) - numpy.log(1-e)).sum(), 'Eh/K')
results['Cv_vib'] = results['Cp_vib'] = (R_Eh * (e * rt**2/(1-e)**2).sum(), 'Eh/K')
results['E_vib' ] = results['H_vib' ] = \
(ZPE + R_Eh * temperature * (rt * e / (1-e)).sum(), 'Eh')
results['G_elec' ] = (results['H_elec' ][0] - temperature * results['S_elec' ][0], 'Eh')
results['G_trans'] = (results['H_trans'][0] - temperature * results['S_trans'][0], 'Eh')
results['G_rot' ] = (results['H_rot' ][0] - temperature * results['S_rot' ][0], 'Eh')
results['G_vib' ] = (results['H_vib' ][0] - temperature * results['S_vib' ][0], 'Eh')
def _sum(f):
keys = ('elec', 'trans', 'rot', 'vib')
return sum(results.get(f+'_'+key, (0,))[0] for key in keys)
results['S_tot' ] = (_sum('S' ), 'Eh/K')
results['Cv_tot'] = (_sum('Cv'), 'Eh/K')
results['Cp_tot'] = (_sum('Cp'), 'Eh/K')
results['E_0K' ] = (E0 + ZPE, 'Eh')
results['E_tot' ] = (_sum('E'), 'Eh')
results['H_tot' ] = (_sum('H'), 'Eh')
results['G_tot' ] = (_sum('G'), 'Eh')
return results
def _get_TR(mass, coords):
'''Translational mode and rotational mode'''
mass_center = numpy.einsum('z,zx->x', mass, coords) / mass.sum()
coords = coords - mass_center
natm = coords.shape[0]
massp = mass.reshape(natm,1) ** .5
ex = numpy.repeat([[1, 0, 0]], natm, axis=0)
ey = numpy.repeat([[0, 1, 0]], natm, axis=0)
ez = numpy.repeat([[0, 0, 1]], natm, axis=0)
# translational mode
Tx = ex * massp
Ty = ey * massp
Tz = ez * massp
# rotational mode
Rx = massp * (ey * coords[:,2:3] - ez * coords[:,1:2])
Ry = massp * (ez * coords[:,0:1] - ex * coords[:,2:3])
Rz = massp * (ex * coords[:,1:2] - ey * coords[:,0:1])
return (Tx.ravel(), Ty.ravel(), Tz.ravel(),
Rx.ravel(), Ry.ravel(), Rz.ravel())
def rotational_symmetry_number(mol):
'''Number of unique orientations of the rigid molecule that only
interchange identical atoms.
Source http://cccbdb.nist.gov/thermo.asp (search "symmetry number")
'''
from pyscf import symm
group = symm.detect_symm(mol._atom)[0]
if group in ['SO3', 'C1', 'Ci', 'Cs', 'Coov']:
sigma = 1
elif group == 'Dooh':
sigma = 2
elif group in ['T', 'Td']:
sigma = 12
elif group == 'Oh':
sigma = 24
elif group == 'Ih':
sigma = 60
elif group[0] == 'C': # 'Cn', 'Cnv', 'Cnh'
sigma = int(''.join([x for x in group if x.isdigit()]))
elif group[0] == 'D': # 'Dn', 'Dnd', 'Dnh'
sigma = 2 * int(''.join([x for x in group if x.isdigit()]))
elif group[0] == 'S': # 'Sn'
sigma = int(''.join([x for x in group if x.isdigit()])) / 2
else:
raise RuntimeError("symmetry group: " + group)
return sigma
def dump_thermo(mol, results):
dump = mol.stdout.write
dump('temperature %.4f [%s]\n' % results['temperature'])
dump('pressure %.2f [%s]\n' % results['pressure'])
dump('Rotational constants [%s] %.5f %.5f %.5f\n'
% ((results['rot_const'][1],) + tuple(results['rot_const'][0])))
dump('Symmetry number %d\n' % results['sym_number'][0])
dump('Zero-point energy (ZPE) %.5f [Eh] %.3f [J/mol]\n'
% (results['ZPE'][0], results['ZPE'][0] * nist.HARTREE2J * nist.AVOGADRO))
keys = ('tot', 'elec', 'trans', 'rot', 'vib')
dump(' %s\n' % ' '.join('%10s'%x for x in keys))
def convert(f, keys, unit):
if 'Eh' in unit:
conv = nist.HARTREE2J * nist.AVOGADRO
else:
conv = 1
return ' '.join('%10.3f'%(results.get(f+'_'+key, (0,))[0]*conv) for key in keys)
def write(title, f):
tot, unit = results[f+'_tot']
msg = convert(f, keys, unit)
unit = unit.replace('Eh', 'J/mol')
s = '%s [%s]' % (title, unit)
dump('%-20s %s\n' % (s, msg))
write('Entropy', 'S')
write('Cv', 'Cv')
write('Cp', 'Cp')
dump('%-28s %s\n'
% ('Internal energy [J/mol]', convert('E', keys[2:], 'Eh')))
dump('%-22s %.5f %.5f\n'
% ('Internal energy [Eh]', results['E_tot'][0], results['E0'][0]))
dump('%-28s %s\n'
% ('Enthalpy [J/mol]', convert('H', keys[2:], 'Eh')))
dump('%-22s %.5f\n'
         % ('Enthalpy [Eh]', results['H_tot'][0]))
dump('%-28s %s\n'
% ('Gibbs free energy [J/mol]', convert('G', keys[2:], 'Eh')))
dump('%-22s %.5f\n'
% ('Gibbs free energy [Eh]', results['G_tot'][0]))
def dump_normal_mode(mol, results):
dump = mol.stdout.write
freq_wn = results['freq_wavenumber']
idx = freq_wn.real > 0
freq_wn = freq_wn.real[idx]
nfreq = freq_wn.size
r_mass = results['reduced_mass'].real[idx]
force = results['force_const_dyne'].real[idx]
vib_t = results['vib_temperature'].real[idx]
mode = results['norm_mode'].real[idx]
symbols = [mol.atom_symbol(i) for i in range(mol.natm)]
def inline(q, col0, col1):
return ''.join('%20.4f' % q[i] for i in range(col0, col1))
def mode_inline(row, col0, col1):
return ' '.join('%6.2f%6.2f%6.2f' % (mode[i,row,0], mode[i,row,1], mode[i,row,2])
for i in range(col0, col1))
for col0, col1 in lib.prange(0, nfreq, 3):
dump('Mode %s\n' % ''.join('%20d'%i for i in range(col0,col1)))
dump('Irrep\n')
dump('Freq [cm^-1] %s\n' % inline(freq_wn, col0, col1))
dump('Reduced mass [au] %s\n' % inline(r_mass, col0, col1))
dump('Force const [Dyne/A] %s\n' % inline(force, col0, col1))
dump('Char temp [K] %s\n' % inline(vib_t, col0, col1))
#dump('IR\n')
#dump('Raman\n')
dump('Normal mode %s\n' % (' x y z'*(col1-col0)))
for j, at in enumerate(symbols):
dump(' %4d%4s %s\n' % (j, at, mode_inline(j, col0, col1)))
if __name__ == '__main__':
from pyscf import gto
from pyscf import hessian
mol = gto.M(atom='O 0 0 0; H 0 .757 .587; H 0 -.757 .587')
mass = mol.atom_mass_list(isotope_avg=True)
r = mol.atom_coords() - numpy.random.random((1,3))
print(rotation_const(mass, r, 'GHz'))
print(rotation_const(mass[1:], r[1:], 'GHz'))
print(rotation_const(mass[2:], r[2:], 'GHz'))
mf = mol.apply('HF').run()
hess = hessian.RHF(mf).kernel()
results = harmonic_analysis(mol, hess)
dump_normal_mode(mol, results)
results = thermo(mf, results['freq_au'], 298.15, 101325)
dump_thermo(mol, results)
| apache-2.0 | 4,807,885,719,179,349,000 | 36.239362 | 92 | 0.558777 | false |
mk-fg/yapps | yapps/grammar.py | 1 | 8914 | # grammar.py, part of Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <[email protected]>
# Enhancements copyright 2003-2004 by Matthias Urlichs <[email protected]>
#
# This version of the Yapps 2 grammar can be distributed under the
# terms of the MIT open source license, either found in the LICENSE
# file included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#
"""Parser for Yapps grammars.
This file defines the grammar of Yapps grammars. Naturally, it is
implemented in Yapps. The grammar.py module needed by Yapps is built
by running Yapps on yapps_grammar.g. (Holy circularity, Batman!)
"""
import sys, re
from yapps import parsetree
######################################################################
def cleanup_choice(rule, lst):
    if len(lst) == 0: return parsetree.Sequence(rule, [])
if len(lst) == 1: return lst[0]
return parsetree.Choice(rule, *tuple(lst))
def cleanup_sequence(rule, lst):
if len(lst) == 1: return lst[0]
return parsetree.Sequence(rule, *tuple(lst))
def resolve_name(rule, tokens, id, args):
if id in [x[0] for x in tokens]:
# It's a token
if args:
print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
return parsetree.Terminal(rule, id)
else:
# It's a name, so assume it's a nonterminal
return parsetree.NonTerminal(rule, id, args)
# Begin -- grammar generated by Yapps
import sys, re
from yapps import runtime
class ParserDescriptionScanner(runtime.Scanner):
patterns = [
('"rule"', re.compile('rule')),
('"ignore"', re.compile('ignore')),
('"token"', re.compile('token')),
('"option"', re.compile('option')),
('":"', re.compile(':')),
('"parser"', re.compile('parser')),
('[ \t\r\n]+', re.compile('[ \t\r\n]+')),
('#.*?\r?\n', re.compile('#.*?\r?\n')),
('EOF', re.compile('$')),
('ATTR', re.compile('<<.+?>>')),
('STMT', re.compile('{{.+?}}')),
('ID', re.compile('[a-zA-Z_][a-zA-Z_0-9]*')),
('STR', re.compile('[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"')),
('LP', re.compile('\\(')),
('RP', re.compile('\\)')),
('LB', re.compile('\\[')),
('RB', re.compile('\\]')),
('OR', re.compile('[|]')),
('STAR', re.compile('[*]')),
('PLUS', re.compile('[+]')),
('QUEST', re.compile('[?]')),
('COLON', re.compile(':')),
]
def __init__(self, str,*args,**kw):
runtime.Scanner.__init__(self,None,{'[ \t\r\n]+':None,'#.*?\r?\n':None,},str,*args,**kw)
class ParserDescription(runtime.Parser):
Context = runtime.Context
def Parser(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Parser', [])
self._scan('"parser"', context=_context)
ID = self._scan('ID', context=_context)
self._scan('":"', context=_context)
Options = self.Options(_context)
Tokens = self.Tokens(_context)
Rules = self.Rules(Tokens, _context)
EOF = self._scan('EOF', context=_context)
return parsetree.Generator(ID,Options,Tokens,Rules)
def Options(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Options', [])
opt = {}
while self._peek('"option"', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == '"option"':
self._scan('"option"', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
opt[Str] = 1
return opt
def Tokens(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Tokens', [])
tok = []
while self._peek('"token"', '"ignore"', 'EOF', '"rule"', context=_context) in ['"token"', '"ignore"']:
_token = self._peek('"token"', '"ignore"', context=_context)
if _token == '"token"':
self._scan('"token"', context=_context)
ID = self._scan('ID', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
tok.append( (ID,Str) )
else: # == '"ignore"'
self._scan('"ignore"', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
ign = ('#ignore',Str)
if self._peek('STMT', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == 'STMT':
STMT = self._scan('STMT', context=_context)
ign = ign + (STMT[2:-2],)
tok.append( ign )
return tok
def Rules(self, tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'Rules', [tokens])
rul = []
while self._peek('"rule"', 'EOF', context=_context) == '"rule"':
self._scan('"rule"', context=_context)
ID = self._scan('ID', context=_context)
OptParam = self.OptParam(_context)
self._scan('":"', context=_context)
ClauseA = self.ClauseA(ID, tokens, _context)
rul.append( (ID, OptParam, ClauseA) )
return rul
def ClauseA(self, rule, tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseA', [rule, tokens])
ClauseB = self.ClauseB(rule,tokens, _context)
v = [ClauseB]
while self._peek('OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'OR':
OR = self._scan('OR', context=_context)
ClauseB = self.ClauseB(rule,tokens, _context)
v.append(ClauseB)
return cleanup_choice(rule,v)
def ClauseB(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseB', [rule,tokens])
v = []
while self._peek('STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) in ['STR', 'ID', 'LP', 'LB', 'STMT']:
ClauseC = self.ClauseC(rule,tokens, _context)
v.append(ClauseC)
return cleanup_sequence(rule, v)
def ClauseC(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseC', [rule,tokens])
ClauseD = self.ClauseD(rule,tokens, _context)
_token = self._peek('PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context)
if _token == 'PLUS':
PLUS = self._scan('PLUS', context=_context)
return parsetree.Plus(rule, ClauseD)
elif _token == 'STAR':
STAR = self._scan('STAR', context=_context)
return parsetree.Star(rule, ClauseD)
elif _token == 'QUEST':
QUEST = self._scan('QUEST', context=_context)
return parsetree.Option(rule, ClauseD)
else:
return ClauseD
def ClauseD(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseD', [rule,tokens])
_token = self._peek('STR', 'ID', 'LP', 'LB', 'STMT', context=_context)
if _token == 'STR':
STR = self._scan('STR', context=_context)
t = (STR, eval(STR,{},{}))
if t not in tokens: tokens.insert( 0, t )
return parsetree.Terminal(rule, STR)
elif _token == 'ID':
ID = self._scan('ID', context=_context)
OptParam = self.OptParam(_context)
return resolve_name(rule,tokens, ID, OptParam)
elif _token == 'LP':
LP = self._scan('LP', context=_context)
ClauseA = self.ClauseA(rule,tokens, _context)
RP = self._scan('RP', context=_context)
return ClauseA
elif _token == 'LB':
LB = self._scan('LB', context=_context)
ClauseA = self.ClauseA(rule,tokens, _context)
RB = self._scan('RB', context=_context)
return parsetree.Option(rule, ClauseA)
else: # == 'STMT'
STMT = self._scan('STMT', context=_context)
return parsetree.Eval(rule, STMT[2:-2])
def OptParam(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'OptParam', [])
if self._peek('ATTR', '":"', 'PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'ATTR':
ATTR = self._scan('ATTR', context=_context)
return ATTR[2:-2]
return ''
def Str(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Str', [])
STR = self._scan('STR', context=_context)
return eval(STR,{},{})
def parse(rule, text):
P = ParserDescription(ParserDescriptionScanner(text))
return runtime.wrap_error_reporter(P, rule)
# End -- grammar generated by Yapps
| mit | -1,790,514,124,538,740,200 | 41.447619 | 158 | 0.536123 | false |
tshirtman/ultimate-smash-friends | usf/screens/level.py | 1 | 3174 | ################################################################################
# copyright 2009 Gabriel Pettier <[email protected]> #
# #
# This file is part of Ultimate Smash Friends. #
# #
# Ultimate Smash Friends is free software: you can redistribute it and/or #
# modify it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# Ultimate Smash Friends is distributed in the hope that it will be useful, but#
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or#
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along with #
# Ultimate Smash Friends. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
'''
The Screen to choose the level to be played.
'''
#standard imports
from os.path import join
import os
import logging
#our modules
from usf.screens.screen import Screen
from usf.widgets.box import VBox
from usf.widgets.button import Button
from usf.widgets.coverflow import Coverflow
from usf import CONFIG
from usf.translation import _
class Level(Screen):
def init(self):
self.add(VBox())
coverflow_data = []
#create a level image for every directory in the level directory.
files = os.listdir(os.path.join( CONFIG.system_path, 'levels'))
files.sort()
for f in files:
try:
if 'level.xml' in os.listdir(
os.path.join(CONFIG.system_path, "levels", f)):
coverflow_data.append([])
coverflow_data[-1].append(f)
coverflow_data[-1].append(
join(
CONFIG.system_path,
"levels", f, "screenshot.png"))
except:
logging.debug(str(f) +" is not a valid level.")
self.coverflow = Coverflow(coverflow_data)
self.widget.add(self.coverflow, size=(800, 275))
self.widget.add(Button(_('Go !')), margin_left=290)
self.widget.add(
Button(_('Back')),
size=(150, 40),
margin_left=20,
margin=20)
def get_level(self):
return self.coverflow.get_value()
def callback(self, action):
if action.text == _('Go !'):
return {'game': 'new'}
if action.text == _('Back'):
return {'goto': 'back'}
| gpl-3.0 | -112,941,543,913,260,480 | 37.240964 | 80 | 0.476686 | false |
gustavoatt/consultas | consultas_proyecto/pacientes_app/tests/test_forms.py | 1 | 1548 | from django import test
from pacientes_app import forms
from pacientes_app import models
class PacienteEditFormTestCase(test.TestCase):
def setUp(self):
self.paciente = models.Paciente.objects.create(
cedula='18423347',
fecha_nacimiento='1988-03-26'
)
def test_valid_form(self):
data = {'cedula': '19883999',
'nombres': 'Naymar',
'apellidos': 'Torres',
'genero': models.Paciente.GENEROS[0][0],
'fecha_nacimiento': '1989-10-12',
'estado': models.Paciente.ESTADOS[0][0],
'ciudad': 'Puerto Ayacucho',
}
form = forms.PacienteEditForm(data=data)
self.assertTrue(form.is_valid(), form.errors)
form.save()
self.assertEquals(2, models.Paciente.objects.count())
def test_invalid_form_unique_cedula(self):
data = {'cedula': '18423347',
'nombres': 'Pepe',
'apellidos': 'Aguilar',
'fecha_nacimiento': '2014-01-01',
'genero': models.Paciente.GENEROS[0][0],
'estado': models.Paciente.ESTADOS[0][0],
'ciudad': 'La Paz',
}
form = forms.PacienteEditForm(data=data)
self.assertFalse(form.is_valid())
self.assertTrue('cedula' in form.errors)
def test_invalid_form_missing_fields(self):
data = {'cedula': '13875815',
'nombres': 'Ithamar Alexander',
'apellidos': 'Torres Torres'
}
form = forms.PacienteEditForm(data=data)
self.assertFalse(form.is_valid())
self.assertTrue('fecha_nacimiento' in form.errors)
| mit | 8,823,473,721,192,565,000 | 31.25 | 57 | 0.613049 | false |
eirannejad/pyRevit | pyrevitlib/pyrevit/coreutils/markdown/extensions/footnotes.py | 1 | 14914 | """
Footnotes Extension for Python-Markdown
=======================================
Adds footnote handling to Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/footnotes.html>
for documentation.
Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import re
from . import Extension
from .. import util
from ..inlinepatterns import Pattern
from ..odict import OrderedDict
from ..postprocessors import Postprocessor
from ..preprocessors import Preprocessor
from ..treeprocessors import Treeprocessor
FN_BACKLINK_TEXT = util.STX + "zz1337820767766393qq" + util.ETX
NBSP_PLACEHOLDER = util.STX + "qq3936677670287331zz" + util.ETX
DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
TABBED_RE = re.compile(r'((\t)|( ))(.*)')
RE_REF_ID = re.compile(r'(fnref)(\d+)')
class FootnoteExtension(Extension):
""" Footnote Extension. """
def __init__(self, *args, **kwargs):
""" Setup configs. """
self.config = {
'PLACE_MARKER':
["///Footnotes Go Here///",
"The text string that marks where the footnotes go"],
'UNIQUE_IDS':
[False,
"Avoid name collisions across "
"multiple calls to reset()."],
"BACKLINK_TEXT":
["↩",
"The text string that links from the footnote "
"to the reader's place."]
}
super(FootnoteExtension, self).__init__(*args, **kwargs)
# In multiple invocations, emit links that don't get tangled.
self.unique_prefix = 0
self.found_refs = {}
self.used_refs = set()
self.reset()
def extendMarkdown(self, md, md_globals):
""" Add pieces to Markdown. """
md.registerExtension(self)
self.parser = md.parser
self.md = md
# Insert a preprocessor before ReferencePreprocessor
md.preprocessors.add(
"footnote", FootnotePreprocessor(self), "<reference"
)
# Insert an inline pattern before ImageReferencePattern
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
md.inlinePatterns.add(
"footnote", FootnotePattern(FOOTNOTE_RE, self), "<reference"
)
# Insert a tree-processor that would actually add the footnote div
# This must be before all other treeprocessors (i.e., inline and
        # codehilite) so they can run on the contents of the div.
md.treeprocessors.add(
"footnote", FootnoteTreeprocessor(self), "_begin"
)
# Insert a tree-processor that will run after inline is done.
# In this tree-processor we want to check our duplicate footnote tracker
# And add additional backrefs to the footnote pointing back to the
# duplicated references.
md.treeprocessors.add(
"footnote-duplicate", FootnotePostTreeprocessor(self), '>inline'
)
        # Insert a postprocessor after the amp_substitute processor
md.postprocessors.add(
"footnote", FootnotePostprocessor(self), ">amp_substitute"
)
def reset(self):
""" Clear footnotes on reset, and prepare for distinct document. """
self.footnotes = OrderedDict()
self.unique_prefix += 1
self.found_refs = {}
self.used_refs = set()
def unique_ref(self, reference, found=False):
""" Get a unique reference if there are duplicates. """
if not found:
return reference
original_ref = reference
while reference in self.used_refs:
ref, rest = reference.split(self.get_separator(), 1)
m = RE_REF_ID.match(ref)
if m:
reference = '%s%d%s%s' % (m.group(1), int(m.group(2))+1, self.get_separator(), rest)
else:
reference = '%s%d%s%s' % (ref, 2, self.get_separator(), rest)
self.used_refs.add(reference)
if original_ref in self.found_refs:
self.found_refs[original_ref] += 1
else:
self.found_refs[original_ref] = 1
return reference
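    # Illustrative behaviour of unique_ref (derived from the logic above): with
    # the ':' separator, repeated requests for 'fnref:1' yield 'fnref:1', then
    # 'fnref2:1', then 'fnref3:1', and so on.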
def findFootnotesPlaceholder(self, root):
""" Return ElementTree Element that contains Footnote placeholder. """
def finder(element):
for child in element:
if child.text:
if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, True
if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, False
child_res = finder(child)
if child_res is not None:
return child_res
return None
res = finder(root)
return res
def setFootnote(self, id, text):
""" Store a footnote for later retrieval. """
self.footnotes[id] = text
def get_separator(self):
if self.md.output_format in ['html5', 'xhtml5']:
return '-'
return ':'
def makeFootnoteId(self, id):
""" Return footnote link id. """
if self.getConfig("UNIQUE_IDS"):
return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
else:
return 'fn%s%s' % (self.get_separator(), id)
def makeFootnoteRefId(self, id, found=False):
""" Return footnote back-link id. """
if self.getConfig("UNIQUE_IDS"):
return self.unique_ref('fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id), found)
else:
return self.unique_ref('fnref%s%s' % (self.get_separator(), id), found)
def makeFootnotesDiv(self, root):
""" Return div of footnotes as et Element. """
if not list(self.footnotes.keys()):
return None
div = util.etree.Element("div")
div.set('class', 'footnote')
util.etree.SubElement(div, "hr")
ol = util.etree.SubElement(div, "ol")
surrogate_parent = util.etree.Element("div")
for id in self.footnotes.keys():
li = util.etree.SubElement(ol, "li")
li.set("id", self.makeFootnoteId(id))
# Parse footnote with surrogate parent as li cannot be used.
# List block handlers have special logic to deal with li.
# When we are done parsing, we will copy everything over to li.
self.parser.parseChunk(surrogate_parent, self.footnotes[id])
for el in list(surrogate_parent):
li.append(el)
surrogate_parent.remove(el)
backlink = util.etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id))
if self.md.output_format not in ['html5', 'xhtml5']:
backlink.set("rev", "footnote") # Invalid in HTML5
backlink.set("class", "footnote-backref")
backlink.set(
"title",
"Jump back to footnote %d in the text" %
(self.footnotes.index(id)+1)
)
backlink.text = FN_BACKLINK_TEXT
if li.getchildren():
node = li[-1]
if node.tag == "p":
node.text = node.text + NBSP_PLACEHOLDER
node.append(backlink)
else:
p = util.etree.SubElement(li, "p")
p.append(backlink)
return div
class FootnotePreprocessor(Preprocessor):
""" Find all footnote references and store for later use. """
def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, lines):
"""
Loop through lines and find, set, and remove footnote definitions.
Keywords:
* lines: A list of lines of text
Return: A list of lines of text with footnote definitions removed.
"""
newlines = []
i = 0
while True:
m = DEF_RE.match(lines[i])
if m:
fn, _i = self.detectTabbed(lines[i+1:])
fn.insert(0, m.group(2))
i += _i-1 # skip past footnote
self.footnotes.setFootnote(m.group(1), "\n".join(fn))
else:
newlines.append(lines[i])
if len(lines) > i+1:
i += 1
else:
break
return newlines
def detectTabbed(self, lines):
""" Find indented text and remove indent before further proccesing.
Keyword arguments:
* lines: an array of strings
Returns: a list of post processed items and the index of last line.
"""
items = []
blank_line = False # have we encountered a blank line yet?
i = 0 # to keep track of where we are
def detab(line):
match = TABBED_RE.match(line)
if match:
return match.group(4)
for line in lines:
if line.strip(): # Non-blank line
detabbed_line = detab(line)
if detabbed_line:
items.append(detabbed_line)
i += 1
continue
elif not blank_line and not DEF_RE.match(line):
# not tabbed but still part of first par.
items.append(line)
i += 1
continue
else:
return items, i+1
else: # Blank line: _maybe_ we are done.
blank_line = True
i += 1 # advance
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next_line = lines[j]
break
else:
break # There is no more text; we are done.
# Check if the next non-blank line is tabbed
if detab(next_line): # Yes, more work to do.
items.append("")
continue
else:
break # No, we are done.
else:
i += 1
return items, i
class FootnotePattern(Pattern):
""" InlinePattern for footnote markers in a document's body text. """
def __init__(self, pattern, footnotes):
super(FootnotePattern, self).__init__(pattern)
self.footnotes = footnotes
def handleMatch(self, m):
id = m.group(2)
if id in self.footnotes.footnotes.keys():
sup = util.etree.Element("sup")
a = util.etree.SubElement(sup, "a")
sup.set('id', self.footnotes.makeFootnoteRefId(id, found=True))
a.set('href', '#' + self.footnotes.makeFootnoteId(id))
if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
a.set('rel', 'footnote') # invalid in HTML5
a.set('class', 'footnote-ref')
a.text = unicode(self.footnotes.footnotes.index(id) + 1)
return sup
else:
return None
class FootnotePostTreeprocessor(Treeprocessor):
""" Ammend footnote div with duplicates. """
def __init__(self, footnotes):
self.footnotes = footnotes
def add_duplicates(self, li, duplicates):
""" Adjust current li and add the duplicates: fnref2, fnref3, etc. """
for link in li.iter('a'):
# Find the link that needs to be duplicated.
if link.attrib.get('class', '') == 'footnote-backref':
ref, rest = link.attrib['href'].split(self.footnotes.get_separator(), 1)
# Duplicate link the number of times we need to
# and point the to the appropriate references.
links = []
for index in range(2, duplicates + 1):
sib_link = copy.deepcopy(link)
sib_link.attrib['href'] = '%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest)
links.append(sib_link)
self.offset += 1
# Add all the new duplicate links.
el = list(li)[-1]
for l in links:
el.append(l)
break
def get_num_duplicates(self, li):
""" Get the number of duplicate refs of the footnote. """
fn, rest = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1)
link_id = '%sref%s%s' % (fn, self.footnotes.get_separator(), rest)
return self.footnotes.found_refs.get(link_id, 0)
def handle_duplicates(self, parent):
""" Find duplicate footnotes and format and add the duplicates. """
for li in list(parent):
# Check number of duplicates footnotes and insert
# additional links if needed.
count = self.get_num_duplicates(li)
if count > 1:
self.add_duplicates(li, count)
def run(self, root):
""" Crawl the footnote div and add missing duplicate footnotes. """
self.offset = 0
for div in root.iter('div'):
if div.attrib.get('class', '') == 'footnote':
                # Footnotes should be under the first ordered list under
# the footnote div. So once we find it, quit.
for ol in div.iter('ol'):
self.handle_duplicates(ol)
break
class FootnoteTreeprocessor(Treeprocessor):
""" Build and append footnote div to end of document. """
def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, root):
footnotesDiv = self.footnotes.makeFootnotesDiv(root)
if footnotesDiv is not None:
result = self.footnotes.findFootnotesPlaceholder(root)
if result:
child, parent, isText = result
ind = parent.getchildren().index(child)
if isText:
parent.remove(child)
parent.insert(ind, footnotesDiv)
else:
parent.insert(ind + 1, footnotesDiv)
child.tail = None
else:
root.append(footnotesDiv)
class FootnotePostprocessor(Postprocessor):
""" Replace placeholders with html entities. """
def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, text):
text = text.replace(
FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")
)
return text.replace(NBSP_PLACEHOLDER, " ")
def makeExtension(*args, **kwargs):
""" Return an instance of the FootnoteExtension """
return FootnoteExtension(*args, **kwargs)
| gpl-3.0 | -9,122,525,648,859,434,000 | 34.679426 | 109 | 0.545058 | false |
HaroldMills/Vesper | scripts/create_randomized_station_night_list.py | 1 | 2218 | '''
Script that creates a randomized list of station/night pairs.
Given a list of station names and a list of night dates, this script
writes a semi-randomized list of the elements of the cartesian product
of the two lists to a CSV file. The station names cycle through an
alphabetized version of the input station names list, while for each
station the input night dates appear in a randomly shuffled order that
is (probably) different for each station.
'''
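# Shape of the output CSV (illustrative only; the actual night dates depend on
# the fixed random seed set below):
#
#   Station,Night
#   Angela,<shuffled 2017-08/09 date>
#   Bear,<shuffled 2017-08/09 date>
#   ...
#   Willow,<shuffled 2017-08/09 date>
#   Angela,<next date from Angela's shuffled list>
#   ...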
import calendar
import csv
import itertools
import random
STATION_NAMES = sorted('''
Angela
Bear
Bell Crossing
Darby
Dashiell
Davies
Deer Mountain
Floodplain
Florence
KBK
Lilo
MPG North
Nelson
Oxbow
Powell
Reed
Ridge
Seeley
Sheep Camp
St Mary
Sula Peak
Teller
Troy
Walnut
Weber
Willow
'''.strip().split('\n'))
YEAR_MONTH_PAIRS = [(2017, 8), (2017, 9)]
OUTPUT_FILE_PATH = '/Users/harold/Desktop/Station-Nights.csv'
def main():
# Seed random number generation so we get the same output every time
# we run this script.
random.seed(0)
station_nights = get_station_nights()
write_csv_file(station_nights)
def get_station_nights():
dates = get_dates()
station_night_rows = [
get_station_night_list(n, dates) for n in STATION_NAMES]
station_night_columns = zip(*station_night_rows)
return itertools.chain.from_iterable(station_night_columns)
def get_station_night_list(station_name, dates):
dates = random.sample(dates, len(dates))
return [(station_name, d) for d in dates]
def get_dates():
date_lists = [get_dates_aux(*p) for p in YEAR_MONTH_PAIRS]
return list(itertools.chain.from_iterable(date_lists))
def get_dates_aux(year, month):
num_days = calendar.monthrange(year, month)[1]
prefix = '{:d}-{:02d}-'.format(year, month)
f = prefix + '{:02d}'
return [f.format(d) for d in range(1, num_days + 1)]
def write_csv_file(station_nights):
with open(OUTPUT_FILE_PATH, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(('Station', 'Night'))
for pair in station_nights:
writer.writerow(pair)
if __name__ == '__main__':
main()
| mit | -5,986,494,067,936,976,000 | 20.745098 | 72 | 0.671326 | false |
shakamunyi/sahara | sahara/tests/unit/plugins/cdh/test_versionfactory.py | 1 | 1665 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avh
from sahara.plugins.cdh import versionfactory as vf
from sahara.tests.unit import base
class VersionFactoryTestCase(base.SaharaTestCase):
def test_get_instance(self):
self.assertFalse(vf.VersionFactory.initialized)
factory = vf.VersionFactory.get_instance()
self.assertIsInstance(factory, vf.VersionFactory)
self.assertTrue(vf.VersionFactory.initialized)
def test_get_versions(self):
factory = vf.VersionFactory.get_instance()
versions = factory.get_versions()
expected_versions = self.get_support_versions()
self.assertEqual(expected_versions, versions)
def test_get_version_handler(self):
factory = vf.VersionFactory.get_instance()
versions = self.get_support_versions()
for version in versions:
            handler = factory.get_version_handler(version)
            self.assertIsInstance(handler, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5.5.0', '5.7.0', '5.9.0', '5.11.0']
| apache-2.0 | -8,801,172,620,770,967,000 | 37.72093 | 69 | 0.718919 | false |
SamProtas/PALiquor | geocode_fixes.py | 1 | 2733 | import os
import pandas as pd
import numpy as np
import sqlite3
import requests
import time
def fix_location(lid, new_address):
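    """Geocode ``new_address`` for licensee ``lid`` and store the result.
    Skips the Google Geocoding API call (and returns early) when the licensee
    already has a latitude or longitude recorded in licensees.db; otherwise
    updates the address, latitude and longitude columns for that row.
    """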
pd.set_option('display.mpl_style', 'default')
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE1 = os.path.join(PROJECT_ROOT, 'dbs', 'licensees.db')
conn1 = sqlite3.connect(DATABASE1)
c = conn1.cursor()
c.execute('SELECT address, latitude, longitude FROM licensees WHERE lid = ?',[lid])
old_info = c.fetchone()
old_latitude = old_info[1]
old_longitude = old_info[2]
if old_latitude or old_longitude:
return 'No need to fix. Aborting geocode call.'
api_key = 'NOT MY REAL KEY!!!!!'
baseurl = 'https://maps.googleapis.com/maps/api/geocode/json?key='+api_key+'&address='
fullurl = baseurl + new_address
page = requests.get(fullurl)
latitude = page.json()['results'][0]['geometry']['location']['lat']
longitude = page.json()['results'][0]['geometry']['location']['lng']
c.execute('UPDATE licensees SET address = ?, latitude = ?, longitude = ? WHERE lid = ?',[new_address, latitude, longitude, lid])
conn1.commit()
c.close()
return 'Good Fix'
# Manually fixed addresses
fix_location(233,'US Customs House Chestnut Street Philadelphia PA')
time.sleep(.2)
fix_location(43444, '431 South Streeet Philadelphia PA')
time.sleep(.2)
fix_location(45162, '2457 Grant Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(69585, '2400 Strawberry Mansion Drive Philadelphia, PA 19132')
time.sleep(.2)
fix_location(44218, 'Chickie and Petes Roosevelt Boulevard, Philadelphia, PA 19116')
time.sleep(.2)
fix_location(48788, 'Diamond Club at Mitten Hall 1913 North Broad Street Philadelphia, PA 19122')
time.sleep(.2)
fix_location(64349, '51 North 12th Street Philadelphia, PA 19107')
time.sleep(.2)
fix_location(64754, '1420 Locust Street Philadelphia PA 19102')
time.sleep(.2)
fix_location(50302, '39 Snyder Ave Philadelphia PA 19148')
time.sleep(.2)
fix_location(61215, '9910 Frankford Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(65590, '11000 E Roosevelt BLVD Philadelphia PA')
time.sleep(.2)
fix_location(26715, 'Knights Road Shopping Center 4018 Woodhaven Road Philadelphia, PA 19154')
time.sleep(.2)
fix_location(66741, '9183 Roosevelt BLVD Philadelphia PA 19114')
time.sleep(.2)
fix_location(65221, '129 S 30th St Philadelphia PA 19104')
time.sleep(.2)
fix_location(23775, 'The Bellevue Philadelphia PA 19103')
time.sleep(.2)
fix_location(55796, '5765 Wister St Philadelphia PA 19138')
time.sleep(.2)
fix_location(25469, 'Market East Philadelphia PA 19107')
time.sleep(.2)
fix_location(1140, 'torresdale and decatour, philadelphia pa')
| gpl-2.0 | -2,758,837,332,182,143,000 | 33.1625 | 136 | 0.706915 | false |
markherringer/waywayd | third-party/django_multilingual/multilingual/query.py | 1 | 26323 | """
Django-multilingual: a QuerySet subclass for models with translatable
fields.
This file contains the implementation for QSRF Django.
"""
import datetime
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query import QuerySet, Q
from django.db.models.sql.query import Query
from django.db.models.sql.datastructures import (
EmptyResultSet,
Empty,
MultiJoin)
from django.db.models.sql.constants import *
from django.db.models.sql.where import WhereNode, EverythingNode, AND, OR
try:
# handle internal API changes in Django rev. 9700
from django.db.models.sql.where import Constraint
def constraint_tuple(alias, col, field, lookup_type, value):
return (Constraint(alias, col, field), lookup_type, value)
except ImportError:
# backwards compatibility, for Django versions 1.0 to rev. 9699
def constraint_tuple(alias, col, field, lookup_type, value):
return (alias, col, field, lookup_type, value)
from multilingual.languages import (
get_translation_table_alias,
get_language_id_list,
get_default_language,
get_translated_field_alias,
get_language_id_from_id_or_code)
__ALL__ = ['MultilingualModelQuerySet']
class MultilingualQuery(Query):
def __init__(self, model, connection, where=WhereNode):
self.extra_join = {}
self.include_translation_data = True
extra_select = {}
super(MultilingualQuery, self).__init__(model, connection, where=where)
opts = self.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
master_table_name = opts.db_table
translation_opts = opts.translation_model._meta
trans_table_name = translation_opts.db_table
if hasattr(opts, 'translation_model'):
master_table_name = opts.db_table
for language_id in get_language_id_list():
for fname in [f.attname for f in translation_opts.fields]:
table_alias = get_translation_table_alias(trans_table_name,
language_id)
field_alias = get_translated_field_alias(fname,
language_id)
extra_select[field_alias] = qn2(table_alias) + '.' + qn2(fname)
self.add_extra(extra_select, None, None, None, None, None)
self._trans_extra_select_count = len(self.extra_select)
def clone(self, klass=None, **kwargs):
defaults = {
'extra_join': self.extra_join,
'include_translation_data': self.include_translation_data,
}
defaults.update(kwargs)
return super(MultilingualQuery, self).clone(klass=klass, **defaults)
def pre_sql_setup(self):
"""Adds the JOINS and SELECTS for fetching multilingual data.
"""
super(MultilingualQuery, self).pre_sql_setup()
if not self.include_translation_data:
return
opts = self.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if hasattr(opts, 'translation_model'):
master_table_name = opts.db_table
translation_opts = opts.translation_model._meta
trans_table_name = translation_opts.db_table
for language_id in get_language_id_list():
table_alias = get_translation_table_alias(trans_table_name,
language_id)
trans_join = ('LEFT JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_id = %s))'
% (qn2(translation_opts.db_table),
qn2(table_alias),
qn2(table_alias),
qn(master_table_name),
qn2(self.model._meta.pk.column),
qn2(table_alias),
language_id))
self.extra_join[table_alias] = trans_join
def get_from_clause(self):
"""Add the JOINS for related multilingual fields filtering.
"""
result = super(MultilingualQuery, self).get_from_clause()
if not self.include_translation_data:
return result
from_ = result[0]
for join in self.extra_join.values():
from_.append(join)
return (from_, result[1])
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
can_reuse=None, process_extras=True):
"""Copied from add_filter to generate WHERES for translation fields.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from 'parts', if necessary.
if len(parts) == 1 or parts[-1] not in self.query_terms:
lookup_type = 'exact'
else:
lookup_type = parts.pop()
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif (value == '' and lookup_type == 'exact' and
connection.features.interprets_empty_strings_as_nulls):
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = trim or not negate
try:
field, target, opts, join_list, last, extra_filters = self.setup_joins(
parts, opts, alias, True, allow_many, can_reuse=can_reuse,
negate=negate, process_extras=process_extras)
except MultiJoin, e:
self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse)
return
#NOTE: here comes Django Multilingual
if hasattr(opts, 'translation_model'):
field_name = parts[-1]
if field_name == 'pk':
field_name = opts.pk.name
translation_opts = opts.translation_model._meta
if field_name in translation_opts.translated_fields.keys():
field, model, direct, m2m = opts.get_field_by_name(field_name)
if model == opts.translation_model:
language_id = translation_opts.translated_fields[field_name][1]
if language_id is None:
language_id = get_default_language()
master_table_name = opts.db_table
trans_table_alias = get_translation_table_alias(
model._meta.db_table, language_id)
new_table = (master_table_name + "__" + trans_table_alias)
self.where.add(constraint_tuple(new_table, field.column, field, lookup_type, value), connector)
return
final = len(join_list)
penultimate = last.pop()
if penultimate == final:
penultimate = last.pop()
if trim and len(join_list) > 1:
extra = join_list[penultimate:]
join_list = join_list[:penultimate]
final = penultimate
penultimate = last.pop()
col = self.alias_map[extra[0]][LHS_JOIN_COL]
for alias in extra:
self.unref_alias(alias)
else:
col = target.column
alias = join_list[-1]
while final > 1:
# An optimization: if the final join is against the same column as
# we are comparing against, we can go back one step in the join
# chain and compare against the lhs of the join instead (and then
# repeat the optimization). The result, potentially, involves less
# table joins.
join = self.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
join_list = join_list[:-1]
final -= 1
if final == penultimate:
penultimate = last.pop()
if (lookup_type == 'isnull' and value is True and not negate and
final > 1):
# If the comparison is against NULL, we need to use a left outer
# join when connecting to the previous model. We make that
# adjustment here. We don't do this unless needed as it's less
# efficient at the database level.
self.promote_alias(join_list[penultimate])
if connector == OR:
# Some joins may need to be promoted when adding a new filter to a
# disjunction. We walk the list of new joins and where it diverges
# from any previous joins (ref count is 1 in the table list), we
# make the new additions (and any existing ones not used in the new
# join list) an outer join.
join_it = iter(join_list)
table_it = iter(self.tables)
join_it.next(), table_it.next()
table_promote = False
join_promote = False
for join in join_it:
table = table_it.next()
if join == table and self.alias_refcount[join] > 1:
continue
join_promote = self.promote_alias(join)
if table != join:
table_promote = self.promote_alias(table)
break
self.promote_alias_chain(join_it, join_promote)
self.promote_alias_chain(table_it, table_promote)
self.where.add(constraint_tuple(alias, col, field, lookup_type, value), connector)
if negate:
self.promote_alias_chain(join_list)
if lookup_type != 'isnull':
if final > 1:
for alias in join_list:
if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
j_col = self.alias_map[alias][RHS_JOIN_COL]
entry = self.where_class()
entry.add(constraint_tuple(alias, j_col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
break
elif not (lookup_type == 'in' and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
entry = self.where_class()
entry.add(constraint_tuple(alias, col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
if can_reuse is not None:
can_reuse.update(join_list)
if process_extras:
for filter in extra_filters:
self.add_filter(filter, negate=negate, can_reuse=can_reuse,
process_extras=False)
def _setup_joins_with_translation(self, names, opts, alias,
dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None,
negate=False, process_extras=True):
"""
This is based on a full copy of Query.setup_joins because
currently I see no way to handle it differently.
TO DO: there might actually be a way, by splitting a single
multi-name setup_joins call into separate calls. Check it.
-- [email protected]
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters).
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
list of tables joined.
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
extra_filters = []
for pos, name in enumerate(names):
try:
exclusions.add(int_alias)
except NameError:
pass
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
names = opts.get_all_field_names()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
#NOTE: Start Django Multilingual specific code
if hasattr(opts, 'translation_model'):
translation_opts = opts.translation_model._meta
if model == opts.translation_model:
language_id = translation_opts.translated_fields[name][1]
if language_id is None:
language_id = get_default_language()
#TODO: check alias
master_table_name = opts.db_table
trans_table_alias = get_translation_table_alias(
model._meta.db_table, language_id)
new_table = (master_table_name + "__" + trans_table_alias)
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
trans_join = ('LEFT JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_id = %s))'
% (qn2(model._meta.db_table),
qn2(new_table),
qn2(new_table),
qn(master_table_name),
qn2(model._meta.pk.column),
qn2(new_table),
language_id))
self.extra_join[new_table] = trans_join
target = field
continue
#NOTE: End Django Multilingual specific code
elif model:
# The field lives on a base class of the current model.
for int_model in opts.get_base_chain(model):
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if process_extras and hasattr(field, 'extra_filters'):
extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_column_name()
opts = field.rel.to._meta
table2 = opts.db_table
from_col2 = field.m2m_reverse_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
if int_alias == table2 and from_col2 == to_col2:
joins.append(int_alias)
alias = int_alias
else:
alias = self.join(
(int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
opts = field.rel.to._meta
target = field.rel.get_related_field()
table = opts.db_table
from_col = field.column
to_col = target.column
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
target = field
break
else:
orig_field = field
field = field.field
if m2m:
# Many-to-many field defined on the target model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_reverse_name()
opts = orig_field.opts
table2 = opts.db_table
from_col2 = field.m2m_column_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
local_field = opts.get_field_by_name(
field.rel.field_name)[0]
opts = orig_field.opts
table = opts.db_table
from_col = local_field.column
to_col = field.column
target = opts.pk
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
try:
self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
except NameError:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if pos != len(names) - 1:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last, extra_filters
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None, negate=False,
process_extras=True):
if not self.include_translation_data:
return super(MultilingualQuery, self).setup_joins(names, opts, alias,
dupe_multis, allow_many,
allow_explicit_fk,
can_reuse, negate,
process_extras)
else:
return self._setup_joins_with_translation(names, opts, alias, dupe_multis,
allow_many, allow_explicit_fk,
can_reuse, negate, process_extras)
def get_count(self):
# optimize for the common special case: count without any
# filters
if ((not (self.select or self.where or self.extra_where))
and self.include_translation_data):
obj = self.clone(extra_select = {},
extra_join = {},
include_translation_data = False)
return obj.get_count()
else:
return super(MultilingualQuery, self).get_count()
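    # Hedged illustration (not part of the original source): a bare
    # SomeModel.objects.count() carries no filters that could reference translated
    # fields, so the clone above drops the per-language LEFT JOINs and counts
    # against the master table alone; filtered counts fall through to the
    # translation-aware path. The model name is hypothetical.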
class MultilingualModelQuerySet(QuerySet):
"""
A specialized QuerySet that knows how to handle translatable
fields in ordering and filtering methods.
"""
def __init__(self, model=None, query=None):
query = query or MultilingualQuery(model, connection)
super(MultilingualModelQuerySet, self).__init__(model, query)
def for_language(self, language_id_or_code):
"""
Set the default language for all objects returned with this
query.
"""
clone = self._clone()
clone._default_language = get_language_id_from_id_or_code(language_id_or_code)
return clone
def iterator(self):
"""
Add the default language information to all returned objects.
"""
default_language = getattr(self, '_default_language', None)
for obj in super(MultilingualModelQuerySet, self).iterator():
obj._default_language = default_language
yield obj
def _clone(self, klass=None, **kwargs):
"""
Override _clone to preserve additional information needed by
MultilingualModelQuerySet.
"""
clone = super(MultilingualModelQuerySet, self)._clone(klass, **kwargs)
clone._default_language = getattr(self, '_default_language', None)
return clone
def order_by(self, *field_names):
if hasattr(self.model._meta, 'translation_model'):
trans_opts = self.model._meta.translation_model._meta
new_field_names = []
for field_name in field_names:
prefix = ''
if field_name[0] == '-':
prefix = '-'
field_name = field_name[1:]
field_and_lang = trans_opts.translated_fields.get(field_name)
if field_and_lang:
field, language_id = field_and_lang
if language_id is None:
language_id = getattr(self, '_default_language', None)
real_name = get_translated_field_alias(field.attname,
language_id)
new_field_names.append(prefix + real_name)
else:
new_field_names.append(prefix + field_name)
return super(MultilingualModelQuerySet, self).extra(order_by=new_field_names)
else:
return super(MultilingualModelQuerySet, self).order_by(*field_names)
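    # Hedged illustration (not part of the original source): for a model whose
    # translation declares a field "title", a call such as
    # Article.objects.order_by('-title') is rewritten above into
    # extra(order_by=[...]) using the per-language alias produced by
    # get_translated_field_alias(), so the ORDER BY targets the translated column
    # selected by MultilingualQuery. Model and field names are hypothetical.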
    def values(self, *fields):
        return super(MultilingualModelQuerySet, self).values(*fields)
def values_list(self, *fields, **kwargs):
raise NotImplementedError
| agpl-3.0 | 8,766,511,261,829,303,000 | 43.919795 | 115 | 0.512821 | false |
Braiiin/outline-client | outline_client/admin/views.py | 1 | 2399 | from flask import Blueprint, render_template, request, redirect, url_for, \
jsonify
from flask_login import login_required, current_user
from outline_client.libs.outline import Outline
from client.libs.service import Service, Employment
from .forms import AddOutlineForm, EditOutlineForm
import functools
# setup admin
admin = Blueprint('admin', __name__, url_prefix='/admin')
def employee_required(f):
"""employee status required - otherwise, redirect to join page"""
@functools.wraps(f)
def helper(*args, **kwargs):
service = Service(name='Outline').get_or_create()
employee = Employment(
user=current_user.id,
service=service.id).get()
if not employee:
return redirect(url_for('public.join'))
return f(*args, **kwargs)
return helper
@admin.route('/')
@login_required
@employee_required
def home():
"""Admin panel home"""
outlines = Outline().fetch()
return render_template('admin/home.html', outlines=outlines)
@admin.route('/outline/create', methods=['POST', 'GET'])
@login_required
@employee_required
def outline_create():
"""Add a new outline"""
form = AddOutlineForm(author=current_user.id)
if request.method == 'POST':
outline = Outline(**request.form).post()
return redirect(url_for('admin.home'))
return render_template('form.html', **locals())
@admin.route('/outline/<string:outlineId>/edit', methods=['POST', 'GET'])
@login_required
@employee_required
def outline_edit(outlineId):
"""Edit an outline"""
outline = Outline(id=outlineId).get()
outline.hashtags = ', '.join(['#'+h for h in outline.hashtags])
form = EditOutlineForm(**outline._data)
if request.method == 'POST':
outline.created_at = outline.updated_at = None
outline.load(**request.form).put()
return redirect(url_for('admin.home'))
return render_template('form.html', **locals())
@admin.route('/outline/<string:outlineId>/delete', methods=['POST', 'GET'])
@login_required
@employee_required
def outline_delete(outlineId):
"""Delete an outline"""
outline = Outline(id=outlineId).get()
if request.method == 'POST':
outline.delete()
return 'Successfully deleted outline "%s"' % outline.title
return render_template('admin/confirm.html',
cancel=url_for('admin.outline_edit', outlineId=outlineId))
| apache-2.0 | -3,712,959,128,799,209,500 | 31.863014 | 75 | 0.668612 | false |
cst13/canstel | do_release.py | 1 | 1732 | #!/usr/bin/env python
import sys
import argparse
import pkg_resources
from moneywagon import service_table
from moneywagon.supply_estimator import write_blocktime_adjustments
from subprocess import call
parser = argparse.ArgumentParser()
parser.add_argument('--minor', action='store_true', help='Make minor release.')
parser.add_argument('--major', action='store_true', help='Make major release.')
parser.add_argument('--point', action='store_true', help='Make point release.')
parser.add_argument('--skip-blocktime-adjustments', default=True, action='store_true', help='Skip calculating blocktime adjustments')
argz = parser.parse_args()
if not argz.skip_blocktime_adjustments:
import debug
    print "Writing new blocktime adjustments..."
write_blocktime_adjustments("moneywagon/blocktime_adjustments.py", random=True, verbose=True)
ex_major, ex_minor, ex_point = pkg_resources.get_distribution('moneywagon').version.split(".")
version = "%s.%s.%s"
if argz.major:
version = version % (int(ex_major) + 1, "0", "0")
elif argz.minor:
version = version % (ex_major, int(ex_minor) + 1, "0")
elif argz.point:
    version = version % (ex_major, ex_minor, int(ex_point) + 1)
else:
    sys.exit("Specify one of --major, --minor or --point to make a release.")
with open("setup.py", 'w') as f, open("setup_template.py") as t:
setup = t.read().replace("{{ version }}", version)
f.write(setup)
with open("README.md", 'w') as f, open("README_template.md") as t:
table = service_table(format='html')
readme = t.read().replace("{{ service_table }}", table)
f.write(readme)
call(["python", "setup.py", "sdist", "upload"])
call(["python", "setup.py", "develop"])
call(["git", "commit", "-am", "Made release %s" % version])
call(["git", "tag", version])
call(["git", "push", "--tags"])
| mit | 6,398,635,532,910,389,000 | 35.083333 | 133 | 0.684758 | false |
mjtamlyn/archery-scoring | tamlynscore/settings.py | 1 | 3308 | import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = not os.environ.get('PRODUCTION')
ALLOWED_HOSTS = [
'tamlynscore.co.uk',
'www.tamlynscore.co.uk',
'127.0.0.1',
'localhost',
'192.168.1.101',
'192.168.1.100',
]
ADMINS = (
('Marc Tamlyn', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {'default': dj_database_url.config(default='postgres://localhost/tamlynscore')}
TIME_ZONE = 'Europe/London'
USE_TZ = True
LANGUAGE_CODE = 'en-gb'
USE_I18N = True
USE_L10N = True
SECRET_KEY = '(0z9j8dsp!3&@tqx$=&@56&q!pr5(1&6wd0*&7@%hiwt3@k!qt'
SITE_ID = 1
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'collected_static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.request',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
),
},
}]
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = os.environ.get('ROOT_URLCONF', 'tamlynscore.urls')
INSTALLED_APPS = (
'tamlynscore',
'core',
'leagues',
'entries',
'scores',
'olympic',
'accounts',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'gunicorn',
'debug_toolbar',
'floppyforms',
'custom_user',
'raven.contrib.django.raven_compat',
)
TEST_RUNNER = 'tests.runner.ScoringRunner'
LOGIN_REDIRECT_URL = '/'
AUTH_USER_MODEL = 'core.User'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
if not DEBUG and not os.environ.get('LOCAL'):
SESSION_COOKIE_SECURE = True
X_FRAME_OPTIONS = 'DENY'
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
MIDDLEWARE = ('sslify.middleware.SSLifyMiddleware',) + MIDDLEWARE
DEBUG_TOOLBAR_CONFIG = {
'JQUERY_URL': STATIC_URL + 'lib/jquery/jquery-2.1.3.min.js',
}
if os.environ.get('RAVEN_DSN'):
RAVEN_CONFIG = {
'dsn': os.environ['RAVEN_DSN'],
}
CURRENT_EVENT = os.environ.get('CURRENT_EVENT', 'bucs-outdoors-2014')
| bsd-3-clause | 7,236,107,738,233,246,000 | 25.464 | 91 | 0.662334 | false |
goujonpa/assotextmining | gabriel.py | 1 | 3496 | # gets all the asso posts and puts it in a single .txt file
import json
import os
txt_250 = list()
txt_500 = list()
txt_1000 = list()
txt_1500 = list()
txt_plus = list()
stats_250 = list()
stats_500 = list()
stats_1000 = list()
stats_1500 = list()
stats_plus = list()
varrad = u'txt_'
varrad2 = u'stats_'
for i in range(0, 51):
try:
with open(("./data/cleaned_assoc_" + str(i) + ".txt"), "r") as in_file:
data = json.loads(in_file.read())
except Exception as e:
print("Error " + str(i) + str(e))
for j in range(0, len(data)):
try:
msg = data[j]["message"]
if len(msg.replace('\n', '').replace(' ', '')) <= 250:
suffix = '250'
elif len(msg.replace('\n', '').replace(' ', '')) <= 500:
suffix = '500'
elif len(msg.replace('\n', '').replace(' ', '')) <= 1000:
suffix = '1000'
elif len(msg.replace('\n', '').replace(' ', '')) <= 1500:
suffix = '1500'
else:
suffix = 'plus'
txt = eval(varrad + suffix)
txt.append("\n========== Page : " + str(i) + " Post : " + str(j) + "\n")
txt.append('Characters number (no space no linebreak):' + str(len(msg.replace('\n', '').replace(' ', ''))) + '\n')
txt.append('Line breaks : ' + str(msg.count('\n')) + '\n')
txt.append('? number : ' + str(msg.count('?')) + '\n')
txt.append('! number : ' + str(msg.count('!')) + '\n')
txt.append('[ number : ' + str(msg.count('[')) + '\n')
txt.append('FROM : ' + data[j]['from']['name'] + '\n')
#txt.append('DATE : ' + data[j]['update_time'] + '\n')
txt.append('MESSAGE : ' + msg + '\n')
txt.append('============================\n\n============================\n\n')
stat = eval(varrad2 + suffix)
stat.append([len(msg.replace('\n', '').replace(' ', '')), msg.count('\n'), msg.count('?'), msg.count('!'), msg.count('[')])
except Exception as e:
print("Error file " + str(i) + " msg " + str(j) + " : " + str(e))
sums = {
'250': [sum(col) for col in zip(*stats_250)],
'500': [sum(col) for col in zip(*stats_500)],
'1000': [sum(col) for col in zip(*stats_1000)],
'1500': [sum(col) for col in zip(*stats_1500)],
'plus': [sum(col) for col in zip(*stats_plus)],
}
for i in [250, 500, 1000, 1500, 'plus']:
char_mean = sums[str(i)][0] / float(len(eval('stats_' + str(i))))
lb_mean = sums[str(i)][1] / float(len(eval('stats_' + str(i))))
qu_mean = sums[str(i)][2] / float(len(eval('stats_' + str(i))))
exc_mean = sums[str(i)][3] / float(len(eval('stats_' + str(i))))
brack_mean = sums[str(i)][4] / float(len(eval('stats_' + str(i))))
txt = eval('txt_' + str(i))
txt.append(('\n\nCharacter mean : ' + str(char_mean) + '\nLine breaks mean : ' + str(lb_mean) + '\n? mean : ' + str(qu_mean) + '\n! mean :' + str(exc_mean) + '\n[ mean :' + str(brack_mean) + '\n\n'))
txt_250 = ''.join(txt_250)
txt_500 = ''.join(txt_500)
txt_1000 = ''.join(txt_1000)
txt_1500 = ''.join(txt_1500)
txt_plus = ''.join(txt_plus)
try:
for i in [250, 500, 1000, 1500, 'plus']:
with open(("./data_gabriel/" + str(i) + ".txt"), "w") as corpus:
txt = eval('txt_' + str(i))
corpus.write(txt.encode('utf8', 'replace'))
except Exception as e:
print("Error while writing final file : " + str(e))
| mit | 7,392,025,437,876,024,000 | 38.280899 | 203 | 0.484268 | false |
yarikoptic/NiPy-OLD | nipy/fixes/scipy/stats/models/tests/rmodelwrap.py | 1 | 1153 | ''' Wrapper for R models to allow comparison to scipy models '''
import numpy as np
from rpy import r
from exampledata import x, y
class RModel(object):
''' Class gives R models scipy.models -like interface '''
def __init__(self, y, design, model_type=r.lm):
''' Set up and estimate R model with data and design '''
self.y = y
self.design = design
self.model_type = model_type
self._design_cols = ['x.%d' % (i+1) for i in range(
self.design.shape[1])]
# Note the '-1' for no intercept - this is included in the design
self.formula = r('y ~ %s-1' % '+'.join(self._design_cols))
self.frame = r.data_frame(y=y, x=self.design)
self.results = self.model_type(self.formula,
data = self.frame)
# Provide compatible interface with scipy models
coeffs = self.results['coefficients']
self.beta = np.array([coeffs[c] for c in self._design_cols])
self.resid = self.results['residuals']
self.predict = self.results['fitted.values']
self.df_resid = self.results['df.residual']
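    # Hedged usage sketch (not part of the original file): with the example data
    # imported above and a working rpy/R installation, a comparison run might look
    # roughly like this:
    #
    #     model = RModel(y, x)
    #     print model.beta        # fitted coefficients
    #     print model.df_resid    # residual degrees of freedom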
| bsd-3-clause | -4,254,897,468,352,540,000 | 37.433333 | 73 | 0.5915 | false |
Eluvatar/trawler-python-region-scan | wa_scanner.py | 1 | 3652 | #!/usr/bin/python
from parser import api
from ns import id_str
import urllib2
import time
import json
from operator import itemgetter as get
import argparse
parser = argparse.ArgumentParser(description="Scan region")
parser.add_argument('-u', '--user', metavar='USER', required=True, help='a nation name, email, or web page to identify the user as per item 1 of the NS API Terms of Use: http://www.nationstates.net/pages/api.html#terms')
parser.add_argument('-r', '--region', metavar='REGION', required=True, help='the region to scan')
parser.add_argument('-o', '--output', metavar='FILE', help='the file to save the results to (otherwise prints raw JSON to screen)')
parser.add_argument('-i', '--influential-url', metavar='URL', help='an (optional) url to fetch a newline-delimited text file of the non-minnows in the region')
parser.add_argument('-a', '--all', action='store_true', help='scan all nations of the region, not just the WA nations')
parser.add_argument('-c', '--columns', nargs="+", choices=["endorsers","endorsees","influential"], default=["endorsers","endorsees"], help='columns to collect (top endorsers = nations who endorse the most, top endorsees = top recipients of endorsements)')
parser.add_argument('-R', '--rows', type=int, default=25, help='number of rows to collect (default = collect top 25 for each column)')
args = parser.parse_args()
api.user_agent = "Trawler Python Region Scan (operated by {})".format(args.user)
def get_nation_endos(nation):
xmlnat = api.request({'nation':nation,'q':('endorsements','wa','name','censusscore-65')})
endos = xmlnat.find('ENDORSEMENTS').text
name = xmlnat.find('NAME').text
spdr = int(xmlnat.find('CENSUSSCORE').text)
endocount = endos.count(',')+1 if endos else 0
return {'name':nation,'Name':name,'endos':endocount,'endorsers':endos.split(',') if endos else (),'influence_score':spdr}
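# Illustrative shape of the dict returned above (nation name and numbers are
# invented placeholders, not values from the NationStates API):
#   {'name': 'testlandia', 'Name': 'Testlandia', 'endos': 2,
#    'endorsers': ['nation_a', 'nation_b'], 'influence_score': 12345}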
xmlreg = api.request({'region':id_str(args.region),'q':'nations'})
residents = xmlreg.find('NATIONS').text.split(':')
resident_set = set(residents)
if not args.all:
xmlwa = api.request({'wa':'1','q':'members'})
all_wa_nations = xmlwa.find('MEMBERS').text.split(',')
wa_nation_set=set(all_wa_nations)
if args.influential_url:
influential_nation_names = map( str.strip, urllib2.urlopen(args.influential_url).readlines() )
scanned_nations = []
endorser_counts = {}
if args.all:
to_scan = resident_set
else:
to_scan = resident_set & wa_nation_set
for nat in to_scan:
scanned_nations.append(nat)
endorser_counts[nat]=0
infos={}
todo={}
for nat in scanned_nations:
endos = get_nation_endos(nat)
for endorser in endos['endorsers']:
endorser_counts[endorser] += 1
del endos['endorsers']
infos[nat]=endos
for nat in scanned_nations:
infos[nat]['endos_given'] = endorser_counts[nat]
if args.influential_url:
for nation in influential_nation_names:
nat=id_str(nation)
if nat not in wa_nation_set:
if nat in resident_set:
endos = get_nation_endos( nat )
del endos['endorsers']
endos['endos_given']=0
infos[nat]=endos
res={}
rows = args.rows
cols = args.columns
col_keys={'endorsers':'endos_given','endorsees':'endos','influential':'influence_score'}
for col in cols:
res[col]= sorted(infos.values(),key=lambda info: info[col_keys[col]],reverse=True)[0:rows]
res = dict(map(lambda item: (item[0],map(get('name'),item[1])), res.iteritems()))
res['pool']=map( lambda n: infos[n], apply(set.union,map(lambda l: set(l),res.values())) )
if args.output:
outf=open(args.output,"w+")
else:
import sys
outf = sys.stdout
json.dump(res,outf,separators=(',', ':'),sort_keys=True)
if not args.output:
print ""
| gpl-3.0 | -8,256,628,711,192,047,000 | 35.888889 | 255 | 0.692497 | false |
cpascoe95/cryptotools | tests/cipher_tests.py | 1 | 4829 | import unittest
import crypto.ciphers as ciphers
from crypto.alphabets import EnglishAlphabet
class SubstitutionCipherTests(unittest.TestCase):
def setUp(self):
self.alph = EnglishAlphabet()
self.key = {letter: letter for letter in self.alph}
def test_invalid_key_type(self):
with self.assertRaises(TypeError):
sc = ciphers.SubstitutionCipher(self.alph, 'Hello!')
def test_invalid_key_mapping(self):
self.key['B'] = 'A'
result = ciphers.SubstitutionCipher.is_valid_key(self.alph, self.key)
self.assertIsNotNone(result)
self.assertFalse(result)
with self.assertRaises(Exception):
sc = ciphers.SubstitutionCipher(self.alph, self.key)
def test_valid_key_mapping(self):
result = ciphers.SubstitutionCipher.is_valid_key(self.alph, self.key)
self.assertIsNotNone(result)
self.assertTrue(result)
def test_invert_mapping(self):
mapping = {'A': 'X', 'B': 'Y'}
self.assertEqual(ciphers.SubstitutionCipher.invert_mapping(mapping), {'X': 'A', 'Y': 'B'})
def test_identity_encryption(self):
sc = ciphers.SubstitutionCipher(self.alph, self.key)
self.assertEqual(sc.encrypt('A'), 'A')
def test_basic_encryption(self):
self.key['A'] = 'B'
self.key['B'] = 'A'
sc = ciphers.SubstitutionCipher(self.alph, self.key)
self.assertEqual(sc.encrypt('AABCD'), 'BBACD')
def test_identity_decryption(self):
sc = ciphers.SubstitutionCipher(self.alph, self.key)
self.assertEqual(sc.decrypt('ABC'), 'ABC')
def test_basic_decryption(self):
self.key['A'] = 'B'
self.key['B'] = 'A'
sc = ciphers.SubstitutionCipher(self.alph, self.key)
self.assertEqual(sc.decrypt('BBACD'), 'AABCD')
def test_mixed_case_encryption(self):
# This is specifically to test to see if it uses
        # Alphabet.strip(), as EnglishAlphabet capitalises lowercase letters
self.key['E'] = 'O'
self.key['O'] = 'E'
sc = ciphers.SubstitutionCipher(self.alph, self.key)
self.assertEqual(sc.encrypt('Hello, World!!'), 'HOLLEWERLD')
def test_mixed_case_decryption(self):
# This is specifically to test to see if it uses
        # Alphabet.strip(), as EnglishAlphabet capitalises lowercase letters
self.key['E'] = 'O'
self.key['O'] = 'E'
sc = ciphers.SubstitutionCipher(self.alph, self.key)
self.assertEqual(sc.decrypt('Holle, Werld!'), 'HELLOWORLD')
class CaesarShiftCipherTests(unittest.TestCase):
def setUp(self):
self.cs = ciphers.CaesarShiftCipher(EnglishAlphabet(), 0)
def test_invalid_key_type(self):
with self.assertRaises(TypeError):
self.cs.set_key('Hello, world!')
def test_identity_encryption(self):
self.assertEqual(self.cs.encrypt('A'), 'A')
self.assertEqual(self.cs.encrypt('AABC'), 'AABC')
def test_basic_shift_encryption(self):
self.cs.set_key(1)
self.assertEqual(self.cs.encrypt('A'), 'B')
self.assertEqual(self.cs.encrypt('AABC'), 'BBCD')
def test_modulo_encryption(self):
self.cs.set_key(1)
self.assertEqual(self.cs.encrypt('Z'), 'A')
self.cs.set_key(3)
self.assertEqual(self.cs.encrypt('XXYZ'), 'AABC')
def test_indentity_decryption(self):
self.assertEqual(self.cs.decrypt('A'), 'A')
self.assertEqual(self.cs.decrypt('AABC'), 'AABC')
def test_basic_shift_decryption(self):
self.cs.set_key(1)
self.assertEqual(self.cs.decrypt('B'), 'A')
self.assertEqual(self.cs.decrypt('BBCD'), 'AABC')
def test_modulo_decryption(self):
self.cs.set_key(1)
self.assertEqual(self.cs.decrypt('A'), 'Z')
self.cs.set_key(3)
self.assertEqual(self.cs.decrypt('AABC'), 'XXYZ')
class AffineShiftCipherTests(unittest.TestCase):
def setUp(self):
self.afs = ciphers.AffineShiftCipher(EnglishAlphabet(), 1, 0)
def test_invalid_key_type(self):
with self.assertRaises(TypeError):
self.afs.set_key('A', 'B')
def test_invalid_key_value(self):
with self.assertRaises(Exception):
self.afs.set_key(2, 0)
def test_identity_encryption(self):
self.assertEqual(self.afs.encrypt('A'), 'A')
self.assertEqual(self.afs.encrypt('AABC'), 'AABC')
def test_basic_a_key_encryption(self):
self.afs.set_key(3, 0)
self.assertEqual(self.afs.encrypt('ABBC'), 'ADDG')
def test_basic_b_key_encryption(self):
self.afs.set_key(1, 1)
self.assertEqual(self.afs.encrypt('ABBC'), 'BCCD')
def test_basic_key_encryption(self):
self.afs.set_key(3, 2)
self.assertEqual(self.afs.encrypt('AABBCC'), 'CCFFII')
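# Hypothetical entry point (not in the original file) so the suite can also be
# run directly with "python cipher_tests.py" in addition to a test runner:
if __name__ == '__main__':
    unittest.main()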
| gpl-2.0 | -3,983,513,460,971,492,400 | 30.980132 | 98 | 0.628495 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_mailcap.py | 1 | 10130 | import mailcap
import os
import shutil
import copy
import test.support
import unittest
# Location of mailcap file
MAILCAPFILE = test.support.findfile("mailcap.txt")
# Dict to act as mock mailcap entry for this test
# The keys and values should match the contents of MAILCAPFILE
MAILCAPDICT = {
'application/x-movie':
[{'compose': 'moviemaker %s',
'x11-bitmap': '"/usr/lib/Zmail/bitmaps/movie.xbm"',
'description': '"Movie"',
'view': 'movieplayer %s',
'lineno': 4}],
'application/*':
[{'copiousoutput': '',
'view': 'echo "This is \\"%t\\" but is 50 \\% Greek to me" \\; cat %s',
'lineno': 5}],
'audio/basic':
[{'edit': 'audiocompose %s',
'compose': 'audiocompose %s',
'description': '"An audio fragment"',
'view': 'showaudio %s',
'lineno': 6}],
'video/mpeg':
[{'view': 'mpeg_play %s', 'lineno': 13}],
'application/postscript':
[{'needsterminal': '', 'view': 'ps-to-terminal %s', 'lineno': 1},
{'compose': 'idraw %s', 'view': 'ps-to-terminal %s', 'lineno': 2}],
'application/x-dvi':
[{'view': 'xdvi %s', 'lineno': 3}],
'message/external-body':
[{'composetyped': 'extcompose %s',
'description': '"A reference to data stored in an external location"',
'needsterminal': '',
'view': 'showexternal %s %{access-type} %{name} %{site} %{directory} %{mode} %{server}',
'lineno': 10}],
'text/richtext':
[{'test': 'test "`echo %{charset} | tr \'[A-Z]\' \'[a-z]\'`" = iso-8859-8',
'copiousoutput': '',
'view': 'shownonascii iso-8859-8 -e richtext -p %s',
'lineno': 11}],
'image/x-xwindowdump':
[{'view': 'display %s', 'lineno': 9}],
'audio/*':
[{'view': '/usr/local/bin/showaudio %t', 'lineno': 7}],
'video/*':
[{'view': 'animate %s', 'lineno': 12}],
'application/frame':
[{'print': '"cat %s | lp"', 'view': 'showframe %s', 'lineno': 0}],
'image/rgb':
[{'view': 'display %s', 'lineno': 8}]
}
# For backwards compatibility, readmailcapfile() and lookup() still support
# the old version of mailcapdict without line numbers.
MAILCAPDICT_DEPRECATED = copy.deepcopy(MAILCAPDICT)
for entry_list in MAILCAPDICT_DEPRECATED.values():
for entry in entry_list:
entry.pop('lineno')
class HelperFunctionTest(unittest.TestCase):
def test_listmailcapfiles(self):
# The return value for listmailcapfiles() will vary by system.
# So verify that listmailcapfiles() returns a list of strings that is of
# non-zero length.
mcfiles = mailcap.listmailcapfiles()
self.assertIsInstance(mcfiles, list)
for m in mcfiles:
self.assertIsInstance(m, str)
with test.support.EnvironmentVarGuard() as env:
# According to RFC 1524, if MAILCAPS env variable exists, use that
# and only that.
if "MAILCAPS" in env:
env_mailcaps = env["MAILCAPS"].split(os.pathsep)
else:
env_mailcaps = ["/testdir1/.mailcap", "/testdir2/mailcap"]
env["MAILCAPS"] = os.pathsep.join(env_mailcaps)
mcfiles = mailcap.listmailcapfiles()
self.assertEqual(env_mailcaps, mcfiles)
def test_readmailcapfile(self):
# Test readmailcapfile() using test file. It should match MAILCAPDICT.
with open(MAILCAPFILE, 'r') as mcf:
with self.assertWarns(DeprecationWarning):
d = mailcap.readmailcapfile(mcf)
self.assertDictEqual(d, MAILCAPDICT_DEPRECATED)
def test_lookup(self):
# Test without key
expected = [{'view': 'animate %s', 'lineno': 12},
{'view': 'mpeg_play %s', 'lineno': 13}]
actual = mailcap.lookup(MAILCAPDICT, 'video/mpeg')
self.assertListEqual(expected, actual)
# Test with key
key = 'compose'
expected = [{'edit': 'audiocompose %s',
'compose': 'audiocompose %s',
'description': '"An audio fragment"',
'view': 'showaudio %s',
'lineno': 6}]
actual = mailcap.lookup(MAILCAPDICT, 'audio/basic', key)
self.assertListEqual(expected, actual)
# Test on user-defined dicts without line numbers
expected = [{'view': 'mpeg_play %s'}, {'view': 'animate %s'}]
actual = mailcap.lookup(MAILCAPDICT_DEPRECATED, 'video/mpeg')
self.assertListEqual(expected, actual)
def test_subst(self):
plist = ['id=1', 'number=2', 'total=3']
# test case: ([field, MIMEtype, filename, plist=[]], <expected string>)
test_cases = [
(["", "audio/*", "foo.txt"], ""),
(["echo foo", "audio/*", "foo.txt"], "echo foo"),
(["echo %s", "audio/*", "foo.txt"], "echo foo.txt"),
(["echo %t", "audio/*", "foo.txt"], "echo audio/*"),
(["echo \%t", "audio/*", "foo.txt"], "echo %t"),
(["echo foo", "audio/*", "foo.txt", plist], "echo foo"),
(["echo %{total}", "audio/*", "foo.txt", plist], "echo 3")
]
for tc in test_cases:
self.assertEqual(mailcap.subst(*tc[0]), tc[1])
class GetcapsTest(unittest.TestCase):
def test_mock_getcaps(self):
# Test mailcap.getcaps() using mock mailcap file in this dir.
# Temporarily override any existing system mailcap file by pointing the
# MAILCAPS environment variable to our mock file.
with test.support.EnvironmentVarGuard() as env:
env["MAILCAPS"] = MAILCAPFILE
caps = mailcap.getcaps()
self.assertDictEqual(caps, MAILCAPDICT)
def test_system_mailcap(self):
# Test mailcap.getcaps() with mailcap file(s) on system, if any.
caps = mailcap.getcaps()
self.assertIsInstance(caps, dict)
mailcapfiles = mailcap.listmailcapfiles()
existingmcfiles = [mcf for mcf in mailcapfiles if os.path.exists(mcf)]
if existingmcfiles:
# At least 1 mailcap file exists, so test that.
for (k, v) in caps.items():
self.assertIsInstance(k, str)
self.assertIsInstance(v, list)
for e in v:
self.assertIsInstance(e, dict)
else:
# No mailcap files on system. getcaps() should return empty dict.
self.assertEqual({}, caps)
class FindmatchTest(unittest.TestCase):
def test_findmatch(self):
# default findmatch arguments
c = MAILCAPDICT
fname = "foo.txt"
plist = ["access-type=default", "name=john", "site=python.org",
"directory=/tmp", "mode=foo", "server=bar"]
audio_basic_entry = {
'edit': 'audiocompose %s',
'compose': 'audiocompose %s',
'description': '"An audio fragment"',
'view': 'showaudio %s',
'lineno': 6
}
audio_entry = {"view": "/usr/local/bin/showaudio %t", 'lineno': 7}
video_entry = {'view': 'animate %s', 'lineno': 12}
message_entry = {
'composetyped': 'extcompose %s',
'description': '"A reference to data stored in an external location"', 'needsterminal': '',
'view': 'showexternal %s %{access-type} %{name} %{site} %{directory} %{mode} %{server}',
'lineno': 10,
}
# test case: (findmatch args, findmatch keyword args, expected output)
# positional args: caps, MIMEtype
# keyword args: key="view", filename="/dev/null", plist=[]
# output: (command line, mailcap entry)
cases = [
([{}, "video/mpeg"], {}, (None, None)),
([c, "foo/bar"], {}, (None, None)),
([c, "video/mpeg"], {}, ('animate /dev/null', video_entry)),
([c, "audio/basic", "edit"], {}, ("audiocompose /dev/null", audio_basic_entry)),
([c, "audio/basic", "compose"], {}, ("audiocompose /dev/null", audio_basic_entry)),
([c, "audio/basic", "description"], {}, ('"An audio fragment"', audio_basic_entry)),
([c, "audio/basic", "foobar"], {}, (None, None)),
([c, "video/*"], {"filename": fname}, ("animate %s" % fname, video_entry)),
([c, "audio/basic", "compose"],
{"filename": fname},
("audiocompose %s" % fname, audio_basic_entry)),
([c, "audio/basic"],
{"key": "description", "filename": fname},
('"An audio fragment"', audio_basic_entry)),
([c, "audio/*"],
{"filename": fname},
("/usr/local/bin/showaudio audio/*", audio_entry)),
([c, "message/external-body"],
{"plist": plist},
("showexternal /dev/null default john python.org /tmp foo bar", message_entry))
]
self._run_cases(cases)
@unittest.skipUnless(os.name == "posix", "Requires 'test' command on system")
def test_test(self):
# findmatch() will automatically check any "test" conditions and skip
# the entry if the check fails.
caps = {"test/pass": [{"test": "test 1 -eq 1"}],
"test/fail": [{"test": "test 1 -eq 0"}]}
# test case: (findmatch args, findmatch keyword args, expected output)
# positional args: caps, MIMEtype, key ("test")
# keyword args: N/A
# output: (command line, mailcap entry)
cases = [
# findmatch will return the mailcap entry for test/pass because it evaluates to true
([caps, "test/pass", "test"], {}, ("test 1 -eq 1", {"test": "test 1 -eq 1"})),
# findmatch will return None because test/fail evaluates to false
([caps, "test/fail", "test"], {}, (None, None))
]
self._run_cases(cases)
def _run_cases(self, cases):
for c in cases:
self.assertEqual(mailcap.findmatch(*c[0], **c[1]), c[2])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,471,126,944,683,534,000 | 41.208333 | 104 | 0.545311 | false |
rockyzhengwu/mlscratch | rnn/char_rnn.py | 1 | 4815 | #!/usr/bin/env python
# -*-coding:utf-8-*-
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
"""
import numpy as np
# data I/O
data = open('input.txt', 'r').read() # should be simple plain text file
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
# hyperparameters
hidden_size = 100 # size of hidden layer of neurons
seq_length = 25 # number of steps to unroll the RNN for
learning_rate = 1e-1
# model parameters
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size) * 0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size) * 0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
"""
inputs,targets are both list of integers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(hprev)
loss = 0
# forward pass
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size, 1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t - 1]) + bh) # hidden state
ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
loss += -np.log(ps[t][targets[t], 0]) # softmax (cross-entropy loss)
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(Why.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t - 1].T)
dhnext = np.dot(Whh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs) - 1]
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in range(n):
h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
y = np.dot(Why, h) + by
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
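# Optional sanity check (not part of the original script): a minimal sketch of
# verifying the analytic gradients from lossFun against central finite
# differences on a short input/target sequence before training. The helper name
# and defaults are assumptions for illustration.
def grad_check(inputs, targets, hprev, delta=1e-5, num_checks=5):
    _, dWxh, dWhh, dWhy, dbh, dby, _ = lossFun(inputs, targets, hprev)
    for param, dparam, name in zip([Wxh, Whh, Why, bh, by],
                                   [dWxh, dWhh, dWhy, dbh, dby],
                                   ['Wxh', 'Whh', 'Why', 'bh', 'by']):
        for _ in range(num_checks):
            ri = int(np.random.randint(param.size))
            old_val = param.flat[ri]
            param.flat[ri] = old_val + delta
            loss_plus = lossFun(inputs, targets, hprev)[0]
            param.flat[ri] = old_val - delta
            loss_minus = lossFun(inputs, targets, hprev)[0]
            param.flat[ri] = old_val  # restore the original parameter value
            grad_numerical = (loss_plus - loss_minus) / (2 * delta)
            grad_analytic = dparam.flat[ri]
            denom = abs(grad_numerical) + abs(grad_analytic)
            rel_error = abs(grad_analytic - grad_numerical) / denom if denom else 0.0
            print('%s: analytic %f, numerical %f, relative error %e'
                  % (name, grad_analytic, grad_numerical, rel_error))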
n, p = 0, 0
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0 / vocab_size) * seq_length # loss at iteration 0
while True:
# prepare inputs (we're sweeping from left to right in steps seq_length long)
if p + seq_length + 1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size, 1)) # reset RNN memory
p = 0 # go from start of data
inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
# sample from the model now and then
if n % 100 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print ( '----\n %s \n----' % (txt,))
# forward seq_length characters through the net and fetch gradient
loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 100 == 0:
print ('iter %d, loss: %f' % (n, smooth_loss))
# perform parameter update with Adagrad
for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
[dWxh, dWhh, dWhy, dbh, dby],
[mWxh, mWhh, mWhy, mbh, mby]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
p += seq_length # move data pointer
n += 1 # iteration counter
| apache-2.0 | -5,488,489,251,875,216,000 | 38.793388 | 125 | 0.593977 | false |
sensusaps/RoboBraille.Web.API | WorkingDirectory/DaisyPipeline/transformers/ca_cnib_rtf2dtbook/rtf2xml-py/rtf2xml/delete_info.py | 1 | 8983 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA #
# 02111-1307 USA #
# #
# #
#########################################################################
import sys, os, tempfile, rtf2xml.copy
class DeleteInfo:
    """Delete unnecessary destination groups"""
def __init__(self,
in_file ,
bug_handler,
copy = None,
run_level = 1,
):
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__write_to = tempfile.mktemp()
self.__bracket_count=0
self.__ob_count = 0
self.__cb_count = 0
self.__after_asterisk = 0
self.__delete = 0
self.__initiate_allow()
self.__ob = 0
self.__write_cb = 0
self.__run_level = run_level
self.__found_delete = 0
self.__list = 0
def __initiate_allow(self):
"""
Initiate a list of destination groups which should be printed out.
"""
self.__allowable = ('cw<ss<char-style',
'cw<it<listtable_',
'cw<it<revi-table',
'cw<ls<list-lev-d',
'cw<fd<field-inst',
'cw<an<book-mk-st',
'cw<an<book-mk-en',
'cw<an<annotation',
'cw<cm<comment___',
'cw<it<lovr-table',
# 'cw<ls<list______',
)
self.__not_allowable = (
'cw<un<unknown___',
'cw<un<company___',
'cw<ls<list-level',
'cw<fd<datafield_',
)
self.__state = 'default'
self.__state_dict = {
'default' : self.__default_func,
'after_asterisk' : self.__asterisk_func,
'delete' : self.__delete_func,
'list' : self.__list_func,
}
def __default_func(self,line):
"""Handle lines when in no special state. Look for an asterisk to
begin a special state. Otherwise, print out line."""
##cw<ml<asterisk__<nu<true
if self.__token_info == 'cw<ml<asterisk__':
self.__state = 'after_asterisk'
self.__delete_count = self.__ob_count
elif self.__token_info == 'ob<nu<open-brack':
# write previous bracket, if exists
if self.__ob:
self.__write_obj.write(self.__ob)
self.__ob = line
return 0
else:
            # write previous bracket, since didn't find asterisk
if self.__ob:
self.__write_obj.write(self.__ob)
self.__ob = 0
return 1
def __delete_func(self,line):
"""Handle lines when in delete state. Don't print out lines
unless the state has ended."""
if self.__delete_count == self.__cb_count:
self.__state = 'default'
if self.__write_cb:
self.__write_cb = 0
return 1
return 0
def __asterisk_func(self,line):
"""
Determine whether to delete info in group
Note on self.__cb flag.
        If you find that you are in a delete group, and the previous
        token is not an open bracket (self.__ob = 0), that means
        that the delete group is nested inside another acceptable
        destination group. In this case, you have already written
the open bracket, so you will need to write the closed one
as well.
"""
# Test for {\*}, in which case don't enter
# delete state
self.__after_asterisk = 0 # only enter this function once
self.__found_delete = 1
if self.__token_info == 'cb<nu<clos-brack':
if self.__delete_count == self.__cb_count:
self.__state = 'default'
self.__ob = 0
# changed this because haven't printed out start
return 0
else:
# not sure what happens here!
# believe I have a '{\*}
if self.__run_level > 3:
msg = 'flag problem\n'
raise self.__bug_handler, msg
return 1
elif self.__token_info in self.__allowable :
if self.__ob:
self.__write_obj.write(self.__ob)
self.__ob = 0
self.__state = 'default'
else:
pass
return 1
elif self.__token_info == 'cw<ls<list______':
self.__ob = 0
self.__found_list_func(line)
elif self.__token_info in self.__not_allowable:
if not self.__ob:
self.__write_cb = 1
self.__ob = 0
self.__state = 'delete'
self.__cb_count = 0
return 0
else:
if self.__run_level > 5:
                msg = 'After an asterisk, and found neither an allowable nor a non-allowable token\n'
                msg += 'token is "%s"\n' % self.__token_info
                raise self.__bug_handler, msg
if not self.__ob:
self.__write_cb = 1
self.__ob = 0
self.__state = 'delete'
self.__cb_count = 0
return 0
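    # Illustrative note (not in the original source): a starred destination group
    # whose control word maps to an entry in self.__allowable, e.g. a bookmark
    # start such as "{\*\bkmkstart target}", is written through, whereas a starred
    # group that maps to an unknown or disallowed token switches to the 'delete'
    # state and is dropped up to its matching closing brace. The sample RTF is an
    # assumption for illustration only.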
def __found_list_func(self, line):
"""
print out control words in this group
"""
self.__state = 'list'
def __list_func(self, line):
"""
Check to see if the group has ended.
Return 1 for all control words.
Return 0 otherwise.
"""
if self.__delete_count == self.__cb_count and self.__token_info ==\
'cb<nu<clos-brack':
self.__state = 'default'
if self.__write_cb:
self.__write_cb = 0
return 1
return 0
elif line[0:2] == 'cw':
return 1
else:
return 0
    def delete_info(self):
        """Main method for handling other methods. Read one line in at
        a time, and determine whether to print the line based on the state."""
line_to_read = 'dummy'
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
while line_to_read:
#ob<nu<open-brack<0001
to_print =1
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
if self.__token_info == 'ob<nu<open-brack':
self.__ob_count = line[-5:-1]
if self.__token_info == 'cb<nu<clos-brack':
self.__cb_count = line[-5:-1]
action = self.__state_dict.get(self.__state)
if not action:
sys.stderr.write('No action in dictionary state is "%s" \n'
% self.__state)
to_print = action(line)
"""
if self.__after_asterisk:
to_print = self.__asterisk_func(line)
elif self.__list:
self.__in_list_func(line)
elif self.__delete:
to_print = self.__delete_func(line)
else:
to_print = self.__default_func(line)
"""
if to_print:
self.__write_obj.write(line)
self.__write_obj.close()
read_obj.close()
copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "delete_info.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
return self.__found_delete
| apache-2.0 | -7,226,164,426,623,397,000 | 33.953307 | 97 | 0.436046 | false |
MikeWinter/bio-data-repository | bdr/utils/transports.py | 1 | 13814 | """
A set of classes for abstracting communication with remote servers. While the
urllib family of modules provides similar functionality, it does not provide for
querying the source for its metadata (size and modification date, in this case)
prior to fetching the resource itself.
All exceptions raised by these types inherit from the TransportError class.
Support for the FTP and HTTP protocols are included by default. This can be
extended to any communications protocol, however, by subclassing Transport and
adding the fully-qualified class name for the new type to the REMOTE_TRANSPORTS
settings list.
"""
from datetime import datetime
import ftplib
import importlib
import os.path
import re
import shutil
import socket
import tempfile
import urlparse
from django.utils.http import parse_http_date_safe
import requests
from .. import app_settings
from . import utc
__all__ = ["Transport", "TransportError"]
__author__ = "Michael Winter ([email protected])"
__license__ = """
Biological Dataset Repository: data archival and retrieval.
Copyright (C) 2015 Michael Winter
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
class TransportError(Exception):
"""Base class for all transport-related errors."""
pass
class AuthenticationError(TransportError):
"""Authentication credentials are missing or incorrect."""
pass
class ConnectionError(TransportError):
"""Could not establish a connection."""
pass
class NotFoundError(TransportError):
"""The requested resource could not be found remotely."""
pass
class UnrecognisedSchemaError(TransportError):
"""
    The scheme of the given URL is unrecognised and no transport handler is
    available.
"""
class Transport(object):
"""
An abstraction for retrieving resources, and their metadata, from remote
sources.
"""
@classmethod
def instance(cls, url, user='', password=''):
"""
Create an instance of a data transport mechanism. The transport type
returned is determined by the scheme component given by `url`.
:param url: The URL of the resource to be obtained.
:type url: basestring
:param user: The user name required to access the resource specified in
`url` (optional).
:type user: basestring
:param password: The password required to access the resource specified
in `url` (optional).
:type password: basestring
:return: A transport mechanism capable of accessing the specified
resource, or None if no matching transport type can be found.
:rtype: Transport | None
"""
scheme, _, _, _, _ = urlparse.urlsplit(url)
if scheme in app_settings.REMOTE_TRANSPORTS:
package = app_settings.REMOTE_TRANSPORTS[scheme].split('.')
module = importlib.import_module('.'.join(package[:-1]))
if hasattr(module, package[-1]):
return getattr(module, package[-1])(url, user, password)
return None
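    # Hedged usage sketch (not part of the original module): assuming the 'ftp'
    # scheme is mapped to FtpTransport in app_settings.REMOTE_TRANSPORTS, a caller
    # might do something like the following. The URL and credentials are
    # placeholders, not values from this project.
    #
    #     transport = Transport.instance('ftp://example.org/pub/data.csv',
    #                                    user='anonymous', password='guest')
    #     if transport is not None:
    #         size = transport.get_size()
    #         modified = transport.get_modification_date()
    #         data = transport.get_content()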
def __init__(self, url, user, password):
"""
Create an instance of a data transport mechanism.
:param url: The URL of the resource to be obtained.
:type url: str
:param user: The user name required to access the resource specified in
`url` (optional).
:type user: str
:param password: The password required to access the resource specified
in `url` (optional).
:type password: str
"""
self._content = None
self._password = password
self._url = url
self._user = user
def get_content(self):
"""
Return a temporary file containing the requested resource. This data
will be lost if the content object is closed.
:return: A file-like object containing the requested resource.
:rtype: file
"""
if self._content is None:
self._content = tempfile.TemporaryFile()
self._do_get_content()
self._content.seek(0)
return self._content
def get_modification_date(self):
"""
Return the date and time of the last modification to this resource as
reported by the remote source.
:return: The modification date and time, or None if unknown.
:rtype: datetime | None
:raises TransportError: If an error occurs while communicating with the
server.
"""
raise NotImplementedError
def get_size(self):
"""
Return the size of this resource as reported by the remote source.
:return: The size (in bytes) of this resource, or -1 if unknown.
:rtype: int
:raises TransportError: If an error occurs while communicating with the
server.
"""
raise NotImplementedError
def _do_get_content(self):
"""
Retrieve the resource, writing it to `_content`. Subclasses must
override this method.
"""
raise NotImplementedError
class FtpTransport(Transport):
"""
An abstraction for retrieving resources, and their metadata, from FTP
servers.
"""
_DEFAULT_TIMEOUT = 30
_date_reply = None
_feat_reply = None
_size_reply = None
def __init__(self, *args, **kwargs):
"""
Create an instance of the FTP transport mechanism.
:param url: The URL of the resource to be obtained.
:type url: str
:param user: The user name required to access the resource specified in
`url` (optional).
:type user: str
:param password: The password required to access the resource specified
in `url` (optional).
:type password: str
"""
super(FtpTransport, self).__init__(*args, **kwargs)
components = urlparse.urlsplit(self._url)
self._host, self._port = components.hostname, components.port or ftplib.FTP_PORT
self._path, self._name = os.path.dirname(components.path), os.path.basename(components.path)
self._features = None
self._modification_date = None
self._size = -1
self.__connection = None
@property
def features(self):
"""
A list of feature strings representing FTP extensions supported by the
server.
See RFC 5797 for a summary of these feature strings and RFC 2389 for a
description of the FEAT command.
:return: A list of feature strings that indicated supported commands.
:rtype: list
:raises TransportError: If an error occurs while communicating with the
server.
"""
if self._features is None:
if self._feat_reply is None:
self._feat_reply = re.compile(r'^211([ -])[ \t\x21-\x7e]*'
r'((?:[\r\n]+[ \t\x21-\x7e]+)+)'
r'[\r\n]+211[ \t\x21-\x7e]*[\r\n]*$')
try:
reply = self._connection.sendcmd('FEAT')
except ftplib.error_perm:
self._features = []
else:
match = self._feat_reply.match(reply)
self._features = (re.split(r'[\n\r]+ ', match.group(2).lstrip().lower())
if match and match.group(1) == '-' else [])
return self._features
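    # A hedged illustration (the banner text is made up): ftplib hands back a
    # multi-line FEAT reply such as
    #   211-Extensions supported:
    #    MDTM
    #    SIZE
    #   211 End
    # joined with newlines, which the regex above parses so that this property
    # evaluates to ['mdtm', 'size'].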
def get_modification_date(self):
"""
Return the date and time of the last modification to this resource as
reported by the remote source.
:return: The modification date and time, or None if unknown.
:rtype: datetime | None
:raises TransportError: If an error occurs while communicating with the
server.
"""
if self._modification_date is None and 'mdtm' in self.features:
if self._date_reply is None:
self._date_reply = re.compile(r'^213 (\d{14})(?:\.\d+)?[\n\r]*')
try:
reply = self._connection.sendcmd('MDTM ' + self._name)
except ftplib.error_perm as error:
raise NotFoundError(error)
match = self._date_reply.match(reply)
self._modification_date = datetime.strptime(
match.group(1)[:14], '%Y%m%d%H%M%S').replace(tzinfo=utc) if match else None
return self._modification_date
def get_size(self):
"""
Return the size of this resource as reported by the remote source.
:return: The size (in bytes) of this resource, or -1 if unknown.
:rtype: int
:raises TransportError: If an error occurs while communicating with the
server.
"""
if self._size == -1 and 'size' in self.features:
if self._size_reply is None:
self._size_reply = re.compile(r'^213 (\d+)[\n\r]*')
self._connection.voidcmd('TYPE I')
try:
reply = self._connection.sendcmd('SIZE ' + self._name)
except ftplib.error_perm as error:
raise NotFoundError(error)
match = self._size_reply.match(reply)
self._size = int(match.group(1)) if match else -1
return self._size
@property
def _connection(self):
if self.__connection is None:
self.__connection = ftplib.FTP(timeout=self._DEFAULT_TIMEOUT)
try:
self.__connection.connect(self._host, self._port)
self.__connection.login(self._user, self._password)
except socket.gaierror as error:
raise ConnectionError(error)
except ftplib.error_reply as error:
raise AuthenticationError(error)
except ftplib.all_errors + (socket.error,) as error:
raise TransportError(error)
try:
self.__connection.cwd(self._path)
except ftplib.error_reply as error:
raise NotFoundError(error)
return self.__connection
def _do_get_content(self):
try:
self._connection.retrbinary('RETR ' + self._name, self._content.write)
except (ftplib.Error, socket.error) as error:
            raise TransportError(error)
class HttpTransport(Transport):
"""
An abstraction for retrieving resources, and their metadata, from HTTP
servers.
"""
def __init__(self, *args, **kwargs):
"""
Create an instance of the HTTP transport mechanism.
:param url: The URL of the resource to be obtained.
:type url: str
:param user: The user name required to access the resource specified in
`url` (optional).
:type user: str
:param password: The password required to access the resource specified
in `url` (optional).
:type password: str
"""
super(HttpTransport, self).__init__(*args, **kwargs)
self._metadata = None
self._modification_date = None
self._size = -1
def get_modification_date(self):
"""
Return the date and time of the last modification to this resource as
reported by the remote source.
:return: The modification date and time, or None if unknown.
:rtype: datetime | None
"""
if self._modification_date is None:
metadata = self._get_metadata()
timestamp = parse_http_date_safe(metadata.get("last-modified"))
if timestamp:
self._modification_date = datetime.fromtimestamp(timestamp, utc)
return self._modification_date
def get_size(self):
"""
Return the size of this resource as reported by the remote source.
:return: The size (in bytes) of this resource, or -1 if unknown.
:rtype: int
"""
if self._size == -1:
metadata = self._get_metadata()
self._size = int(metadata.get("content-length", -1))
return self._size
def _get_metadata(self):
if self._metadata is None:
response = self._do_request("HEAD")
self._metadata = response.headers
return self._metadata
def _do_request(self, method="GET"):
try:
response = requests.request(method, self._url, auth=(self._user, self._password), stream=True)
except requests.RequestException as error:
raise ConnectionError(error)
        if response.status_code == 401:
            raise AuthenticationError()
        elif 400 <= response.status_code < 500:
            raise NotFoundError()
        elif response.status_code >= 500:
            raise ConnectionError()
return response
def _do_get_content(self):
response = self._do_request()
if response.status_code != 200:
raise TransportError(response.reason)
shutil.copyfileobj(response.raw, self._content)
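if __name__ == '__main__':  # pragma: no cover
    # Hedged usage sketch, not part of the original module: it assumes
    # app_settings.REMOTE_TRANSPORTS maps 'http' to HttpTransport above and
    # that the (made-up) URL below is reachable.
    transport = Transport.instance('http://example.com/data/report.csv')
    if transport is not None:
        print('size: %s bytes' % transport.get_size())
        print('modified: %s' % transport.get_modification_date())
        with open('report.csv', 'wb') as local_copy:
            shutil.copyfileobj(transport.get_content(), local_copy)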
| gpl-2.0 | -1,830,276,783,654,762,800 | 34.973958 | 106 | 0.600333 | false |
rehandalal/buchner | buchner/cmdline.py | 1 | 4058 | import os
import os.path
import string
from optparse import OptionParser
from buchner import __version__
USAGE = '%prog [options] [command] [command-options]'
VERSION = '%prog ' + __version__
def build_parser(usage):
parser = OptionParser(usage=usage, version=VERSION)
return parser
DIGIT_TO_WORD = {
'0': 'zero',
'1': 'one',
'2': 'two',
'3': 'three',
'4': 'four',
'5': 'five',
'6': 'six',
'7': 'seven',
'8': 'eight',
'9': 'nine'
}
def clean_project_module(s):
s = s.lower()
s = ''.join([char for char in s
if char in string.ascii_letters + string.digits])
if s[0] in string.digits:
s = DIGIT_TO_WORD[s[0]] + s[1:]
return s
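# Quick examples of the cleaning rules above:
#   clean_project_module('My Project!')  -> 'myproject'
#   clean_project_module('3d-viewer')    -> 'threedviewer'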
def perror(s):
print s
def create(command, argv):
parser = build_parser('%prog create <PROJECTNAME>')
parser.add_option(
'--noinput',
action='store_true',
default=False,
help='runs buchner without requiring input')
(options, args) = parser.parse_args(argv)
if not args:
perror('ERROR: You must provide a project name.')
return 1
project_name = args[0]
project_module = clean_project_module(project_name.lower())
if not options.noinput:
# Ask them for project module name and then double-check it's
# valid.
new_project_module = raw_input(
'Python module name for your project: [{0}] '.format(project_module))
new_project_module = new_project_module.strip()
else:
new_project_module = project_module
if not new_project_module:
new_project_module = project_module
if new_project_module != clean_project_module(new_project_module):
perror(
'ERROR: "{0}" is not a valid Python module name.'.format(
new_project_module))
return 1
project_module = new_project_module
project_dir = os.path.abspath(project_module)
if os.path.exists(project_dir):
perror(
'ERROR: Cannot create "{0}"--something is in the way.'.format(
project_dir))
return 1
# Walk the project-template and create all files and directories
# replacing:
#
# * PROJECTMODULE -> project_module
project_template_dir = os.path.join(os.path.dirname(__file__),
'project-template')
for root, dirs, files in os.walk(project_template_dir):
rel_root = root[len(project_template_dir)+1:]
for f in files:
source = os.path.join(root, f)
dest = os.path.join(project_dir, rel_root, f)
dest = dest.replace('PROJECTMODULE', project_module)
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
fp = open(source, 'rb')
data = fp.read()
fp.close()
data = data.replace('PROJECTMODULE', project_module)
fp = open(dest, 'wb')
fp.write(data)
fp.close()
print 'create file: {0}'.format(dest)
print 'Done!'
return 0
HANDLERS = (
('create', create, 'Creates a new buchner project.'),)
def cmdline_handler(scriptname, argv):
print '%s version %s' % (scriptname, __version__)
# TODO: Rewrite using subparsers.
handlers = HANDLERS
if not argv or '-h' in argv or '--help' in argv:
parser = build_parser("%prog [command]")
parser.print_help()
print ''
print 'Commands:'
for command_str, _, command_help in handlers:
print ' %-14s %s' % (command_str, command_help)
return 0
if '--version' in argv:
# We've already printed the version, so we can just exit.
return 0
command = argv.pop(0)
for (cmd, fun, hlp) in handlers:
if cmd == command:
return fun(command, argv)
perror('Command "{0}" does not exist.'.format(command))
for cmd, fun, hlp in handlers:
perror(' %-14s %s' % (cmd, hlp))
return 1
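if __name__ == '__main__':
    # Hedged convenience entry point (not part of the original module); the
    # installed console script is assumed to call cmdline_handler() this way.
    import sys
    sys.exit(cmdline_handler(os.path.basename(sys.argv[0]), sys.argv[1:]))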
| bsd-3-clause | 4,306,350,212,375,323,000 | 24.3625 | 81 | 0.569246 | false |
thomasw/convertly | converter/views.py | 1 | 3286 | from converter import *
from django.template import RequestContext, Context, loader
from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404
from converter.forms import UploadDoc
from zipfile import ZipFile
def index(request):
""" Interface for uploading, converting, and downloading documents """
form = UploadDoc()
return render_to_response('index.phtml', {'form':form}, context_instance=RequestContext(request))
def download(request, job_id):
""" Given a job id will provide the user's browser with a converted archive """
clean()
tmpdir = tempfile.gettempdir()
jobdir = os.path.join(tmpdir, tmp_prefix + job_id)
#check if job exists
if not os.path.isdir(jobdir): raise Http404
#find files to zip
files = get_jobfiles(jobdir)
#create zip archive
archive = ZipFile(tempfile.mkstemp()[1], 'w')
for f in files:
name, arcname = str(f), str(f[len(jobdir) + 1:])
archive.write(name, arcname)
archive.close()
#return archive
f = file(archive.filename)
contents = f.read()
f.close()
rm(archive.filename)
filename = os.path.basename(job_id) + '.zip'
mimetype = 'application/zip'
response = HttpResponse(contents, mimetype=mimetype)
response['Content-Disposition'] = 'attachment; filename=%s' % (filename,)
return response
def upload(request):
""" Accepts docx files to be converted to html """
if request.method == 'POST':
clean()
form = UploadDoc(request.POST, request.FILES)
if form.is_valid():
#Move uploaded file to job directory
upload = request.FILES['file']
source = upload.temporary_file_path()
jobdir = tempfile.mkdtemp(prefix=tmp_prefix)
dest = os.path.join(jobdir, upload.name)
os.rename(source, dest)
#Process an individual docx file
if has_extension(upload.name, 'docx'):
files = [dest,]
#Process a zip archive, only pulls docx in root dir of archive
if has_extension(upload.name, 'zip'):
#read archive
archive = ZipFile(dest, 'r')
members = archive.namelist()
members = filter(lambda f: has_extension(f, 'docx'), members)
                members = filter(lambda f: len(f.split('/')) == 1, members)  # zip entry names always use '/'
if not members: return error('No docx files found in root directory of archive.')
#extract each item
for m in members:
try:
f = file(os.path.join(jobdir,m), 'w')
f.write(archive.read(m))
f.close()
except:
return error('An error occurred trying to extract files from archive.')
#add docx files to file list
files = os.listdir(jobdir)
files = filter(lambda f: f.split('.')[-1] == 'docx', files)
files = map(lambda f: os.path.join(jobdir, f), files)
#Convert files in job
for f in files:
input = f
output = os.path.join(jobdir, remove_ext(f) + '.html')
im = 'cdwsiodocx'
om = 'cdwsiodocx'
job = Zombie(input, output)
if job.convert():
job.finalize()
else:
e = sys.stderr.last_error()
return error('There was an error converting the document provided "%s"' % e)
context = {'id': os.path.basename(jobdir)[len(tmp_prefix):]}
return render_to_response('result.phtml', context)
return render_to_response('errors.phtml', {'errors':form.errors})
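# The helpers used above (clean, tmp_prefix, get_jobfiles, rm, has_extension,
# remove_ext, error, Zombie) arrive via `from converter import *`. Their
# assumed behaviour, inferred from usage only:
#   has_extension(name, ext) -> bool     # does `name` end in `.ext`?
#   remove_ext(path) -> str              # path without its final extension
#   get_jobfiles(jobdir) -> list         # files produced for a job
#   error(message) -> HttpResponse       # renders an error page with `message`
#   Zombie(input, output)                # wrapper around the docx->html backend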
| mit | 6,260,761,604,533,570,000 | 31.215686 | 98 | 0.664029 | false |
propdata/scrum-pm | scrum_pm/artifacts/migrations/0001_initial.py | 1 | 5055 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Sprint'
db.create_table('artifacts_sprint', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sprint_created', to=orm['auth.User'])),
('starts', self.gf('django.db.models.fields.DateTimeField')()),
('ends', self.gf('django.db.models.fields.DateTimeField')()),
('retrospective', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('artifacts', ['Sprint'])
def backwards(self, orm):
# Deleting model 'Sprint'
db.delete_table('artifacts_sprint')
models = {
'artifacts.sprint': {
'Meta': {'object_name': 'Sprint'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sprint_created'", 'to': "orm['auth.User']"}),
'ends': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'retrospective': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'starts': ('django.db.models.fields.DateTimeField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['artifacts'] | mit | 6,145,486,394,306,137,000 | 65.526316 | 182 | 0.563403 | false |
davidpaulrosser/Forms | forms/util/mesh.py | 1 | 3205 | """
This module provides various mesh utilities for cleaning and extruding.
"""
import pymel.core as pm
import maya.mel as mel
"""
Combine multiple polygon meshes with the option of removing duplicate internal faces.
Parameters:
instanceGroup -- A group of meshes to combine ( pymel.core.general.group )
meshName -- A name for the mesh output ( default "mesh" )
duplicateFaces -- Optionally remove lamina and the faces they share ( default False )
Return:
mesh -- ( pymel.core.nodetypes.Transform(u'') )
"""
def combineClean( instanceGroup, meshName, duplicateFaces = False ):
print( "Combining mesh" )
mesh = pm.polyUnite( instanceGroup, name = meshName, constructionHistory = False )
#print( "Merging %i" % len( mesh[ 0 ].vtx ) + " verticies" )
pm.polyMergeVertex( mesh[ 0 ].vtx, distance = 0.1 )
#print( "Reduced to %i" % mesh[ 0 ].numVertices() + " verticies" )
if duplicateFaces:
print( "Cleaning up faces" )
pm.select( mesh[ 0 ] )
pm.selectType( polymeshFace = True )
pm.polySelectConstraint( mode = 3, type = 0x0008, topology = 2 )
# Don't ask me how I did this
mel.eval('polyCleanupArgList 3 { "0","2","0","0","0","0","0","0","0","1e-005","0","1e-005","1","0.3","0","-1","1" };')
pm.delete()
pm.polySelectConstraint( mode = 0, topology = 0 )
pm.selectType( polymeshFace = False )
pm.selectMode( object = True )
print( "Faces reduced" )
if pm.PyNode( instanceGroup ).exists():
pm.delete( instanceGroup )
pm.delete( constructionHistory = True )
pm.select( clear = True )
print( "Cleaning up complete" )
return mesh
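# Hedged usage sketch (the node names are made up): combine a group of
# instanced meshes, then turn the result into a wireframe with polyWire below:
#   mesh = combineClean('instanceGroup1', 'combinedMesh', duplicateFaces=True)
#   polyWire(mesh, gridSize=0.85, depth=0.4, extrudeMode=1)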
"""
Create a wireframe style mesh
Ported from jh_polyWire.mel http://www.creativecrash.com/maya/downloads/scripts-plugins/modeling/poly-tools/c/convert-to-polywire
Parameters:
mesh -- The mesh to convert ( pm.core.nodetypes.Mesh )
gridSize -- The thickness of the borders ( default 0.9 )
depth -- The depth of the extrusion. The value is relative to the scale of the model ( default 0.5 )
extrudeMode -- The extrusion mode. 0 to scale the faces in world space, 1 to translate the faces in local space ( default 1 )
"""
def polyWire( mesh, gridSize = 0.9, depth = 0.5, extrudeMode = 0 ):
# Select the faces
pm.select( mesh[ 0 ].f )
# Extrude and scale the faces
extrude = pm.polyExtrudeFacet( constructionHistory = True, keepFacesTogether = False, divisions = 1, twist = 0, taper = 1, off = 0 )
pm.PyNode( extrude[ 0 ] ).localScale.set( [ gridSize, gridSize, gridSize ] )
# Delete inner faces
pm.delete()
pm.select( mesh[ 0 ].f )
# Extrude the faces
extrude = pm.polyExtrudeFacet( constructionHistory = True, keepFacesTogether = True, divisions = 1, twist = 0, taper = 1, off = 0 )
if extrudeMode == 0:
pm.PyNode( extrude[ 0 ] ).scale.set( [ depth, depth, depth ] )
elif extrudeMode == 1:
pm.PyNode( extrude[ 0 ] ).localTranslate.set( [ 0, 0, depth ] )
pm.select( clear = True )
| mit | -1,726,906,930,116,694,500 | 29.235849 | 136 | 0.616849 | false |
jerjorg/BZI | BZI/convergence.py | 1 | 6793 | import numpy as np
import matplotlib.pyplot as plt
import time
from BZI.symmetry import make_ptvecs
from BZI.sampling import make_grid
from BZI.pseudopots import Al_PP
from BZI.integration import monte_carlo
from BZI.plots import PlotMesh
class Convergence(object):
""" Compare integrations of pseudo-potentials by creating convergence plots.
Args:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
cutoff (float): the energy cutoff of the pseudo-potential
cell_type (str): the geometry of the integration cell
cell_constant (float): the size of the integration cell
offset (list): a vector that offsets the grid from the origin and is
given in grid coordinates.
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
Attributes:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
cell_type (str): the geometry of the integration cell.
cell_constant (float): the size of the integration cell.
cell_vectors (np.ndarray): an array vectors as columns of a 3x3 numpy
array that is used to create the cell
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
answer (float): the expected result of integration
errors (list): a list of errors for each grid type
nspts (list): a list of the number of sampling points for each grid type
integrals (list): a list of integral value for each grid type and constant
times (list): a list of the amount of time taken computing the grid
generation and integration.
"""
def __init__(self, pseudo_potential=None, cutoff=None, cell_centering=None,
cell_constants=None, cell_angles=None, offset=None,
grid_types=None, grid_constants=None,
integration_methods=None, origin=None, random = None):
self.pseudo_potential = pseudo_potential or Al_PP
self.cutoff = cutoff or 4.
self.cell_centering = cell_centering or "prim"
self.cell_constants = cell_constants or [1.]*3
self.cell_angles = cell_angles or [np.pi/2]*3
self.cell_vectors = make_ptvecs(self.cell_centering, self.cell_constants,
self.cell_angles)
        self.grid_types = grid_types or ["prim", "base", "body", "face"]
self.grid_constants = grid_constants or [1/n for n in range(2,11)]
self.offset = offset or [0.,0.,0.]
        # No default integrator is assumed here; pass integration_methods in
        # explicitly when non-random (grid-based) integration is wanted.
        self.integration_methods = integration_methods or []
self.origin = origin or [0.,0.,0.]
self.random = random or False
def compare_grids(self, answer, plot=False, save=False):
self.answer = answer
if self.random:
nm = len(self.grid_types)
self.nspts = [[] for _ in range(nm + 1)]
self.errors = [[] for _ in range(nm + 1)]
self.integrals = [[] for _ in range(nm + 1)]
self.times = [[] for _ in range(nm + 1)]
npts_list = [2**n for n in range(8,14)]
for npts in npts_list:
time1 = time.time()
integral = monte_carlo(self.pseudo_potential,
self.cell_vectors,
npts,
self.cutoff)
self.nspts[nm].append(npts)
self.integrals[nm].append(integral)
self.times[nm].append((time.time() - time1))
self.errors[nm].append(np.abs(self.integrals[nm][-1] - answer))
else:
self.nspts = [[] for _ in range(len(self.grid_types))]
self.errors = [[] for _ in range(len(self.grid_types))]
self.integrals = [[] for _ in range(len(self.grid_types))]
self.times = [[] for _ in range(len(self.grid_types))]
integration_method = self.integration_methods[0]
            # Assumes the grid shares the cell's angles; only the centerings
            # and constants stored on the instance are varied here.
            for (i, grid_centering) in enumerate(self.grid_types):
                for grid_consts in self.grid_constants:
                    for grid_angles in [self.cell_angles]:
grid_vecs = make_ptvecs(grid_centering, grid_consts, grid_angles)
time1 = time.time()
npts, integral = integration_method(self.pseudo_potential,
self.cell_vectors,
grid_vecs,
self.offset,
self.origin,
self.cutoff)
self.nspts[i].append(npts)
self.integrals[i].append(integral)
self.times[i].append((time.time() - time1))
self.errors[i].append(np.abs(self.integrals[i][-1] - answer))
if save:
np.save("%s_times" %self.pseudo_potential, self.times)
np.save("%s_integrals" %self.pseudo_potential, self.integrals)
np.save("%s_errors" %self.pseudo_potential, self.errors)
if plot:
if self.random:
plt.loglog(self.nspts[nm], self.errors[nm], label="random", color="orange")
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.errors[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Error")
test = [1./n**(2./3) for n in self.nspts[0]]
plt.loglog(self.nspts[0], test, label="1/n**(2/3)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.times[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Time (s)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
def plot_grid(self,i,j):
"""Plot one of the grids in the convergence plot.
"""
grid_vecs = make_ptvecs(self.grid_types[i], self.grid_constants[j])
        grid_pts = make_grid(self.cell_vectors, grid_vecs, self.offset)
        PlotMesh(grid_pts, self.cell_vectors, self.offset)
| gpl-3.0 | -3,526,169,047,747,902,000 | 48.583942 | 91 | 0.554541 | false |
andras-tim/sphinxcontrib-httpdomain | doc/conf.py | 1 | 8340 | # -*- coding: utf-8 -*-
#
# sphinxcontrib-httpdomain documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 2 13:27:52 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain', 'sphinxcontrib.autohttp.flask',
'sphinxcontrib.autohttp.bottle',
'sphinxcontrib.autohttp.tornado',
'sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinxcontrib-httpdomain'
copyright = u'2011, Hong Minhee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
release = '1.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxcontrib-httpdomaindoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinxcontrib-httpdomain.tex',
u'sphinxcontrib-httpdomain Documentation',
u'Hong Minhee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinxcontrib-httpdomain',
u'sphinxcontrib-httpdomain Documentation',
[u'Hong Minhee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinxcontrib-httpdomain',
u'sphinxcontrib-httpdomain Documentation', u'Hong Minhee',
'sphinxcontrib-httpdomain', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
extlinks = {
'pull': ('https://bitbucket.org/birkenfeld/sphinx-contrib/pull-request/%s/',
'pull request #'),
'issue': ('https://bitbucket.org/birkenfeld/sphinx-contrib/issue/%s/',
'issue #')
}
| bsd-2-clause | 1,940,652,276,734,229,500 | 31.834646 | 80 | 0.702158 | false |
bfirsh/docker-py | tests/unit/auth_test.py | 1 | 16214 | # -*- coding: utf-8 -*-
import base64
import json
import os
import os.path
import random
import shutil
import tempfile
import unittest
from docker import auth, errors
try:
from unittest import mock
except ImportError:
import mock
class RegressionTest(unittest.TestCase):
def test_803_urlsafe_encode(self):
auth_data = {
'username': 'root',
'password': 'GR?XGR?XGR?XGR?X'
}
encoded = auth.encode_header(auth_data)
assert b'/' not in encoded
assert b'_' in encoded
class ResolveRepositoryNameTest(unittest.TestCase):
def test_resolve_repository_name_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image'),
('docker.io', 'image'),
)
def test_resolve_repository_name_dotted_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image.valid'),
('docker.io', 'image.valid')
)
def test_resolve_repository_name_hub_image(self):
self.assertEqual(
auth.resolve_repository_name('username/image'),
('docker.io', 'username/image'),
)
def test_explicit_hub_index_library_image(self):
self.assertEqual(
auth.resolve_repository_name('docker.io/image'),
('docker.io', 'image')
)
def test_explicit_legacy_hub_index_library_image(self):
self.assertEqual(
auth.resolve_repository_name('index.docker.io/image'),
('docker.io', 'image')
)
def test_resolve_repository_name_private_registry(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net/image'),
('my.registry.net', 'image'),
)
def test_resolve_repository_name_private_registry_with_port(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net:5000/image'),
('my.registry.net:5000', 'image'),
)
def test_resolve_repository_name_private_registry_with_username(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net/username/image'),
('my.registry.net', 'username/image'),
)
def test_resolve_repository_name_no_dots_but_port(self):
self.assertEqual(
auth.resolve_repository_name('hostname:5000/image'),
('hostname:5000', 'image'),
)
def test_resolve_repository_name_no_dots_but_port_and_username(self):
self.assertEqual(
auth.resolve_repository_name('hostname:5000/username/image'),
('hostname:5000', 'username/image'),
)
def test_resolve_repository_name_localhost(self):
self.assertEqual(
auth.resolve_repository_name('localhost/image'),
('localhost', 'image'),
)
def test_resolve_repository_name_localhost_with_username(self):
self.assertEqual(
auth.resolve_repository_name('localhost/username/image'),
('localhost', 'username/image'),
)
def test_invalid_index_name(self):
self.assertRaises(
errors.InvalidRepository,
lambda: auth.resolve_repository_name('-gecko.com/image')
)
def encode_auth(auth_info):
return base64.b64encode(
auth_info.get('username', '').encode('utf-8') + b':' +
auth_info.get('password', '').encode('utf-8'))
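# For example, encode_auth({'username': 'user', 'password': 'pass'}) returns
# the base64 of b'user:pass', i.e. b'dXNlcjpwYXNz'.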
class ResolveAuthTest(unittest.TestCase):
index_config = {'auth': encode_auth({'username': 'indexuser'})}
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
auth_config = auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
})
def test_resolve_authconfig_hostname_only(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_protocol(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_trailing_slash(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_wrong_secure_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://index.docker.io'
)['username'],
'indexuser'
)
def test_resolve_authconfig_path_wrong_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_default_registry(self):
self.assertEqual(
auth.resolve_authconfig(self.auth_config)['username'],
'indexuser'
)
def test_resolve_authconfig_default_explicit_none(self):
self.assertEqual(
auth.resolve_authconfig(self.auth_config, None)['username'],
'indexuser'
)
def test_resolve_authconfig_fully_explicit(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_legacy_config(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'legacy.registry.url'
)['username'],
'legacyauth'
)
def test_resolve_authconfig_no_match(self):
self.assertTrue(
auth.resolve_authconfig(self.auth_config, 'does.not.exist') is None
)
def test_resolve_registry_and_auth_library_image(self):
image = 'image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_hub_image(self):
image = 'username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_explicit_hub(self):
image = 'docker.io/username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_explicit_legacy_hub(self):
image = 'index.docker.io/username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_private_registry(self):
image = 'my.registry.net/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'privateuser',
)
def test_resolve_registry_and_auth_unauthenticated_registry(self):
image = 'other.registry.net/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
),
None,
)
class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = [email protected]')
cfg = auth.load_config(dockercfg_path)
assert auth.INDEX_NAME in cfg
self.assertNotEqual(cfg[auth.INDEX_NAME], None)
cfg = cfg[auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': '[email protected]'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': '[email protected]'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_with_auths(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
'auths': {
registry: {
'auth': '{0}'.format(auth_),
'email': '[email protected]'
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_utf8(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(
b'sakuya\xc3\xa6:izayoi\xc3\xa6').decode('ascii')
config = {
'auths': {
registry: {
'auth': '{0}'.format(auth_),
'email': '[email protected]'
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], b'sakuya\xc3\xa6'.decode('utf8'))
self.assertEqual(cfg['password'], b'izayoi\xc3\xa6'.decode('utf8'))
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_with_headers(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'HttpHeaders': {
'Name': 'Spike',
'Surname': 'Spiegel'
},
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert 'HttpHeaders' in cfg
self.assertNotEqual(cfg['HttpHeaders'], None)
cfg = cfg['HttpHeaders']
self.assertEqual(cfg['Name'], 'Spike')
self.assertEqual(cfg['Surname'], 'Spiegel')
def test_load_config_unknown_keys(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert cfg == {}
def test_load_config_invalid_auth_dict(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'auths': {
'scarlet.net': {'sakuya': 'izayoi'}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert cfg == {'scarlet.net': {}}
def test_load_config_identity_token(self):
folder = tempfile.mkdtemp()
registry = 'scarlet.net'
token = '1ce1cebb-503e-7043-11aa-7feb8bd4a1ce'
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
auth_entry = encode_auth({'username': 'sakuya'}).decode('ascii')
config = {
'auths': {
registry: {
'auth': auth_entry,
'identitytoken': token
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
cfg = cfg[registry]
assert 'IdentityToken' in cfg
assert cfg['IdentityToken'] == token
| apache-2.0 | 4,883,358,497,778,586,000 | 32.5 | 79 | 0.562107 | false |
stxnext-kindergarten/presence-analyzer-pzarebski | src/presence_analyzer/utils.py | 1 | 3612 | # -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import csv
from json import dumps
from functools import wraps
from datetime import datetime
from flask import Response
from presence_analyzer.main import app
import logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
"""
This docstring will be overridden by @wraps decorator.
"""
return Response(
dumps(function(*args, **kwargs)),
mimetype='application/json'
)
return inner
def get_data():
"""
Extracts presence data from CSV file and groups it by user_id.
It creates structure like this:
data = {
'user_id': {
datetime.date(2013, 10, 1): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2013, 10, 2): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(16, 45, 0),
},
}
}
"""
data = {}
with open(app.config['DATA_CSV'], 'r') as csvfile:
presence_reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(presence_reader):
if len(row) != 4:
# ignore header and footer lines
continue
try:
user_id = int(row[0])
date = datetime.strptime(row[1], '%Y-%m-%d').date()
start = datetime.strptime(row[2], '%H:%M:%S').time()
end = datetime.strptime(row[3], '%H:%M:%S').time()
except (ValueError, TypeError):
log.debug('Problem with line %d: ', i, exc_info=True)
data.setdefault(user_id, {})[date] = {'start': start, 'end': end}
return data
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = [[] for i in range(7)] # one list for every day in week
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
Calculates inverval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
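# Quick sanity check of the two helpers above (times as datetime.time values):
#   seconds_since_midnight(time(8, 30, 0)) -> 8 * 3600 + 30 * 60 = 30600
#   interval(time(8, 30, 0), time(16, 45, 0)) -> 60300 - 30600 = 29700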
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
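# e.g. mean([30600, 29700]) -> 30150.0, while mean([]) -> 0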
def group_by_weekday_start_end(items):
"""
Groups start time and end time by weekday.
It creates structure like this:
result = [
{
'start': [39973, 35827, 31253, 32084, 40358],
'end': [70900, 61024, 61184, 55828, 70840],
},
{
'start': [33058, 39177, 31018],
'end': [61740, 71032, 70742],
}
]
"""
result = [{} for i in range(7)] # one dict for every day in week
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].setdefault('start', []).append(
seconds_since_midnight(start)
)
result[date.weekday()].setdefault('end', []).append(
seconds_since_midnight(end)
)
return result
| mit | 8,764,287,398,231,358,000 | 26.157895 | 79 | 0.551495 | false |
robot-tools/iconograph | server/modules/persistent.py | 1 | 1150 | #!/usr/bin/python3
import argparse
import os
import icon_lib
parser = argparse.ArgumentParser(description='iconograph persistent')
parser.add_argument(
'--chroot-path',
dest='chroot_path',
action='store',
required=True)
FLAGS = parser.parse_args()
def main():
module = icon_lib.IconModule(FLAGS.chroot_path)
os.mkdir(os.path.join(FLAGS.chroot_path, 'persistent'))
tool_path = os.path.join(FLAGS.chroot_path, 'icon', 'persistent')
os.makedirs(tool_path, exist_ok=True)
script = os.path.join(tool_path, 'startup.sh')
with open(script, 'w') as fh:
os.chmod(fh.fileno(), 0o755)
fh.write("""\
#!/bin/bash
set -ex
e2fsck -y LABEL=PERSISTENT
mount -o noatime LABEL=PERSISTENT /persistent
""")
with module.ServiceFile('persistent.service') as fh:
fh.write("""
[Unit]
Description=Mount /persistent
DefaultDependencies=no
Conflicts=shutdown.target
After=systemd-remount-fs.service
Before=sysinit.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/icon/persistent/startup.sh
[Install]
WantedBy=sysinit.target
""")
module.EnableService('persistent.service')
if __name__ == '__main__':
main()
| apache-2.0 | 3,711,239,642,971,571,700 | 19.175439 | 69 | 0.713913 | false |
ratoaq2/deluge | packaging/win32/deluge-bbfreeze.py | 1 | 7783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Calum Lind <[email protected]>
# Copyright (C) 2010 Damien Churchill <[email protected]>
# Copyright (C) 2009-2010 Andrew Resch <[email protected]>
# Copyright (C) 2009 Jesper Lund <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from __future__ import print_function
import glob
import os
import re
import shutil
import sys
import bbfreeze
import gtk
from win32verstamp import stamp
import deluge.common
class VersionInfo(object):
def __init__(self, version, internalname=None, originalfilename=None,
comments=None, company=None, description=None,
_copyright=None, trademarks=None, product=None, dll=False,
debug=False, verbose=True):
parts = version.split('.')
while len(parts) < 4:
parts.append('0')
self.version = '.'.join(parts)
self.internal_name = internalname
self.original_filename = originalfilename
self.comments = comments
self.company = company
self.description = description
self.copyright = _copyright
self.trademarks = trademarks
self.product = product
self.dll = dll
self.debug = debug
self.verbose = verbose
DEBUG = False
if len(sys.argv) == 2 and sys.argv[1].lower() == 'debug':
DEBUG = True
# Get build_version from installed deluge.
build_version = deluge.common.get_version()
python_path = os.path.dirname(sys.executable)
if python_path.endswith('Scripts'):
python_path = python_path[:-8]
gtk_root = os.path.join(gtk.__path__[0], '..', 'runtime')
build_dir = os.path.join('build-win32', 'deluge-bbfreeze-' + build_version)
if DEBUG:
print('Python Path: %s' % python_path)
print('Gtk Path: %s' % gtk_root)
print('bbfreeze Output Path: %s' % build_dir)
print('Freezing Deluge %s...' % build_version)
# Disable printing to console for bbfreezing.
if not DEBUG:
sys.stdout = open(os.devnull, 'w')
# Include python modules not picked up automatically by bbfreeze.
includes = ('libtorrent', 'cairo', 'pangocairo', 'atk', 'pango', 'twisted.internet.utils',
'gio', 'gzip', 'email.mime.multipart', 'email.mime.text', '_cffi_backend')
excludes = ('numpy', 'OpenGL', 'psyco', 'win32ui', 'unittest')
def recipe_gtk_override(mf):
# Override bbfreeze function so that it includes all gtk libraries
# in the installer so users don't require a separate GTK+ installation.
return True
bbfreeze.recipes.recipe_gtk_and_friends = recipe_gtk_override
# Workaround for "ImportError: The 'packaging' package is required" with setuptools > 18.8.
# (https://github.com/pypa/setuptools/issues/517)
bbfreeze.recipes.recipe_pkg_resources = bbfreeze.recipes.include_whole_package('pkg_resources')
fzr = bbfreeze.Freezer(build_dir, includes=includes, excludes=excludes)
fzr.include_py = False
fzr.setIcon(os.path.join(os.path.dirname(deluge.common.__file__), 'ui', 'data', 'pixmaps', 'deluge.ico'))
# TODO: Can/should we grab the script list from setup.py entry_points somehow.
# Hide cmd console popup for these console entries force gui_script True.
force_gui = ['deluge-web', 'deluged']
for force_script in force_gui:
script_path = os.path.join(python_path, 'Scripts', force_script + '-script.py')
shutil.copy(script_path, script_path.replace('script', 'debug-script'))
script_list = []
for script in glob.glob(os.path.join(python_path, 'Scripts\\deluge*-script.py*')):
# Copy the scripts to remove the '-script' suffix before adding to freezer.
new_script = script.replace('-script', '')
shutil.copy(script, new_script)
gui_script = False
script_splitext = os.path.splitext(os.path.basename(new_script))
if script_splitext[1] == '.pyw' or script_splitext[0] in force_gui:
gui_script = True
try:
fzr.addScript(new_script, gui_only=gui_script)
script_list.append(new_script)
except Exception:
os.remove(script)
# Start the freezing process.
fzr()
# Clean up the duplicated scripts.
for script in script_list:
os.remove(script)
# Exclude files which are already included in GTK or Windows. Also exclude unneeded pygame dlls.
excludeDlls = ('MSIMG32.dll', 'MSVCR90.dll', 'MSVCP90.dll', 'MSVCR120.dll',
'POWRPROF.dll', 'DNSAPI.dll', 'USP10.dll', 'MPR.dll',
'jpeg.dll', 'libfreetype-6.dll', 'libpng12-0.dll', 'libtiff.dll',
'SDL_image.dll', 'SDL_ttf.dll')
for exclude_dll in excludeDlls:
try:
os.remove(os.path.join(build_dir, exclude_dll))
except OSError:
pass
# Re-enable printing.
if not DEBUG:
sys.stdout = sys.__stdout__
# Copy gtk locale files.
gtk_locale = os.path.join(gtk_root, 'share/locale')
locale_include_list = ['gtk20.mo', 'locale.alias']
def ignored_files(adir, ignore_filenames):
return [
ignore_file for ignore_file in ignore_filenames
if not os.path.isdir(os.path.join(adir, ignore_file)) and
ignore_file not in locale_include_list
]
shutil.copytree(gtk_locale, os.path.join(build_dir, 'share/locale'), ignore=ignored_files)
# Copy gtk theme files.
theme_include_list = [
[gtk_root, 'share/icons/hicolor/index.theme'],
[gtk_root, 'lib/gtk-2.0/2.10.0/engines'],
[gtk_root, 'share/themes/MS-Windows'],
['DelugeStart Theme', 'lib/gtk-2.0/2.10.0/engines/libmurrine.dll'],
['DelugeStart Theme', 'share/themes/DelugeStart'],
['DelugeStart Theme', 'etc/gtk-2.0/gtkrc']
]
for path_root, path in theme_include_list:
full_path = os.path.join(path_root, path)
if os.path.isdir(full_path):
shutil.copytree(full_path, os.path.join(build_dir, path))
else:
dst_dir = os.path.join(build_dir, os.path.dirname(path))
try:
os.makedirs(dst_dir)
except OSError:
pass
shutil.copy(full_path, dst_dir)
# Add version information to exe files.
for script in script_list:
script_exe = os.path.splitext(os.path.basename(script))[0] + '.exe'
# Don't add to dev build versions.
if not re.search('[a-zA-Z_-]', build_version):
versionInfo = VersionInfo(build_version,
description='Deluge Bittorrent Client',
company='Deluge Team',
product='Deluge',
_copyright='Deluge Team')
stamp(os.path.join(build_dir, script_exe), versionInfo)
# Copy version info to file for nsis script.
with open('VERSION.tmp', 'w') as ver_file:
ver_file.write('build_version = "%s"' % build_version)
# Create the install and uninstall file list for NSIS.
filedir_list = []
for root, dirnames, filenames in os.walk(build_dir):
dirnames.sort()
filenames.sort()
filedir_list.append((root[len(build_dir):], filenames))
with open('install_files.nsh', 'w') as f:
f.write('; Files to install\n')
for dirname, files in filedir_list:
if not dirname:
dirname = os.sep
f.write('\nSetOutPath "$INSTDIR%s"\n' % dirname)
for filename in files:
f.write('File "${BBFREEZE_DIR}%s"\n' % os.path.join(dirname, filename))
with open('uninstall_files.nsh', 'w') as f:
f.write('; Files to uninstall\n')
for dirname, files in reversed(filedir_list):
f.write('\n')
if not dirname:
dirname = os.sep
for filename in files:
f.write('Delete "$INSTDIR%s"\n' % os.path.join(dirname, filename))
f.write('RMDir "$INSTDIR%s"\n' % dirname)
| gpl-3.0 | 7,283,705,416,891,160,000 | 34.701835 | 105 | 0.654118 | false |
zplab/rpc-scope | scope/gui/microscope_widget.py | 1 | 23339 | # This code is licensed under the MIT License (see LICENSE file for details)
from PyQt5 import Qt
import pkg_resources
from . import device_widget
from . import status_widget
from ..simple_rpc import rpc_client
from .. import util
class MicroscopeWidget(device_widget.DeviceWidget):
PROPERTY_ROOT = 'scope.'
PROPERTIES = [
# tuple contains: property, type, and zero or more args that are passed to
# the 'make_'+type+'widget() function.
# ('stand.active_microscopy_method', 'enum', 'stand.available_microscopy_methods'),
('nosepiece.position', 'objective'),
# ('nosepiece.safe_mode', 'bool'),
# ('nosepiece.immersion_mode', 'bool'),
('il.shutter_open', 'bool'),
('tl.shutter_open', 'bool'),
('il.field_wheel', 'enum', 'il.field_wheel_positions'),
('il.filter_cube', 'enum', 'il.filter_cube_values'),
        # The final element of the 'tl.aperture_diaphragm' tuple, 'scope.nosepiece.position', indicates
# that 'tl.aperture_diaphragm_range' may change with 'scope.nosepiece.position'. So,
# 'tl.aperture_diaphragm_range' should be refreshed upon 'scope.nosepiece.position' change.
('tl.aperture_diaphragm', 'int', 'tl.aperture_diaphragm_range', 'nosepiece.position'),
('tl.field_diaphragm', 'int', 'tl.field_diaphragm_range', 'nosepiece.position'),
('tl.condenser_retracted', 'bool'),
('stage.xy_fine_control', 'bool'),
('stage.z_fine_control', 'bool'),
# TODO: use hard max values read from scope, if possible
# TODO: If not possible, verify that hard max values are identical across all scopes
# otherwise make this a config parameter.
('stage.x', 'stage_axis_pos', 225),
('stage.y', 'stage_axis_pos', 76),
('stage.z', 'stage_axis_pos', 26)
]
@classmethod
def can_run(cls, scope):
# We're useful if at least one of our properties can be read. Properties that can not be read
# when the widget is created are not shown in the GUI.
for property, *rest in cls.PROPERTIES:
attr = scope
try:
for name in property.split('.'):
attr = getattr(attr, name)
except:
continue
return True
return False
def __init__(self, scope, parent=None):
super().__init__(scope, parent)
self.limit_pixmaps_and_tooltips = LimitPixmapsAndToolTips()
self.setWindowTitle('Stand')
form = Qt.QFormLayout(self)
form.setContentsMargins(0, 0, 0, 0)
form.setVerticalSpacing(4)
form.setLabelAlignment(Qt.Qt.AlignRight | Qt.Qt.AlignVCenter)
form.setFieldGrowthPolicy(Qt.QFormLayout.ExpandingFieldsGrow)
for property, widget_type, *widget_args in self.PROPERTIES:
self.make_widgets_for_property(self.PROPERTY_ROOT + property, widget_type, widget_args)
if hasattr(scope, 'job_runner'):
form.addRow(status_widget.StatusWidget(scope))
def get_scope_attr(self, property):
"""look up an attribute on the scope object by property name, which is
expected to start with 'scope.' -- e.g. 'scope.stage.z_high_soft_limit'
"""
attr = self.scope
for name in property.split('.')[1:]:
attr = getattr(attr, name)
return attr
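    # Added example for clarity: get_scope_attr('scope.stage.z') skips the
    # leading 'scope.' component and resolves self.scope.stage.z via getattr().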
def make_widgets_for_property(self, property, widget_type, widget_args):
try:
self.get_scope_attr(property)
except AttributeError:
# The property isn't available for this scope object, so don't
# make a widget for it.
return
layout = self.layout()
label = Qt.QLabel(property[len(self.PROPERTY_ROOT):] + ':') # strip the 'scope.' off
label.setSizePolicy(Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding)
widget = getattr(self, 'make_{}_widget'.format(widget_type))(property, *widget_args)
widget.setSizePolicy(Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding)
layout.addRow(label, widget)
def make_bool_widget(self, property):
widget = Qt.QCheckBox()
update = self.subscribe(property, callback=widget.setChecked)
if update is None:
widget.setEnabled(False)
else:
def gui_changed(value):
try:
update(value)
except rpc_client.RPCError as e:
error = 'Could not set {} ({}).'.format(property, e.args[0])
Qt.QMessageBox.warning(self, 'Invalid Value', error)
widget.toggled.connect(gui_changed)
return widget
def make_int_widget(self, property, range_property, range_depends_on_property):
widget = Qt.QWidget()
layout = Qt.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(layout)
slider = Qt.QSlider(Qt.Qt.Horizontal)
slider.setTickInterval(1)
layout.addWidget(slider)
spinbox = Qt.QSpinBox()
layout.addWidget(spinbox)
handling_change = util.Condition() # acts as false, except when in a with-block, where it acts as true
def range_changed(_):
if handling_change:
return
with handling_change:
range = self.get_scope_attr(self.PROPERTY_ROOT + range_property)
slider.setRange(*range)
spinbox.setRange(*range)
self.subscribe(self.PROPERTY_ROOT + range_depends_on_property, callback=range_changed, readonly=True)
def prop_changed(value):
if handling_change:
return
with handling_change:
slider.setValue(value)
spinbox.setValue(value)
update = self.subscribe(property, callback=prop_changed)
if update is None:
spinbox.setEnabled(False)
slider.setEnabled(False)
else:
def gui_changed(value):
if handling_change:
return
with handling_change:
update(value)
# TODO: verify the below doesn't blow up without indexing the
# overloaded valueChanged signal as [int]
slider.valueChanged.connect(gui_changed)
spinbox.valueChanged.connect(gui_changed)
return widget
def make_enum_widget(self, property, choices_property):
widget = Qt.QComboBox()
widget.setEditable(False)
widget.addItems(sorted(self.get_scope_attr(self.PROPERTY_ROOT + choices_property)))
update = self.subscribe(property, callback=widget.setCurrentText)
if update is None:
widget.setEnabled(False)
else:
def gui_changed(value):
try:
update(value)
except rpc_client.RPCError as e:
error = 'Could not set {} ({}).'.format(property, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
widget.currentTextChanged.connect(gui_changed)
return widget
def make_objective_widget(self, property):
widget = Qt.QComboBox()
widget.setEditable(False)
mags = self.get_scope_attr(self.PROPERTY_ROOT + 'nosepiece.all_objectives')
model = _ObjectivesModel(mags, widget.font(), self)
widget.setModel(model)
def prop_changed(value):
widget.setCurrentIndex(value)
update = self.subscribe(property, callback=prop_changed)
if update is None:
widget.setEnabled(False)
else:
def gui_changed(value):
try:
update(value)
except rpc_client.RPCError as e:
error = 'Could not set {} ({}).'.format(property, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
# TODO: verify the below doesn't blow up without indexing the
# overloaded currentIndexChanged signal as [int]
widget.currentIndexChanged.connect(gui_changed)
return widget
def make_stage_axis_pos_widget(self, property, axis_max_val):
widget = Qt.QWidget()
vlayout = Qt.QVBoxLayout()
vlayout.setSpacing(0)
vlayout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(vlayout)
axis_name = property.split('.')[-1]
props = self.scope.properties.properties # dict of tracked properties, updated by property client
# [low limits status indicator] [-------<slider>-------] [high limits status indicator]
slider_layout = Qt.QHBoxLayout()
l, t, r, b = slider_layout.getContentsMargins()
slider_layout.setContentsMargins(l, 0, r, 0)
slider_layout.setSpacing(5)
low_limit_status_label = Qt.QLabel()
# NB: *_limit_status_label pixmaps are set here so that layout does not jump when limit status RPC property updates
# are first received
low_limit_status_label.setPixmap(self.limit_pixmaps_and_tooltips.low_no_limit_pm)
slider_layout.addWidget(low_limit_status_label)
pos_slider_factor = 1e3
pos_slider = Qt.QSlider(Qt.Qt.Horizontal)
pos_slider.setEnabled(False)
pos_slider.setRange(0, pos_slider_factor * axis_max_val)
pos_slider.setValue(0)
slider_layout.addWidget(pos_slider)
high_limit_status_label = Qt.QLabel()
high_limit_status_label.setPixmap(self.limit_pixmaps_and_tooltips.high_no_limit_pm)
slider_layout.addWidget(high_limit_status_label)
vlayout.addLayout(slider_layout)
at_ls_property = self.PROPERTY_ROOT + 'stage.at_{}_low_soft_limit'.format(axis_name)
at_lh_property = self.PROPERTY_ROOT + 'stage.at_{}_low_hard_limit'.format(axis_name)
at_hs_property = self.PROPERTY_ROOT + 'stage.at_{}_high_soft_limit'.format(axis_name)
at_hh_property = self.PROPERTY_ROOT + 'stage.at_{}_high_hard_limit'.format(axis_name)
def at_low_limit_prop_changed(_):
try:
at_s = props[at_ls_property]
at_h = props[at_lh_property]
except KeyError:
return
if at_s and at_h:
pm = self.limit_pixmaps_and_tooltips.low_hard_and_soft_limits_pm
tt = self.limit_pixmaps_and_tooltips.low_hard_and_soft_limits_tt
elif at_s:
pm = self.limit_pixmaps_and_tooltips.low_soft_limit_pm
tt = self.limit_pixmaps_and_tooltips.low_soft_limit_tt
elif at_h:
pm = self.limit_pixmaps_and_tooltips.low_hard_limit_pm
tt = self.limit_pixmaps_and_tooltips.low_hard_limit_tt
else:
pm = self.limit_pixmaps_and_tooltips.low_no_limit_pm
tt = self.limit_pixmaps_and_tooltips.low_no_limit_tt
low_limit_status_label.setPixmap(pm)
low_limit_status_label.setToolTip(tt)
self.subscribe(at_ls_property, at_low_limit_prop_changed, readonly=True)
self.subscribe(at_lh_property, at_low_limit_prop_changed, readonly=True)
def at_high_limit_prop_changed(_):
try:
at_s = props[at_hs_property]
at_h = props[at_hh_property]
except KeyError:
return
if at_s and at_h:
pm = self.limit_pixmaps_and_tooltips.high_hard_and_soft_limits_pm
tt = self.limit_pixmaps_and_tooltips.high_hard_and_soft_limits_tt
elif at_s:
pm = self.limit_pixmaps_and_tooltips.high_soft_limit_pm
tt = self.limit_pixmaps_and_tooltips.high_soft_limit_tt
elif at_h:
pm = self.limit_pixmaps_and_tooltips.high_hard_limit_pm
tt = self.limit_pixmaps_and_tooltips.high_hard_limit_tt
else:
pm = self.limit_pixmaps_and_tooltips.high_no_limit_pm
tt = self.limit_pixmaps_and_tooltips.high_no_limit_tt
high_limit_status_label.setPixmap(pm)
high_limit_status_label.setToolTip(tt)
self.subscribe(at_hs_property, at_high_limit_prop_changed, readonly=True)
self.subscribe(at_hh_property, at_high_limit_prop_changed, readonly=True)
# [stop] [low soft limit text edit] [position text edit] [high soft limit text edit] [reset high soft limit button]
buttons_layout = Qt.QHBoxLayout()
l, t, r, b = buttons_layout.getContentsMargins()
buttons_layout.setSpacing(5)
buttons_layout.setContentsMargins(l, 0, r, 0)
stop_button = Qt.QPushButton(widget.style().standardIcon(Qt.QStyle.SP_BrowserStop), '')
stop_button.setToolTip('Stop movement along {} axis.'.format(axis_name))
stop_button.setEnabled(False)
buttons_layout.addWidget(stop_button)
low_limit_text_widget = FocusLossSignalingLineEdit()
low_limit_text_widget.setMaxLength(8)
low_limit_text_validator = Qt.QDoubleValidator()
low_limit_text_validator.setBottom(0)
low_limit_text_widget.setValidator(low_limit_text_validator)
buttons_layout.addWidget(low_limit_text_widget)
pos_text_widget = FocusLossSignalingLineEdit()
pos_text_widget.setMaxLength(8)
pos_text_validator = Qt.QDoubleValidator()
pos_text_widget.setValidator(pos_text_validator)
buttons_layout.addWidget(pos_text_widget)
high_limit_text_widget = FocusLossSignalingLineEdit()
high_limit_text_widget.setMaxLength(8)
high_limit_text_validator = Qt.QDoubleValidator()
high_limit_text_validator.setTop(axis_max_val)
high_limit_text_widget.setValidator(high_limit_text_validator)
buttons_layout.addWidget(high_limit_text_widget)
reset_limits_button = Qt.QPushButton('Reset limits')
reset_limits_button.setToolTip(
            'Reset {} soft min and max to the smallest\nand largest acceptable values, respectively.'.format(axis_name)
)
buttons_layout.addWidget(reset_limits_button)
vlayout.addLayout(buttons_layout)
def moving_along_axis_changed(value):
stop_button.setEnabled(value)
self.subscribe('{}stage.moving_along_{}'.format(self.PROPERTY_ROOT, axis_name), moving_along_axis_changed, readonly=True)
def stop_moving_along_axis():
try:
self.get_scope_attr(self.PROPERTY_ROOT+'stage.stop_{}'.format(axis_name))()
except rpc_client.RPCError as e:
error = 'Could not stop movement along {} axis ({}).'.format(axis_name, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
# TODO: verify the below doesn't blow up without indexing the
# overloaded clicked signal as [bool]
stop_button.clicked.connect(stop_moving_along_axis)
# low limit sub-widget
low_limit_property = self.PROPERTY_ROOT + 'stage.{}_low_soft_limit'.format(axis_name)
handling_low_soft_limit_change = util.Condition() # start out false, except when used as with-block context manager
def low_limit_prop_changed(value):
if handling_low_soft_limit_change:
return
with handling_low_soft_limit_change:
low_limit_text_widget.setText(str(value))
pos_text_validator.setBottom(value)
high_limit_text_validator.setBottom(value)
update_low_limit = self.subscribe(low_limit_property, low_limit_prop_changed)
if update_low_limit is None:
low_limit_text_widget.setEnabled(False)
else:
def submit_low_limit_text():
if handling_low_soft_limit_change:
return
with handling_low_soft_limit_change:
try:
new_low_limit = float(low_limit_text_widget.text())
except ValueError:
return
try:
update_low_limit(new_low_limit)
except rpc_client.RPCError as e:
error = 'Could not set {} axis to {} ({}).'.format(axis_name, new_low_limit, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
low_limit_text_widget.returnPressed.connect(submit_low_limit_text)
def low_limit_text_focus_lost():
low_limit_text_widget.setText(str(props.get(low_limit_property, '')))
low_limit_text_widget.focus_lost.connect(low_limit_text_focus_lost)
# position sub-widget
handling_pos_change = util.Condition()
def position_changed(value):
if handling_pos_change:
return
with handling_pos_change:
pos_text_widget.setText(str(value))
pos_slider.setValue(int(value * pos_slider_factor))
self.subscribe(property, position_changed, readonly=True)
get_pos = getattr(self.scope.stage, '_get_{}'.format(axis_name))
set_pos = getattr(self.scope.stage, '_set_{}'.format(axis_name))
def submit_pos_text():
if handling_pos_change:
return
with handling_pos_change:
try:
new_pos = float(pos_text_widget.text())
except ValueError:
return
if new_pos != get_pos():
try:
set_pos(new_pos, async_='fire_and_forget')
except rpc_client.RPCError as e:
error = 'Could not set {} axis to {} ({}).'.format(axis_name, new_pos, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
pos_text_widget.returnPressed.connect(submit_pos_text)
def pos_text_focus_lost():
pos_text_widget.setText(str(props.get(property, '')))
pos_text_widget.focus_lost.connect(pos_text_focus_lost)
# high limit sub-widget
high_limit_property = self.PROPERTY_ROOT + 'stage.{}_high_soft_limit'.format(axis_name)
handling_high_soft_limit_change = util.Condition()
def high_limit_prop_changed(value):
if handling_high_soft_limit_change:
return
with handling_high_soft_limit_change:
high_limit_text_widget.setText(str(value))
pos_text_validator.setTop(value)
low_limit_text_validator.setTop(value)
update_high_limit = self.subscribe(high_limit_property, high_limit_prop_changed)
if update_high_limit is None:
high_limit_text_widget.setEnabled(False)
else:
def submit_high_limit_text():
if handling_high_soft_limit_change:
return
with handling_high_soft_limit_change:
try:
new_high_limit = float(high_limit_text_widget.text())
except ValueError:
return
try:
update_high_limit(new_high_limit)
except rpc_client.RPCError as e:
error = 'Could not set {} axis to {} ({}).'.format(axis_name, new_high_limit, e.args[0])
Qt.QMessageBox.warning(self, 'RPC Exception', error)
high_limit_text_widget.returnPressed.connect(submit_high_limit_text)
def high_limit_text_focus_lost():
high_limit_text_widget.setText(str(props.get(high_limit_property, '')))
high_limit_text_widget.focus_lost.connect(high_limit_text_focus_lost)
def reset_limits_button_clicked(_):
update_low_limit(0.0)
self.get_scope_attr(self.PROPERTY_ROOT + 'stage.reset_{}_high_soft_limit'.format(axis_name))()
# TODO: verify the below doesn't blow up without indexing the
# overloaded clicked signal as [bool]
reset_limits_button.clicked.connect(reset_limits_button_clicked)
# We do not receive events for z high soft limit changes initiated by means other than assigning
# to scope.stage.z_high_soft_limit or calling scope.stage.reset_z_high_soft_limit(). However,
# the scope's physical interface does not offer any way to modify z high soft limit, with one
# possible exception: it would make sense for the limit to change with objective in order to prevent
# head crashing. In case that happens, we refresh z high soft limit upon objective change.
# TODO: verify that this is never needed and get rid of it if so
        if axis_name == 'z':
def objective_changed(_):
if handling_high_soft_limit_change:
return
with handling_high_soft_limit_change:
high_limit_text_widget.setText(str(self.get_scope_attr(self.PROPERTY_ROOT + 'stage.z_high_soft_limit')))
self.subscribe(self.PROPERTY_ROOT + 'nosepiece.position', objective_changed, readonly=True)
return widget
class _ObjectivesModel(Qt.QAbstractListModel):
def __init__(self, mags, font, parent=None):
super().__init__(parent)
self.mags = mags
self.empty_pos_font = Qt.QFont(font)
self.empty_pos_font.setItalic(True)
def rowCount(self, _=None):
return len(self.mags)
def flags(self, midx):
f = Qt.Qt.ItemNeverHasChildren
if midx.isValid():
row = midx.row()
if row > 0:
f |= Qt.Qt.ItemIsEnabled | Qt.Qt.ItemIsSelectable
return f
def data(self, midx, role=Qt.Qt.DisplayRole):
if midx.isValid():
row = midx.row()
mag = self.mags[row]
if role == Qt.Qt.DisplayRole:
r = '{}: {}{}'.format(
row,
'BETWEEN POSITIONS' if row == 0 else mag,
'' if mag is None else '×')
return Qt.QVariant(r)
if role == Qt.Qt.FontRole and mag is None:
return Qt.QVariant(self.empty_pos_font)
return Qt.QVariant()
class LimitPixmapsAndToolTips:
def __init__(self, height=25):
flip = Qt.QTransform()
flip.rotate(180)
for icon in ('no_limit', 'soft_limit', 'hard_limit', 'hard_and_soft_limits'):
fname = pkg_resources.resource_filename(__name__, f'limit_icons/{icon}.svg')
im = Qt.QImage(fname).scaledToHeight(height)
setattr(self, 'low_'+icon+'_pm', Qt.QPixmap.fromImage(im))
setattr(self, 'high_'+icon+'_pm', Qt.QPixmap.fromImage(im.transformed(flip)))
setattr(self, 'low_'+icon+'_tt', icon[0].capitalize() + icon[1:].replace('_', ' ') + ' reached.')
setattr(self, 'high_'+icon+'_tt', icon[0].capitalize() + icon[1:].replace('_', ' ') + ' reached.')
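        # Added note: each pass through this loop defines four attributes, e.g.
        # for 'soft_limit' it creates low_soft_limit_pm, high_soft_limit_pm (the
        # same icon flipped 180 degrees) and the matching low_soft_limit_tt /
        # high_soft_limit_tt tooltip strings ('Soft limit reached.').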
class FocusLossSignalingLineEdit(Qt.QLineEdit):
focus_lost = Qt.pyqtSignal()
def focusOutEvent(self, event):
super().focusOutEvent(event)
self.focus_lost.emit()
def sizeHint(self):
hint = super().sizeHint()
hint.setWidth(self.fontMetrics().width('44.57749') * 1.3)
return hint
| mit | -6,965,134,118,459,684,000 | 45.769539 | 129 | 0.597395 | false |
Antreasgr/Random-Graphs | Python/SHET.py | 1 | 8516 | import os
# import networkx as nx
import numpy
from numpy.random import RandomState
from clique_tree import *
from nx_converters import *
from randomizer import *
from subtrees import *
from datetime import datetime
from Runners import *
from report_generator import *
from enum import Enum
import yaml
from yaml import Loader, Dumper
# from joblib import Parallel, delayed
# import plotter
"""
Create a random chordal graph
"""
def tree_generation(n_vert, rand):
"""
Creates a random tree on n nodes
    and creates the adjacency lists for each node
"""
tree = [TreeNode(0)]
for uid in range(0, n_vert - 1):
parent, _ = rand.next_element(tree)
newnode = TreeNode(uid + 1)
# update the adjacency lists
newnode.Ax.append(parent)
parent.Ax.append(newnode)
parent.Dx[newnode] = len(parent.Ax) - 1
newnode.Dx[parent] = len(newnode.Ax) - 1
# update helper, children list, parent pointer
parent.children.append(newnode)
newnode.parent = parent
# append to tree
tree.append(newnode)
return tree
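# Note added for clarity: after tree_generation each TreeNode keeps its
# neighbours in Ax, the position of every neighbour inside Ax in Dx, and
# explicit parent/children links, which the subtree generators rely on.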
def chordal_generation(run, rand):
"""
Generate a random chordal graph with n vertices, k is the algorithm parameter
"""
k = run["Parameters"]["k"]
n = run["Parameters"]["n"]
version = run["Parameters"]["version"]
if 2 * k - 1 > n:
raise Exception("chordal gen parameter k must be lower than n/2")
print("Begin Run ".center(70, "-"))
print("Parameters: ")
formatstr = ''
listvalues = [str(v) for v in run["Parameters"].values()]
listkeys = list(run["Parameters"].keys())
for ii, par in enumerate(listvalues):
formatstr += '{' + str(ii) + ':>' + str(max(len(par), len(listkeys[ii])) + 1) + '} |'
print(formatstr.format(*listkeys))
print(formatstr.format(*listvalues))
print("Times: ".center(70, "-"))
with Timer("t_real_tree", run["Times"]):
tree = tree_generation(n, rand)
with Timer("t_subtrees_2", run["Times"]):
if version == SHETVersion.ConnectedNodes:
connected_nodes(tree, n, rand)
elif version == SHETVersion.PrunedTree:
fraction = run["Parameters"]["edge_fraction"]
barier = run["Parameters"]["barier"]
for sub_tree_index in range(n):
pruned_tree(tree, n, sub_tree_index, fraction, barier, rand)
else:
for node in tree:
node.s = 0
for subtree_index in range(0, n):
sub_tree_gen(tree, k, subtree_index, rand, version)
# convert to networkx, our main algorithm
with Timer("t_ctree", run["Times"]):
nx_chordal, final_cforest = convert_clique_tree_networkx2(tree, n, True)
run["Graphs"]["tree"] = tree
run["Graphs"]["nx_chordal"] = nx_chordal
run["Graphs"]["final_cforest"] = final_cforest
print("End Run".center(70, "-"))
def post_process(run):
out = run["Output"]
graphs = run["Graphs"]
stats = run["Stats"]
times = run["Times"]
# get number of conected components
# stats["ncc"] = nx.number_connected_components(graphs["nx_chordal"])
# calculate time, and ratios
stats["total"] = times["t_real_tree"] + times["t_subtrees_2"] + times["t_ctree"]
# stats["ratio[total/chordal]"] = stats["total"] / float(times["t_chordal"])
# stats["ratio[total/forest]"] = stats["total"] / float(times["t_forestverify"])
# stats["ratio[total/[chordal+forest]]"] = stats["total"] / float(times["t_forestverify"] + times["t_chordal"])
# get output parameters
out["nodes"] = run["Parameters"]["n"] # len(graphs["nx_chordal"].nodes())
out["edges"] = graphs["nx_chordal"].size() # len(graphs["nx_chordal"].edges())
stats["edge_density"] = float(out["edges"]) / (float(out["nodes"] * (out["nodes"] - 1)) / 2)
temp_forest = cForest(1)
temp_forest.ctree.append(graphs["tree"])
# calculate tree output parameters
out["clique_trees"] = [dfs_forest(graphs["final_cforest"], run["Parameters"]["n"])]
ct_stats = out["clique_trees"][0]
ct_stats.max_clique_edge_distribution = (ct_stats.max_size * (ct_stats.max_size - 1) / 2) / out["edges"]
stats["ncc"] = len(graphs["final_cforest"].ctree)
# convert clique forest to nx for export to json
nx_ctrees = None # [convert_tree_networkx(tree) for tree in graphs["final_cforest"].ctree]
# nx_ctrees.insert(0, convert_tree_networkx(graphs["tree"]))
return nx_ctrees
def run_SHET_PRUNED(list_vertices, list_f_s, num_runs):
shet_data = []
for j, num in enumerate(list_vertices):
for f, s in list_f_s[j]:
Runners = []
for i in range(num_runs):
randomizer = Randomizer(2 * num)
Runners.append(runner_factory(num, NAME, None, k=0, edge_fraction=f, barier=s, version=SHETVersion.PrunedTree))
chordal_generation(Runners[-1], randomizer)
trees1 = post_process(Runners[-1])
Runners[-1]["Stats"]["randoms"] = randomizer.total_count
# cleanup some memory
del Runners[-1]["Graphs"]
print(".....Done")
shet_data.append(merge_runners(Runners))
run_reports_data(NAME, shet_data)
def run_normal_SHET(list_vertices, list_k, num_runs):
shet_data = []
for j, num in enumerate(list_vertices):
for factor in list_k[j]:
Runners = []
par_k = int(num * factor)
par_k = max(1, par_k)
par_k = min(num // 2, par_k)
for i in range(num_runs):
randomizer = Randomizer(2 * num)
Runners.append(runner_factory(num, NAME, None, k=par_k, version=SHETVersion.Dict))
chordal_generation(Runners[-1], randomizer)
trees1 = post_process(Runners[-1])
Runners[-1]["Parameters"]["k/n"] = str(par_k / num)
Runners[-1]["Stats"]["randoms"] = randomizer.total_count
# cleanup some memory
del Runners[-1]["Graphs"]
print(".....Done")
# # RUNNER contains all data and statistics
# filename = "Results/SHET/Run_{}_{}_{}.yml".format(num, par_k, datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
# if not os.path.isdir(os.path.dirname(filename)):
# os.makedirs(os.path.dirname(filename))
# with io.open(filename, 'w') as file:
# print_statistics(Runners, file)
shet_data.append(merge_runners(Runners))
run_reports_data(NAME, shet_data)
def run_SHET_Connected_Nodes(list_vertices, list_lamda, num_runs):
shet_data = []
for j, num in enumerate(list_vertices):
for l in list_lamda[j]:
Runners = []
for i in range(num_runs):
randomizer = Randomizer(2 * num)
Runners.append(runner_factory(num, NAME, None, k=0, lamda=l, version=SHETVersion.ConnectedNodes))
chordal_generation(Runners[-1], randomizer)
trees1 = post_process(Runners[-1])
Runners[-1]["Stats"]["randoms"] = randomizer.total_count
# cleanup some memory
del Runners[-1]["Graphs"]
print(".....Done")
shet_data.append(merge_runners(Runners))
run_reports_data(NAME, shet_data)
NAME = "SHET_CNODES"
if __name__ == '__main__':
NUM_VERTICES = [50, 100, 500, 1000, 2500, 5000, 10000]
PAR_K_FACTOR = [
[0.03, 0.1, 0.2, 0.32, 0.49], # 50
[0.04, 0.1, 0.22, 0.33, 0.49], # 100
[0.02, 0.05, 0.08, 0.2, 0.40], # 500
[0.02, 0.05, 0.08, 0.18, 0.33], # 1000
[0.01, 0.04, 0.07, 0.13, 0.36], # 2500
[0.01, 0.04, 0.07, 0.1, 0.36], # 5000
[0.009, 0.03, 0.06, 0.09, 0.33] # 10000
]
PAR_F_S_PRUNED = [
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 50
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 100
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 500
[(0.7, 0.6), (0.14, 0.85), (0.1, 0.93)], # 1000
[(0.7, 0.7), (0.12, 0.9), (0.077, 0.95)], # 2500
[(0.700, 0.75), (0.080, 0.91), (0.045, 0.96)], # 5000
[(0.70, 0.81), (0.060, 0.93), (0.031, 0.96)] # 10000
]
PAR_L = [[0], [0], [0], [0], [0], [0], [0]]
# run_SHET_PRUNED(NUM_VERTICES, PAR_F_S_PRUNED, 3)
# run_normal_SHET(num_runs, PAR_K_FACTOR, 10)
run_SHET_Connected_Nodes(NUM_VERTICES, PAR_L, 5)
| mit | 1,985,836,268,281,842,400 | 34.781513 | 127 | 0.564937 | false |
lmiphay/gentoo-oam | oam/eventparser/scanner.py | 1 | 1799 | #!/usr/bin/python
from __future__ import print_function
import sys
import os
import subprocess
import logging
import glob
import collections
import unittest
import re
class Scanner:
def __init__(self, report, checker):
self.report = report
self.checker = checker
self.logger = logging.getLogger("oam.eventparser.scanner")
def parse(self):
for chk in self.checker:
self.in_block = False
for line, i in self.report.scan():
match = re.search(chk.RECORD, line)
self.logger.log(logging.INFO, "line: %d, %s", i, line)
if match:
self.logger.log(logging.INFO, "parse-match: %s", str(match.groups()))
self.groups = match.groups()
if self.groups[0]:
self.in_block = True
elif self.groups[-1]:
self.in_block = False
elif self.in_block:
ev = chk.process(line, match)
if ev: yield ev
if self.in_block:
self.report.consume(i, chk.TAG)
if chk.ev != None: yield chk.ev
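    # Sketch of the checker interface assumed above (inferred, not part of the
    # original module): each checker exposes a RECORD regex whose first and last
    # groups mark the start and end of a block, a TAG passed to report.consume()
    # when a block is still open at the end of the report, a process(line, match)
    # method that may return an event, and an 'ev' attribute yielded after the
    # whole report has been scanned.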
class ScannerTestCase(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger("oam.eventparser.scanner.test")
def test_scanner(self):
pass
if __name__ == '__main__':
if len(sys.argv)==1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
unittest.main()
else:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
sys.exit(EventParser(sys.argv[1]).run())
| gpl-2.0 | 8,609,256,467,794,436,000 | 30.017241 | 89 | 0.526959 | false |
nsmoooose/csp | csp/data/ui/tutorials/mission.py | 1 | 2231 | #!/usr/bin/python
# Combat Simulator Project
# Copyright (C) 2002-2009 The Combat Simulator Project
# http://csp.sourceforge.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Combat Simulator Project : Tutorial mission base interface
"""
import csp.cspsim
from csp.data.ui.scripts.gamescreenmanager import GameScreenManager
from csp.data.ui.scripts.utils import SlotManager
class Mission:
"""Base class for all tutorial missions."""
def __init__(self, cspsim):
self.cspsim = cspsim
def name(self):
"""Provides a name to the mission. Usually a name that comes from
a localized xml resource. Example: ${tutorials_takeoff}"""
return "No name set"
def describingUI(self):
"""Name of an xml file that contains the graphical user interface
for this mission. Must be implemented by all tutorials."""
        raise NotImplementedError("Implement this interface to be able " +
                                  "to show a describing user interface for the mission.")
def theatre(self):
# Informs about what theatre to load.
return "No theatre set"
def startMission(self):
windowManager = self.cspsim.getWindowManager()
windowManager.closeAll()
self.cspsim.displayLogoScreen()
self.cspsim.loadSimulation()
self.modifyTheatreForMission()
gameScreenManager = GameScreenManager(self.cspsim)
self.onStart()
def modifyTheatreForMission(self):
pass
def onStart(self):
pass
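# A minimal illustrative subclass (added sketch, not part of the original file);
# the resource names below are hypothetical placeholders.
class ExampleTakeOffMission(Mission):
    def name(self):
        return "${tutorials_takeoff}"
    def describingUI(self):
        return "tutorials/takeoff.xml"
    def theatre(self):
        return "terrain/example_theatre.xml"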
| gpl-2.0 | 3,707,970,281,633,007,600 | 32.298507 | 77 | 0.688929 | false |
cdeil/gammalib | inst/cta/test/test_irf_offset.py | 1 | 5601 | #! /usr/bin/env python
# ===========================================================================================#
# This script tests the offset angle dependence of the instrumental response function.
#
# ===========================================================================================#
from gammalib import *
# ====================== #
# Set point source model #
# ====================== #
def ptsrc_model(ra=0.0, dec=0.0):
"""
    Set point source model.
"""
# Set shell centre
pos = GSkyDir()
pos.radec_deg(ra, dec)
# Set spatial model
spatial = GModelSpatialPtsrc(pos)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelPointSource(spatial, spectral)
# Return model
return model
# =============== #
# Set shell model #
# =============== #
def shell_model(ra=0.3, dec=0.3, radius=0.3, width=0.1):
"""
Set shell model.
"""
# Set shell centre
center = GSkyDir()
center.radec_deg(ra, dec)
# Set radial model
radial = GModelRadialShell(center, radius, width, False)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelExtendedSource(radial, spectral)
# Return model
return model
# =============== #
# Set disk model #
# =============== #
def disk_model(ra=359.6, dec=-0.2, radius=0.4):
"""
Set disk model.
"""
# Set disk centre
center = GSkyDir()
center.radec_deg(ra, dec)
# Set radial model
radial = GModelRadialDisk(center, radius)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelExtendedSource(radial, spectral)
# Return model
return model
# ================== #
# Set Gaussian model #
# ================== #
def gauss_model(ra=359.6, dec=+0.1, sigma=0.2):
"""
Set Gaussian model.
"""
# Set Gaussian centre
center = GSkyDir()
center.radec_deg(ra, dec)
# Set radial model
radial = GModelRadialGauss(center, sigma)
# Set spectral model
spectral = GModelSpectralPlaw(1.0, -2.0)
# Set sky model
model = GModelExtendedSource(radial, spectral)
# Return model
return model
# ========================== #
# Set binned CTA observation #
# ========================== #
def observation(ra=0.0, dec=0.0, binsz=0.05, npix=200, ebins=10):
"""
Set binned CTA observation.
"""
# Allocate observation
obs = GCTAObservation()
# Set response
obs.response("kb_E_50h_v3", "../caldb")
# Set pointing
dir = GSkyDir()
pnt = GCTAPointing()
dir.radec_deg(ra, dec)
pnt.dir(dir)
obs.pointing(pnt)
    # Set energy boundaries and good time intervals
ebounds = GEbounds()
emin = GEnergy()
emax = GEnergy()
emin.TeV(0.1)
emax.TeV(100.0)
ebounds.setlog(emin, emax, ebins)
gti = GGti()
tmin = GTime()
tmax = GTime()
tmin.met(0.0)
tmax.met(1800.0)
gti.append(tmin, tmax)
map = GSkymap("CAR", "CEL", ra, dec, -binsz, binsz, npix, npix, ebins)
cube = GCTAEventCube(map, ebounds, gti)
obs.events(cube)
# Optionally show observation
# print obs
# Return observation
return obs
# ================ #
# Create model map #
# ================ #
def modmap(obs, models, phi=0, theta=0, filename="modmap.fits"):
"""
Create model map.
"""
# Loop over all bins
for bin in obs.events():
# Cast to CTA bin
bin = cast_GCTAEventBin(bin)
# Set bin energy and time as source energy and time (no dispersion)
srcDir = bin.dir()
srcEng = bin.energy()
srcTime = bin.time()
# Compute IRF
irf = 0.0
for model in models:
irf += obs.response().irf(bin, model, srcEng, srcTime, obs) * bin.size()
# Set bin
bin.counts(irf)
# Save observation
obs.save(filename, True)
# Return
return
#==========================#
# Main routine entry point #
#==========================#
if __name__ == '__main__':
"""
Test offset angle dependence of IRF.
"""
# Dump header
print
print "***************************************"
print "* Test offset angle dependence of IRF *"
print "***************************************"
    # Select the model set to use (1=point sources, 2=disks)
set = 2
# Set CTA observation
obs = observation()
print obs
# Set offset angle range
# offsets = [0.0, 1.0, 2.0, 3.0]
offsets = [0.0]
# Loop over offset angles
for offset in offsets:
# Set models
if set == 1:
model1 = ptsrc_model(ra=0.0, dec=offset)
model2 = ptsrc_model(ra=1.0, dec=0.0)
model3 = ptsrc_model(ra=2.0, dec=0.0)
model4 = ptsrc_model(ra=3.0, dec=0.0)
model5 = ptsrc_model(ra=4.0, dec=0.0)
models = [model1, model2, model3, model4, model5]
elif set == 2:
model1 = disk_model(ra=0.0, dec=offset)
model2 = disk_model(ra=1.0, dec=0.0)
model3 = disk_model(ra=2.0, dec=0.0)
model4 = disk_model(ra=3.0, dec=0.0)
model5 = disk_model(ra=4.0, dec=0.0)
models = [model1, model2, model3, model4, model5]
# model = shell_model(ra=0.0, dec=offset)
# model = disk_model(ra=0.0, dec=offset)
# model = gauss_model(ra=0.0, dec=offset)
# Print model
# print model
# Set filename
filename = "modmap_theta%2.2d.fits" % (int(offset * 10.0))
# Create model map
modmap(obs, models, phi=0.0, theta=0.0, filename=filename)
| gpl-3.0 | 4,921,560,824,327,337,000 | 22.834043 | 94 | 0.518657 | false |
Ehnonymoose/delver | server/delver.py | 1 | 2586 | from flask import Flask, request, jsonify, send_from_directory, make_response
from functools import wraps, update_wrapper
from datetime import datetime
from sqlalchemy import and_, or_
from sqlalchemy.sql import select
app = Flask(__name__)
from database import db_session
import query
import models
import json
NUM_RESULTS_PER_QUERY = 15
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
from models import Card, CardPrinting, Set
def serializeCard(card, printing, cardSet):
data = {
'name': card.name,
'layout': card.layout,
'related': [x.name for x in card.related],
'mana': card.manaCost,
'cmc': card.cmc,
'types': card.types,
'rules': card.rules
}
if card.power is not None:
data['power'] = card.power
if card.toughness is not None:
data['toughness'] = card.toughness
if card.loyalty is not None:
data['loyalty'] = card.loyalty
if printing.flavor is not None:
data['flavor'] = printing.flavor
if printing.rarity is not None:
data['rarity'] = printing.rarity
return data
@app.route("/query")
def handleQuery():
tokens = query.parse(request.args.get('q', ''))
print(tokens)
start = request.args.get('start', 0)
try:
start = int(start)
except:
start = 0
print (start)
clauses = query.generateClauses(tokens)
statement = and_(*clauses)
sql = db_session.query(models.Card, models.CardPrinting, models.Set)\
.join(models.CardPrinting).join(models.Set)\
.filter(statement)\
.group_by(models.Card.id).order_by(models.Card.name)
# Get a count of all results
count = sql.count()
# Now get a selection of results
if start > 0:
sql = sql.offset(start)
sql = sql.limit(NUM_RESULTS_PER_QUERY)
print(sql)
results = sql.all()
serializedResults = [ serializeCard(*result) for result in results ]
results = {
'count': count,
'start': start,
'cards': serializedResults
}
return json.dumps(results)
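# Illustrative response shape (added comment; the values are hypothetical):
#   {"count": 42, "start": 0, "cards": [{"name": "...", "layout": "normal",
#    "related": [], "mana": "{2}{U}", "cmc": 3.0, "types": "...", "rules": "...",
#    "rarity": "Rare"}, ...]}
# power/toughness/loyalty/flavor/rarity are included only when present.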
def nocache(view):
@wraps(view)
def no_cache(*args, **kwargs):
response = make_response(view(*args, **kwargs))
response.headers['Last-Modified'] = datetime.now()
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
return update_wrapper(no_cache, view)
@app.route('/', defaults={'path':'index.html'})
@app.route('/<path:path>')
@nocache
def main(path):
return send_from_directory('public', path)
if __name__ == "__main__":
app.run()
| mit | 8,030,225,006,713,413,000 | 20.371901 | 119 | 0.678268 | false |
hlt-bme-hu/eval-embed | translate.py | 1 | 5706 | import sys
import numpy
from collections import defaultdict
from itertools import chain
import argparse
def renormalize(M):
M /= numpy.linalg.norm(M, axis=1)[:, None]
return
def renormalize_vector(v):
return v / numpy.linalg.norm(v)
def outer(l1, l2):
return list(chain(*[[(x,y) for x in l1] for y in l2]))
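# e.g. outer([0, 1], [5, 6]) == [(0, 5), (1, 5), (0, 6), (1, 6)], i.e. every
# element of l1 is paired with every element of l2 (illustrative added comment).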
def read_embed(file, word_list):
n, dim = map(int, file.readline().strip().split())
W = []
V = defaultdict(list)
i2w = {}
i = 0
multi = False
for line in file:
parts = line.strip().split()
if len(word_list) == 0 or parts[0] in word_list:
W.append(map(float, parts[1:]))
V[parts[0]].append(i)
i2w[i] = parts[0]
if not multi and len(V[parts[0]]) > 1:
multi = True
i += 1
return numpy.array(W), dict(V), i2w, multi
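# Expected embedding file layout (inferred from read_embed, added for clarity):
# the first line holds "<row count> <dimension>", every following line holds a
# word and <dimension> floats; a word may appear on several lines, in which case
# V[word] collects all of its row indices and 'multi' becomes True.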
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("type", type=str, choices=["train", "test"])
parser.add_argument("embed1")
parser.add_argument("embed2")
parser.add_argument("seed_dict")
parser.add_argument("--train-mode", dest="train_mode", default="single",
choices=["single", "first", "all"])
parser.add_argument("-n", dest="n", default=5, type=int,
help="number of examples shown")
parser.add_argument("--verbose", help="writes translation examples to stderr",
action="store_true")
parser.add_argument("--fit", dest="fit", type=str, default="lin",
help="seeks for linear or orthogonal transformation",
choices=['lin', 'ortho'])
parser.add_argument("--normalize", default=False, action="store_true",
help="normalizes embedding before fitting the translation matrix")
args = parser.parse_args()
seed_list = [tuple(line.strip().split()) for line in open(args.seed_dict, "r")]
if args.type == "train":
lang1_words = [pair[0] for pair in seed_list]
lang2_words = [pair[1] for pair in seed_list]
else:
if args.verbose:
lang1_words = []
lang2_words = []
else:
lang1_words = [pair[0] for pair in seed_list]
lang2_words = []
W1, V1, i2w1, multi1 = read_embed(open(args.embed1), lang1_words)
W2, V2, i2w2, multi2 = read_embed(open(args.embed2), lang2_words)
if args.type == "train":
M1 = numpy.zeros((0, W1.shape[1]))
M2 = numpy.zeros((0, W2.shape[1]))
if args.train_mode == "single":
if multi1 or multi2:
print >>sys.stderr, "Not a single prototype embedding!"
exit(1)
train_pairs = [(V1[s], V2[t]) for s, t in seed_list if s in V1 and t in V2]
if args.train_mode == "first":
train_pairs = [(p1[0], p2[0]) for p1, p2 in train_pairs]
else:
train_pairs = list(chain(*[outer(p1, p2) for p1, p2 in train_pairs]))
lang1_indices, lang2_indices = zip(*train_pairs)
M1 = W1[lang1_indices, :]
M2 = W2[lang2_indices, :]
if args.normalize:
renormalize(M1)
renormalize(M2)
if args.fit == "lin":
T = numpy.linalg.lstsq(M1, M2)[0]
else:
M=M1.transpose().dot(M2)
U, s, V = numpy.linalg.svd(M, full_matrices=True)
T=U.dot(V)
numpy.savetxt(sys.stdout, T)
else:
T = numpy.loadtxt(sys.stdin)
renormalize(W2)
seed_dict = defaultdict(set)
for source, target in seed_list:
seed_dict[source].add(target)
seed_dict = dict(seed_dict)
for source, targets in seed_dict.iteritems():
weak_hit = W2.shape[0]
weak_answers = list(chain(*[V2[t] for t in targets if t in V2]))
strong_hits = [W2.shape[0]] * len(targets)
strong_answers = [V2[t] if t in V2 else [] for t in targets]
if source in V1:
for s in V1[source]:
translated = renormalize_vector(W1[s].dot(T))
scores = W2.dot(translated)
indices = numpy.argsort(scores)[::-1]
if args.verbose:
closest = (numpy.argsort(W1.dot(W1[s]))[::-1])[:args.n]
for c in closest:
print >>sys.stderr, i2w1[c],
print >>sys.stderr, "->",
for t in indices[:args.n]:
print >>sys.stderr, i2w2[t],
print >>sys.stderr, "|",
for a in targets:
print >>sys.stderr, a,
print >>sys.stderr
if len(weak_answers) > 0:
this_weak_hit = min(list(indices).index(t) for t in weak_answers)
if this_weak_hit < weak_hit:
weak_hit = this_weak_hit
for j in range(len(targets)):
if len(strong_answers[j]) > 0:
this_strong_hit = min(list(indices).index(t) for t in strong_answers[j])
if this_strong_hit < strong_hits[j]:
strong_hits[j] = this_strong_hit
for strong_hit, target in zip(*[strong_hits, targets]):
print weak_hit + 1, strong_hit + 1, source, target
| lgpl-3.0 | 7,749,965,516,054,434,000 | 38.625 | 100 | 0.502454 | false |
wilg64/MarkovTweet | markovtweet.py | 1 | 5870 | import tweepy
import json
import re
import time
import random
def create_api(config_filename):
"""
Creates an authorized tweepy API object given a config file containing
appropriate twitter application keys
:param config_filename: string containing the config filename
:return: the tweepy API object associated with the authorized twitter
application
"""
with open(config_filename) as api_keys:
keys = json.load(api_keys)['twitter']
api_key = keys['API Key']
secret_key = keys['API Secret']
access_tok = keys['Access Token']
access_tok_sec = keys['Access Token Secret']
auth = tweepy.OAuthHandler(api_key,secret_key)
auth.set_access_token(access_tok, access_tok_sec)
api = tweepy.API(auth)
return api
def limit_handled(cursor):
"""
Function to handle api call limits. When limit is reached, the function
will wait 15 minutes before iterating. From Tweepy website
:param cursor:
:return:
"""
while True:
try:
yield cursor.next()
except tweepy.RateLimitError:
time.sleep(15 * 60)
def tokenize(tweet):
"""
Uses regular expressions to tokenize tweets
:param tweet: the text of a given tweet
:return: the tokenization of that tweet as a list
"""
emoticons_str = r"""
(?:
[:=;] #
[oO\-]?
[D\)\]\(\]/\\OpP]
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
return tokens_re.findall(tweet)
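# Illustrative example (added): the patterns above keep mentions, hashtags, URLs
# and emoticons intact, so tokenize("@user check http://t.co/abc #cool :)")
# should yield roughly ['@user', 'check', 'http://t.co/abc', '#cool', ':)'].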
class Markov_Chain:
def __init__(self):
self.mc = {}
class Probability_Distribution:
def __init__(self):
self.dist = {}
self.total = 0
def pick(self):
"""
            Returns a token drawn at random according to the current distribution
:return: a random token from the distribution
"""
randnum = random.randrange(self.total)
currDex = 0
for token in self.dist:
currCnt = self.dist[token]
if randnum < currCnt + currDex:
return token
currDex += currCnt
def update(self, token):
"""
Increment the probability of encountering a certain token
:param token: a string containing the token
"""
if token in self.dist:
self.dist[token] += 1
else:
self.dist[token] = 1
self.total += 1
def update_markov_chain(self, tokens):
"""
Updates the markov structure with a new tokenized tweet
:param tokens: list of strings from tokenized tweet
"""
for i in range(1,len(tokens)):
if tokens[i-1] in self.mc:
self.mc[tokens[i-1]].update(tokens[i])
else:
self.mc[tokens[i-1]] = self.Probability_Distribution()
self.mc[tokens[i-1]].update(tokens[i])
#need to account for final token
if i == len(tokens) - 1:
if tokens[i] in self.mc:
self.mc[tokens[i]].update('END_OF_TWEET')
else:
self.mc[tokens[i]] = self.Probability_Distribution()
self.mc[tokens[i]].update('END_OF_TWEET')
def train_on_tweets(self, api, ids, limit = -1):
"""
Trains the given markov chain on the given twitter handles
:param api: the authorized tweepy api object
:param ids: list of ids you'd like to train on
:param limit: limits the number of tweets, default no limit
:return:
"""
for user in ids:
if (limit > 0):
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id = user).items(limit)):
self.update_markov_chain(tokenize(tweet.text))
else:
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id = user).items()):
self.update_markov_chain(tokenize(tweet.text))
def save_markov_chain(self, filename):
"""
Serializes a markov chain into a JSON file
:param filename: string containing path
"""
        with open(filename, 'w') as outfile:
            # json.dumps on its own discards its result; dump the chain into the
            # file, reducing each Probability_Distribution to a plain dict so
            # that the structure is JSON serializable
            json.dump({token: {'dist': pd.dist, 'total': pd.total}
                       for token, pd in self.mc.iteritems()}, outfile)
def load_markov_chain(self, filename):
"""
Loads a previously trained markov chain from a json file
:param filename: string containing path
"""
        with open(filename) as infile:
            data = json.load(infile)
        # rebuild the Probability_Distribution objects written by save_markov_chain
        self.mc = {}
        for token, entry in data.iteritems():
            pd = self.Probability_Distribution()
            pd.dist, pd.total = entry['dist'], entry['total']
            self.mc[token] = pd
def generate_next_token(self, token):
"""
Given a token, produces a likely next token
:param token:
:return:
"""
return self.mc[token].pick()
def generate_tweet(self, seed):
"""
        Takes an initial word, then generates a tweet string
:param seed: the initial word
:return: string containing generated tweet
"""
tweet = seed
while len(tweet) < 140:
try:
next = self.generate_next_token(seed)
if next == "END_OF_TWEET":
break
tweet += " " + next
seed = next
except KeyError:
print "Seed not present in the Markov Chain"
return ""
        return tweet
| mit | 7,790,373,430,743,105,000 | 27.5 | 101 | 0.529642 | false |
henryiii/rootpy | rootpy/plotting/utils.py | 1 | 14257 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from math import log
import operator
import ROOT
from .canvas import _PadBase
from .hist import _Hist, Hist, HistStack
from .graph import _Graph1DBase, Graph
from ..context import preserve_current_canvas, do_nothing
from ..extern.six.moves import range
__all__ = [
'draw',
'get_limits',
'get_band',
'canvases_with',
'find_all_primitives',
'tick_length_pixels',
]
def draw(plottables, pad=None, same=False,
xaxis=None, yaxis=None,
xtitle=None, ytitle=None,
xlimits=None, ylimits=None,
xdivisions=None, ydivisions=None,
logx=False, logy=False,
**kwargs):
"""
Draw a list of histograms, stacks, and/or graphs.
Parameters
----------
plottables : Hist, Graph, HistStack, or list of such objects
List of objects to draw.
pad : Pad or Canvas, optional (default=None)
The pad to draw onto. If None then use the current global pad.
same : bool, optional (default=False)
If True then use 'SAME' draw option for all objects instead of
all but the first. Use this option if you are drawing onto a pad
that already holds drawn objects.
xaxis : TAxis, optional (default=None)
Use this x-axis or use the x-axis of the first plottable if None.
yaxis : TAxis, optional (default=None)
Use this y-axis or use the y-axis of the first plottable if None.
xtitle : str, optional (default=None)
Set the x-axis title.
ytitle : str, optional (default=None)
Set the y-axis title.
xlimits : tuple, optional (default=None)
Set the x-axis limits with a 2-tuple of (min, max)
ylimits : tuple, optional (default=None)
Set the y-axis limits with a 2-tuple of (min, max)
xdivisions : int, optional (default=None)
Set the number of divisions for the x-axis
ydivisions : int, optional (default=None)
Set the number of divisions for the y-axis
logx : bool, optional (default=False)
If True, then set the x-axis to log scale.
logy : bool, optional (default=False)
If True, then set the y-axis to log scale.
kwargs : dict
All extra arguments are passed to get_limits when determining the axis
limits.
Returns
-------
(xaxis, yaxis), (xmin, xmax, ymin, ymax) : tuple
The axes and axes bounds.
See Also
--------
get_limits
"""
context = preserve_current_canvas if pad else do_nothing
if not isinstance(plottables, (tuple, list)):
plottables = [plottables]
elif not plottables:
raise ValueError("plottables is empty")
with context():
if pad is not None:
pad.cd()
# get the axes limits
xmin, xmax, ymin, ymax = get_limits(plottables,
logx=logx, logy=logy,
**kwargs)
if xlimits is not None:
xmin, xmax = xlimits
if ylimits is not None:
ymin, ymax = ylimits
if not same:
obj = plottables.pop(0)
if isinstance(obj, ROOT.THStack):
obj.SetMinimum(ymin)
obj.SetMaximum(ymax)
obj.Draw()
xaxis = obj.xaxis
yaxis = obj.yaxis
# draw the plottables
for i, obj in enumerate(plottables):
if i == 0 and isinstance(obj, ROOT.THStack):
# use SetMin/Max for y-axis
obj.SetMinimum(ymin)
obj.SetMaximum(ymax)
# ROOT: please fix this...
obj.Draw('SAME')
# set the axes limits and titles
if xaxis is not None:
xaxis.SetLimits(xmin, xmax)
xaxis.SetRangeUser(xmin, xmax)
if xtitle is not None:
xaxis.SetTitle(xtitle)
if xdivisions is not None:
xaxis.SetNdivisions(xdivisions)
if yaxis is not None:
yaxis.SetLimits(ymin, ymax)
yaxis.SetRangeUser(ymin, ymax)
if ytitle is not None:
yaxis.SetTitle(ytitle)
if ydivisions is not None:
yaxis.SetNdivisions(ydivisions)
if pad is None:
pad = ROOT.gPad.func()
pad.SetLogx(bool(logx))
pad.SetLogy(bool(logy))
# redraw axes on top
# axes ticks sometimes get hidden by filled histograms
pad.RedrawAxis()
return (xaxis, yaxis), (xmin, xmax, ymin, ymax)
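# Minimal usage sketch (added; assumes rootpy Hist objects and an active pad).
# Kept as a comment so that importing this module stays free of side effects:
#   h1, h2 = Hist(10, 0, 1), Hist(10, 0, 1)
#   for x in (0.1, 0.4, 0.4, 0.7):
#       h1.Fill(x)
#       h2.Fill(1 - x)
#   (xaxis, yaxis), limits = draw([h1, h2], ytitle='Entries', ypadding=0.2)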
multiadd = lambda a, b: map(operator.add, a, b)
multisub = lambda a, b: map(operator.sub, a, b)
def _limits_helper(x1, x2, a, b, snap=False):
"""
Given x1, x2, a, b, where:
x1 - x0 x3 - x2
a = ------- , b = -------
x3 - x0 x3 - x0
determine the points x0 and x3:
x0 x1 x2 x3
|----------|-----------------|--------|
"""
if x2 < x1:
raise ValueError("x2 < x1")
if a + b >= 1:
raise ValueError("a + b >= 1")
if a < 0:
raise ValueError("a < 0")
if b < 0:
raise ValueError("b < 0")
if snap:
if x1 >= 0:
x1 = 0
a = 0
elif x2 <= 0:
x2 = 0
b = 0
if x1 == x2 == 0:
# garbage in garbage out
return 0., 1.
elif x1 == x2:
# garbage in garbage out
return x1 - 1., x1 + 1.
if a == 0 and b == 0:
return x1, x2
elif a == 0:
return x1, (x2 - b * x1) / (1 - b)
elif b == 0:
return (x1 - a * x2) / (1 - a), x2
x0 = ((b / a) * x1 + x2 - (x2 - x1) / (1 - a - b)) / (1 + b / a)
x3 = (x2 - x1) / (1 - a - b) + x0
return x0, x3
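# Worked example (added for clarity): with x1=0, x2=10 and a=b=0.1 the padded
# span is (x2 - x1) / (1 - a - b) = 12.5, so x0 = -1.25 and x3 = 11.25 and 10%
# of the final range lies on each side of [x1, x2].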
def get_limits(plottables,
xpadding=0,
ypadding=0.1,
xerror_in_padding=True,
yerror_in_padding=True,
snap=True,
logx=False,
logy=False,
logx_crop_value=1E-5,
logy_crop_value=1E-5,
logx_base=10,
logy_base=10):
"""
Get the axes limits that should be used for a 1D histogram, graph, or stack
of histograms.
Parameters
----------
plottables : Hist, Graph, HistStack, or list of such objects
The object(s) for which visually pleasing plot boundaries are
requested.
xpadding : float or 2-tuple, optional (default=0)
The horizontal padding as a fraction of the final plot width.
ypadding : float or 2-tuple, optional (default=0.1)
The vertical padding as a fraction of the final plot height.
xerror_in_padding : bool, optional (default=True)
If False then exclude the x error bars from the calculation of the plot
width.
yerror_in_padding : bool, optional (default=True)
If False then exclude the y error bars from the calculation of the plot
height.
snap : bool, optional (default=True)
Make the minimum or maximum of the vertical range the x-axis depending
on if the plot maximum and minimum are above or below the x-axis. If
the plot maximum is above the x-axis while the minimum is below the
x-axis, then this option will have no effect.
logx : bool, optional (default=False)
If True, then the x-axis is log scale.
logy : bool, optional (default=False)
If True, then the y-axis is log scale.
logx_crop_value : float, optional (default=1E-5)
If an x-axis is using a logarithmic scale then crop all non-positive
values with this value.
logy_crop_value : float, optional (default=1E-5)
If the y-axis is using a logarithmic scale then crop all non-positive
values with this value.
logx_base : float, optional (default=10)
The base used for the logarithmic scale of the x-axis.
logy_base : float, optional (default=10)
The base used for the logarithmic scale of the y-axis.
Returns
-------
xmin, xmax, ymin, ymax : tuple of plot boundaries
The computed x and y-axis ranges.
"""
try:
import numpy as np
use_numpy = True
except ImportError:
use_numpy = False
if not isinstance(plottables, (list, tuple)):
plottables = [plottables]
xmin = float('+inf')
xmax = float('-inf')
ymin = float('+inf')
ymax = float('-inf')
for h in plottables:
if isinstance(h, HistStack):
h = h.sum
if not isinstance(h, (_Hist, _Graph1DBase)):
raise TypeError(
"unable to determine plot axes ranges "
"from object of type `{0}`".format(
type(h)))
if use_numpy:
y_array_min = y_array_max = np.array(list(h.y()))
if yerror_in_padding:
y_array_min = y_array_min - np.array(list(h.yerrl()))
y_array_max = y_array_max + np.array(list(h.yerrh()))
_ymin = y_array_min.min()
_ymax = y_array_max.max()
else:
y_array_min = y_array_max = list(h.y())
if yerror_in_padding:
y_array_min = multisub(y_array_min, list(h.yerrl()))
y_array_max = multiadd(y_array_max, list(h.yerrh()))
_ymin = min(y_array_min)
_ymax = max(y_array_max)
if isinstance(h, _Graph1DBase):
if use_numpy:
x_array_min = x_array_max = np.array(list(h.x()))
if xerror_in_padding:
x_array_min = x_array_min - np.array(list(h.xerrl()))
x_array_max = x_array_max + np.array(list(h.xerrh()))
_xmin = x_array_min.min()
_xmax = x_array_max.max()
else:
x_array_min = x_array_max = list(h.x())
if xerror_in_padding:
x_array_min = multisub(x_array_min, list(h.xerrl()))
x_array_max = multiadd(x_array_max, list(h.xerrh()))
_xmin = min(x_array_min)
_xmax = max(x_array_max)
else:
_xmin = h.xedgesl(1)
_xmax = h.xedgesh(h.nbins(0))
if logy:
_ymin = max(logy_crop_value, _ymin)
_ymax = max(logy_crop_value, _ymax)
if logx:
_xmin = max(logx_crop_value, _xmin)
_xmax = max(logx_crop_value, _xmax)
if _xmin < xmin:
xmin = _xmin
if _xmax > xmax:
xmax = _xmax
if _ymin < ymin:
ymin = _ymin
if _ymax > ymax:
ymax = _ymax
if isinstance(xpadding, (list, tuple)):
if len(xpadding) != 2:
raise ValueError("xpadding must be of length 2")
xpadding_left = xpadding[0]
xpadding_right = xpadding[1]
else:
xpadding_left = xpadding_right = xpadding
if isinstance(ypadding, (list, tuple)):
if len(ypadding) != 2:
raise ValueError("ypadding must be of length 2")
ypadding_top = ypadding[0]
ypadding_bottom = ypadding[1]
else:
ypadding_top = ypadding_bottom = ypadding
if logx:
x0, x3 = _limits_helper(
log(xmin, logx_base), log(xmax, logx_base),
xpadding_left, xpadding_right)
xmin = logx_base ** x0
xmax = logx_base ** x3
else:
xmin, xmax = _limits_helper(
xmin, xmax, xpadding_left, xpadding_right)
if logy:
y0, y3 = _limits_helper(
log(ymin, logy_base), log(ymax, logy_base),
ypadding_bottom, ypadding_top, snap=False)
ymin = logy_base ** y0
ymax = logy_base ** y3
else:
ymin, ymax = _limits_helper(
ymin, ymax, ypadding_bottom, ypadding_top, snap=snap)
return xmin, xmax, ymin, ymax
def get_band(low_hist, high_hist, middle_hist=None):
"""
Convert the low and high histograms into a TGraphAsymmErrors centered at
the middle histogram if not None otherwise the middle between the low and
high points, to be used to draw a (possibly asymmetric) error band.
"""
npoints = low_hist.nbins(0)
band = Graph(npoints)
for i in range(npoints):
center = low_hist.x(i + 1)
width = low_hist.xwidth(i + 1)
low, high = low_hist.y(i + 1), high_hist.y(i + 1)
if middle_hist is not None:
middle = middle_hist.y(i + 1)
else:
middle = (low + high) / 2.
yerrh = max(high - middle, low - middle, 0)
yerrl = abs(min(high - middle, low - middle, 0))
band.SetPoint(i, center, middle)
band.SetPointError(i, width / 2., width / 2.,
yerrl, yerrh)
return band
def canvases_with(drawable):
"""
Return a list of all canvases where `drawable` has been painted.
Note: This function is inefficient because it inspects all objects on all
canvases, recursively. Avoid calling it if you have a large number of
canvases and primitives.
"""
return [c for c in ROOT.gROOT.GetListOfCanvases()
if drawable in find_all_primitives(c)]
def find_all_primitives(pad):
"""
Recursively find all primities on a pad, even those hiding behind a
GetListOfFunctions() of a primitive
"""
result = []
for primitive in pad.GetListOfPrimitives():
result.append(primitive)
if hasattr(primitive, "GetListOfFunctions"):
result.extend(primitive.GetListOfFunctions())
if hasattr(primitive, "GetHistogram"):
p = primitive.GetHistogram()
if p:
result.append(p)
if isinstance(primitive, ROOT.TPad):
result.extend(find_all_primitives(primitive))
return result
def tick_length_pixels(pad, xaxis, yaxis, xlength, ylength=None):
"""
Set the axes tick lengths in pixels
"""
if ylength is None:
ylength = xlength
xaxis.SetTickLength(xlength / float(pad.height_pixels))
yaxis.SetTickLength(ylength / float(pad.width_pixels))
| gpl-3.0 | 2,577,957,560,046,515,000 | 30.966368 | 79 | 0.556779 | false |
q3k/kasownik | webapp/models.py | 1 | 13156 | #!/usr/bin/env python2
# - * - coding=utf-8 - * -
# Copyright (c) 2015, Sergiusz Bazanski <[email protected]>
# Copyright (c) 2015, Remigiusz Marcinkiewicz <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
import enum
import json
import re
from sqlalchemy.orm import subqueryload_all
from flask import g
from webapp import app, db, mc, cache_enabled
import directory
class APIKey(db.Model):
id = db.Column(db.Integer, primary_key=True)
secret = db.Column(db.String(64))
member = db.Column(db.Integer, db.ForeignKey("member.id"))
description = db.Column(db.Text)
class MemberTransfer(db.Model):
__tablename__ = "member_transfer"
id = db.Column(db.Integer, primary_key=True)
member = db.Column(db.Integer, db.ForeignKey("member.id"))
transfer_id = db.Column(db.Integer, db.ForeignKey("transfer.id"))
year = db.Column(db.Integer)
month = db.Column(db.Integer)
transfer = db.relationship("Transfer", backref="member_transfers")
def __init__(self, _id, year, month, transfer):
self.id = _id
self.year = year
self.month = month
self.transfer = transfer
class PaymentStatus(enum.Enum):
never_paid = 1 # never paid membership fees
    unpaid = 2 # more than 3 fees unpaid
okay = 3 # fees paid
class PaymentPolicy(enum.Enum):
normal = "Normal"
extended = "Extended Grace Period"
potato = "Potato"
disabled = "Disabled"
class MembershipType(enum.Enum):
fatty = "Fatty"
starving = "Starving"
class Member(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True)
type = db.Column(db.Enum("starving", "fatty", name="member_types"))
transfers = db.relationship("MemberTransfer",order_by=[db.asc(MemberTransfer.year), db.asc(MemberTransfer.month)])
# old field
active = db.Column(db.Boolean)
api_keys = db.relationship("APIKey")
join_year = db.Column(db.Integer)
join_month = db.Column(db.Integer)
ldap_username = db.Column(db.String(64), unique=True)
# Normal - standard 3 months grace period
# Extended Grace Period - do not shut off account after grace period
# Potato - do not ever shut off account, report falsified payment status
# Disabled - manual disable override, regardless of payment extra
payment_policy = db.Column(db.Enum(*[p.value for p in PaymentPolicy.__members__.values()],
name='payment_policy_types'))
preferred_email = db.Column(db.String(64))
def mt_covers(self, mt):
"""For transfer view - given an mt, should we rowspan?"""
if mt not in self.transfers:
return None
ix = self.transfers.index(mt)
if ix != 0:
# check if the previous mt was covered by the same transfer
if self.transfers[ix-1].transfer.uid == mt.transfer.uid:
return None
# check how many next mts use the same transfer
rowspan = 0
for ix2 in range(ix+1, len(self.transfers)):
if self.transfers[ix2].transfer.uid == mt.transfer.uid:
rowspan += 1
else:
break
if rowspan == 0:
return None
else:
return rowspan + 1
@classmethod
def get_members(kls, deep=False):
"""Gets all members as an SQLAlchemy query.
@param(deep) - whether to do a subqueryload_all and load all transfer data
"""
if deep:
return kls.query.options(subqueryload_all(kls.transfers,
MemberTransfer.transfer)).order_by(kls.username)
else:
return kls.query.order_by(kls.username)
def _yearmonth_increment(self, ym):
y, m = ym
y2, m2 = y, m+1
if m2 > 12:
y2 += 1
m2 = 1
return (y2, m2)
def _yearmonth_scalar(self, ym):
y, m = ym
return y * 12 + (m - 1)
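    # Worked example (comment added for clarity): the scalar form counts whole
    # months, e.g. (2015, 1) -> 2015 * 12 + 0 = 24180 and (2015, 4) -> 24183,
    # so subtracting two scalars gives the number of months between them.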
def _get_status_uncached(self):
now_date = datetime.datetime.now()
now = now_date.year * 12 + (now_date.month - 1)
del now_date
status = {}
status['ldap_username'] = self.ldap_username
status['username'] = self.username
status['type'] = self.type
status['payment_policy'] = self.payment_policy
# First check - did we actually get any transfers?
if not self.transfers or self.transfers[0].transfer.uid == app.config['DUMMY_TRANSFER_UID']:
status['payment_status'] = PaymentStatus.never_paid.value
status['months_due'] = None
status['last_paid'] = (None, None)
if self.join_year is not None and self.join_month is not None:
status['joined'] = (self.join_year, self.join_month)
status['next_unpaid'] = self._yearmonth_increment(status['joined'])
else:
status['joined'] = (None, None)
status['next_unpaid'] = (None, None)
status['left'] = False
self._apply_judgement(status)
return status
# Use the join date from SQL, if available
if self.join_year is not None and self.join_month is not None:
joined = (self.join_year, self.join_month)
else:
joined = (self.transfers[0].year, self.transfers[0].month)
joined_scalar = self._yearmonth_scalar(joined)
status['joined'] = joined
most_recent_transfer = (0, 0)
unpaid_months = 0
# Iterate over all payments and figure out how much months are unpaid
previous_transfer = (0, 0)
previous_uid = None
active_payment = True
for mt in self.transfers:
this_transfer = (mt.year, mt.month)
this_scalar = self._yearmonth_scalar(this_transfer)
this_uid = mt.transfer.uid
previous_scalar = self._yearmonth_scalar(previous_transfer)
most_recent_scalar = self._yearmonth_scalar(most_recent_transfer)
# Is this transfer a „not a member anymore” transfer?
if this_uid == app.config['DUMMY_TRANSFER_UID']:
active_payment = False
continue
# Is this the first transfer? See if it was done on time
if previous_uid is None:
unpaid_months += (this_scalar - joined_scalar)
# Apply any missing payments
if active_payment and previous_uid is not None:
unpaid_months += (this_scalar - previous_scalar) - 1
# Is this the most recent payment?
if this_scalar > most_recent_scalar:
most_recent_scalar = this_scalar
most_recent_transfer = this_transfer
active_payment = True
previous_transfer = this_transfer
previous_uid = this_uid
# Apply missing payments from now
if active_payment:
previous_scalar = self._yearmonth_scalar(previous_transfer)
unpaid_months += (now - previous_scalar)
status['months_due'] = unpaid_months
status['payment_status'] = PaymentStatus.okay.value if unpaid_months < 4 else PaymentStatus.unpaid.value
status['last_paid'] = most_recent_transfer
status['left'] = not active_payment
if not active_payment:
status['next_unpaid'] = (None, None)
else:
status['next_unpaid'] = self._yearmonth_increment(status['last_paid'])
self._apply_judgement(status)
return status
def get_list_email(self):
if self.preferred_email:
return self.preferred_email
return '{}@hackerspace.pl'.format(self.ldap_username)
def get_contact_email(self):
if self.preferred_email:
return self.preferred_email
mra = directory.get_member_fields(g.ldap, self.ldap_username,
'mailRoutingAddress')
mra = mra['mailRoutingAddress']
if mra:
return mra
else:
return '{}@hackerspace.pl'.format(self.ldap_username)
def get_status(self):
"""It's better to call this after doing a full select of data."""
cache_key = 'kasownik-payment_status-{}'.format(self.username)
cache_data = mc.get(cache_key)
if cache_data and cache_enabled:
data = json.loads(cache_data)
return data
else:
cache_data = self._get_status_uncached()
mc.set(cache_key, json.dumps(cache_data))
return cache_data
def _apply_judgement(self, status):
"""Check your priviledge, you cisnormative shitlord!"""
if status['left']:
status['judgement'] = False
return
policy = status['payment_policy']
if policy == 'Normal':
if status['payment_status'] == PaymentStatus.okay.value and status['last_paid'][0] is not None:
status['judgement'] = True
else:
status['judgement'] = False
elif policy == 'Extended Grace Period':
status['judgement'] = True
elif policy == 'Potato':
status['judgement'] = True
status['months_due'] = 0
else:
status['judgement'] = False
def get_months_due(self):
status = self.get_status()
return status['months_due']
def get_last_paid(self):
status = self.get_status()
return status['last_paid']
def get_next_unpaid(self):
status = self.get_status()
return status['next_unpaid']
def __init__(self, _id, _username, _type, _active):
self.id = _id
self.username = _username
self.type = _type
self.active = _active
now_date = datetime.datetime.now()
self.join_year = now_date.year
self.join_month = now_date.month
self.ldap_username = _username
self.payment_policy = PaymentPolicy.normal.value
class Transfer(db.Model):
id = db.Column(db.Integer, primary_key=True)
uid = db.Column(db.String(128))
account_from = db.Column(db.String(32))
name_from = db.Column(db.String(256))
amount = db.Column(db.Integer)
title = db.Column(db.String(256))
date = db.Column(db.Date)
ignore = db.Column(db.Boolean)
def __init__(self, _id, _uid, _account_from, _name_from, _amount, _title, _date, _ignore):
self.id = _id
self.uid = _uid
self.account_from = _account_from
self.name_from = _name_from
self.amount = _amount
self.title = _title
self.date = _date
self.ignore = _ignore
def get_short_uid(self):
return self.uid[:16]
def parse_title(self):
m = re.match(ur"^([a-z0-9\-_\.]+) *\- *(fatty|starving|superfatty) *\- *([0-9a-z\-_ąężźćóżłśń \(\),/\.]+$)", self.title.strip().lower())
if not m:
return (None, None, None)
member, _type, title = m.group(1), m.group(2), m.group(3)
if title in [u"składka", u"opłata", u"opłata miesięczna", "skladka"]:
return (member, _type, None)
return member, _type, title
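    # Illustrative examples (added comment; usernames are made up):
    #   "jdoe - fatty - skladka"       -> ('jdoe', 'fatty', None)
    #   "jdoe - starving - za serwer"  -> ('jdoe', 'starving', 'za serwer')
    #   "random wire transfer"         -> (None, None, None)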
MATCH_OK, MATCH_WRONG_TYPE, MATCH_NO_USER, MATCH_UNPARSEABLE = range(4)
def get_matchability(self):
title = self.parse_title()
if not title[0]:
return self.MATCH_UNPARSEABLE, self.title
member_name = title[0]
member = Member.query.filter_by(username=member_name).first()
if not member:
return self.MATCH_NO_USER, member_name
if (title[1] == 'starving' and self.amount > 50) or (title[1] == 'fatty' and self.amount > 100):
return self.MATCH_WRONG_TYPE, member
if title[2]:
return self.MATCH_WRONG_TYPE, member
return self.MATCH_OK, member
| bsd-2-clause | -2,691,253,247,546,642,400 | 35.80112 | 145 | 0.607931 | false |
perrette/pyglacier | pyglacier/plotting.py | 1 | 1106 | import matplotlib.pyplot as plt
#
# plotting
#
def plot_elevation(ds, ax=None):
if ax is None:
ax = plt.gca()
ds['hs'].plot(ax=ax,label="surface")
ds['hb'].plot(ax=ax,label="bottom")
# add horizontal line to indicate sea level
ax.hlines(0, ds.x[0], ds.x[-1], linestyle='dashed', color='black')
ds['zb'].plot(ax=ax, color='black', linewidth=2, label="bedrock") # add bedrock
ax.legend(frameon=False, loc="upper right")
def plot_velocity(ds, ax=None):
if ax is None:
ax = plt.gca()
ds = ds.copy()
u = 'u' if 'u' in ds else 'U'
ds[u] = ds[u]*3600*24
ds[u].plot(ax=ax)
ax.set_ylabel('velocity [m/d]')
def plot_glacier(ds):
fig,axes=plt.subplots(2,1,sharex=True)
ax=axes[0]
plot_elevation(ds, ax)
ax=axes[1]
plot_velocity(ds, ax)
ax.set_xlim([ds.x[0], ds.x[-1]])
return fig, axes
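# Illustrative usage sketch (assumption, not from the original source): `ds` is
# expected to be an xarray-style dataset providing 'hs', 'hb', 'zb', 'x' and a
# velocity variable ('u' or 'U'), e.g.
#
#     fig, axes = plot_glacier(ds)
#     fig.savefig('glacier_profile.png')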
def plot_stress(ds):
_v = ["driving", "lat", "long", "basal", "residual"]
try:
ds = ds.take(_v)
except KeyError:
ds = ds.take([k + '_stress' for k in _v])
return ds.to_array(axis='stress').T.plot()
| mit | -7,186,009,462,917,936,000 | 25.97561 | 83 | 0.577758 | false |
samzhang111/wikipedia-jargon | all-subjects/make_tf_differences.py | 1 | 2815 | from __future__ import print_function
import msgpack
import sys
import os
from collections import defaultdict
from helpers import text_dict_to_term_dict
from WikiExtractor import clean, compact
import pandas as pd
def remove_wikipedia_markup(text):
return compact(clean(text.decode('utf8')))
def print_help_and_exit(msg=''):
if msg:
print('Error: {}\n'.format(msg))
print('Usage: python make_tf_differences.py [n-grams] [path to directory]')
print('The directory should contain files output by grab_texts.py')
sys.exit(1)
if len(sys.argv) <= 2:
print_help_and_exit()
##############################################################
# Read in msgpack files, separating them from simple and en Wikipedia
##############################################################
ngrams = int(sys.argv[1])
text_dir = sys.argv[2]
only = sys.argv[3:]
print('Only calculating for: ', only)
try:
files = os.listdir(text_dir)
except OSError:
print_help_and_exit()
##############################################################
# Organize the text files by subject, then wiki (en or simple)
##############################################################
file_dict = defaultdict(dict)
for f in files:
try:
subject, wiki, _ = f.split('_')
if only and subject not in only:
continue
file_dict[subject][wiki] = f
except ValueError:
print_help_and_exit('Text directory does not contain valid filenames')
for subject in file_dict:
print('Importing ', subject)
with open(os.path.join(text_dir, file_dict[subject]['en'])) as f:
en_text = msgpack.load(f)
en_text = {k: remove_wikipedia_markup(v) for k,v in en_text.items()}
with open(os.path.join(text_dir, file_dict[subject]['simple'])) as f:
sm_text = msgpack.load(f)
sm_text = {k: remove_wikipedia_markup(v) for k,v in sm_text.items()}
print('Calculating term differences')
en_tf, en_counts = text_dict_to_term_dict(en_text, ngrams)
sm_tf, sm_counts = text_dict_to_term_dict(sm_text, ngrams)
sm_terms = set(sm_tf)
en_terms = set(en_tf)
term_differences = {}
for t in sm_terms.union(en_terms):
term_differences[t] = en_tf[t] - sm_tf[t]
sorted_term_difference = sorted(term_differences.items(),
key=lambda x: x[1])
print('Outputting term differences')
td_df = pd.DataFrame(sorted_term_difference, columns=['term',
'term_difference'])
td_df['en_tf'] = td_df.term.apply(lambda x: en_tf[x])
td_df['sm_tf'] = td_df.term.apply(lambda x: sm_tf[x])
try:
os.mkdir('data/term-diffs/ngrams-{}'.format(ngrams))
except OSError:
pass
td_df.to_csv('data/term-diffs/ngrams-{}/{}_td.csv'.format(ngrams, subject),
index=False, encoding='utf8')
| gpl-3.0 | 5,142,175,881,754,362,000 | 30.988636 | 79 | 0.588988 | false |
fkie-cad/iva | local_repositories/tasks/datetime_utils.py | 1 | 1476 | from datetime import datetime, timedelta
TIME_FORMAT = '%H:%M:%S'
def calculate_task_execution_timeout(task_time):
current_datetime = datetime.now()
current_time = get_time_from_datetime(current_datetime)
return calculate_delta_time(task_time, current_time)
def calculate_task_next_execution_datetime(task_time):
current_datetime = get_current_datetime()
current_time = get_time_from_datetime(current_datetime)
if get_time_object(current_time) >= get_time_object(task_time):
current_datetime = add_one_day(current_datetime)
return update_time_in_datetime(current_datetime, task_time)
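# Worked example (added comment): with the current time at 10:00:00, a task
# time of '23:30:00' is scheduled for today at 23:30:00, while '08:00:00' has
# already passed and is therefore scheduled for 08:00:00 on the following day.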
def get_current_datetime():
return datetime.now()
def calculate_delta_time(time_a_str, time_b_str):
delta_time = (get_time_object(time_a_str) - get_time_object(time_b_str)).seconds
if delta_time > 0:
return delta_time
return 60
def get_time_object(time_a):
return datetime.strptime(time_a, TIME_FORMAT)
def get_time_from_datetime(datetime_):
return datetime_.strftime(TIME_FORMAT)
def verify_time_format(time_str):
try:
datetime.strptime(time_str, TIME_FORMAT)
return True
except ValueError:
return False
def update_time_in_datetime(datetime_, time_str):
time_object = get_time_object(time_str)
return datetime_.replace(hour=time_object.hour, minute=time_object.minute, second=time_object.second)
def add_one_day(datetime_):
return datetime_ + timedelta(days=1) | lgpl-3.0 | 7,140,560,285,524,920,000 | 26.867925 | 105 | 0.708672 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/effective_network_security_group_py3.py | 1 | 2060 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
"""Effective network security group.
:param network_security_group: The ID of network security group that is
applied.
:type network_security_group:
~azure.mgmt.network.v2018_01_01.models.SubResource
:param association: Associated resources.
:type association:
~azure.mgmt.network.v2018_01_01.models.EffectiveNetworkSecurityGroupAssociation
:param effective_security_rules: A collection of effective security rules.
:type effective_security_rules:
list[~azure.mgmt.network.v2018_01_01.models.EffectiveNetworkSecurityRule]
:param tag_map: Mapping of tags to list of IP Addresses included within
the tag.
:type tag_map: dict[str, list[str]]
"""
_attribute_map = {
'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
'tag_map': {'key': 'tagMap', 'type': '{[str]}'},
}
def __init__(self, *, network_security_group=None, association=None, effective_security_rules=None, tag_map=None, **kwargs) -> None:
super(EffectiveNetworkSecurityGroup, self).__init__(**kwargs)
self.network_security_group = network_security_group
self.association = association
self.effective_security_rules = effective_security_rules
self.tag_map = tag_map
| mit | -5,176,465,326,651,729,000 | 44.777778 | 136 | 0.658252 | false |
raymondanthony/youtube-dl | youtube_dl/extractor/pornotube.py | 1 | 1760 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
unified_strdate,
)
class PornotubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
_TEST = {
'url': 'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
'md5': '374dd6dcedd24234453b295209aa69b6',
'info_dict': {
'id': '1689755',
'ext': 'flv',
'upload_date': '20090708',
'title': 'Marilyn-Monroe-Bathing',
'age_limit': 18
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
video_title = mobj.group('title')
# Get webpage content
webpage = self._download_webpage(url, video_id)
# Get the video URL
VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
video_url = compat_urllib_parse.unquote(video_url)
# Get the uploaded date
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
if upload_date:
upload_date = unified_strdate(upload_date)
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'url': video_url,
'upload_date': upload_date,
'title': video_title,
'ext': 'flv',
'format': 'flv',
'age_limit': age_limit,
}
| unlicense | 507,547,985,690,037,800 | 30.428571 | 119 | 0.546591 | false |
szepeviktor/courier-pythonfilter-custom | email-correct.py | 1 | 9398 | #!/usr/bin/python
# file: email-correct.py
# -*- coding: utf-8 -*-
import os
import sys
import email
import email.charset
import email.encoders
from email.header import Header
from email.utils import getaddresses
from email.utils import formataddr
from email.utils import parseaddr
from email.utils import make_msgid
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import courier.control
from courier.xfilter import XFilter
from courier.xfilter import XFilterError
from lxml import etree
import html2text
__VERSION__ = '1.10'
SELF = 'email-correct'
DEFAULT_CHARSET = 'ISO-8859-2'
debug = False
#TODO
# unknown encoding: scz-1171-1 - check all .encode() and .decode()
# Courier 0.60.0 /etc/courier/bofh
#opt MIME=none
#opt BOFHBADMIME=accept
# check etree version >= 3
#from lxml import etree; etree.__version__
def is_nonascii(string):
return isinstance(string, basestring) and any(ord(c) & 0x80 for c in string)
def check_preamble(msg, corrected):
if msg.preamble is not None and is_nonascii(msg.preamble):
corrected += ['PREAMBLE']
u_preamble = unicode(msg.preamble, DEFAULT_CHARSET)
msg.preamble = u_preamble.encode('ascii', 'replace')
def check_msgid(msg, corrected):
# rfc2822
if msg.get('message-id') is None:
if msg.get('from') is None:
domain = 'msgid.missing'
else:
            # 'addr' rather than 'email' so the imported email module is not shadowed
            name, addr = parseaddr(msg.get('from'))
            domain = addr.split('@')[1]
corrected += ['MESSAGE_ID']
msg['Message-ID'] = make_msgid(domain)
def check_mimeversion(msg, corrected):
# rfc2045
if msg.get('mime-version') is None and (msg.is_multipart() or msg.get('content-transfer-encoding') is not None):
corrected += ['MIME_VERSION']
msg['MIME-Version'] = '1.0'
def check_encoding(part, corrected):
if (part['content-transfer-encoding'] is None or part['content-transfer-encoding'] != '8bit') and is_nonascii(part.get_payload()):
corrected += ['7BIT_TO_8BIT']
del part['content-transfer-encoding']
part['Content-Transfer-Encoding'] = '8bit'
def check_addresses(part, corrected, charset):
# https://tools.ietf.org/html/rfc5504#section-3.2
for header in ('From', 'Sender', 'To', 'Cc', 'Bcc', 'Reply-To', 'Resent-From', 'Resent-Sender', 'Resent-To', 'Resent-Cc', 'Resent-Bcc', 'Resent-Reply-To', 'Return-Path', 'Disposition-Notification-To'):
addresses = part.get_all(header)
if addresses is None:
continue
del part[header]
if len(addresses) > 1:
corrected += ['MULTI_' + header.upper()]
for addressline in addresses:
addrlist = getaddresses([addressline])
new_addrlist = []
for (name, addr) in addrlist:
if is_nonascii(name):
corrected += [header.upper()]
new_name = Header(name, charset, errors='replace').encode().encode('ascii', 'replace')
new_addrlist += [(new_name, addr)]
else:
new_addrlist += [(name, addr)]
part[header] = ', '.join(map(formataddr, new_addrlist))
def is_invalid_header(value):
if value and not isinstance(value, tuple) and is_nonascii(value):
return True
return False
def check_headers(part, corrected, charset):
subject = part['Subject']
if is_invalid_header(subject):
corrected += ['SUBJECT']
part.replace_header('subject', Header(subject, charset).encode().encode('ascii', 'replace'))
maildate = part['Date']
if is_invalid_header(maildate):
corrected += ['DATE']
part.replace_header('date', Header(maildate, charset).encode().encode('ascii', 'replace'))
# indamail.hu problem
mailgeoip = part['X-GeoIP']
if is_invalid_header(mailgeoip):
corrected += ['GEOIP']
part.replace_header('x-geoip', Header(mailgeoip, charset).encode().encode('ascii', 'replace'))
charset = part.get_content_charset() or charset
# attachments
value = part.get_param('name')
if is_invalid_header(value):
corrected += ['NAME']
value = Header(value, charset).encode().encode('ascii', 'replace')
part.set_param('name', value)
value = part.get_param('filename', header='content-disposition')
if is_invalid_header(value):
corrected += ['FILENAME']
value = Header(value, charset).encode().encode('ascii', 'replace')
part.set_param('filename', value, 'Content-Disposition')
def check_htmlonly(msg, corrected):
# Skip if multipart or Content-Type is not HTML
if msg.is_multipart() or msg.get('content-type') is None or msg.get('content-type').split(';')[0].strip().lower() != 'text/html':
return msg
###FIXME How to detect multipart messages without plain text part?
email.charset.add_charset('utf-8', email.charset.QP, email.charset.QP, 'utf-8')
###TODO Messages without <head> should get <base href="http://<FROM_DOMAIN>/"> for relative links.
charset = msg.get_content_charset() or DEFAULT_CHARSET
# New message with alternative multipart MIME-level
new_msg = MIMEMultipart('alternative')
# Loop through the original message's headers and copy those to the new one (except two headers)
for (key, value) in msg.items():
if key.lower() not in ['content-type', 'content-disposition']:
new_msg[key] = value
payload = msg.get_payload(decode=True)
###FIXME Encode (QP) every header line of all parts
### with non-decodable (by Content-Type: <CHARSET>) character
# https://docs.python.org/2/library/email.message.html#email.message.Message.defects
parser = etree.HTMLParser(encoding=str(charset), recover=True)
dom_tree = etree.fromstring(payload, parser)
if debug:
etree.dump(dom_tree, pretty_print=True)
output = etree.tostring(dom_tree, pretty_print=True, method='html')
# Revert to UNICODE
html_payload = output.decode('utf-8')
try:
text_payload = html2text.html2text(html_payload)
except Exception as error:
# English - Hungarian
text_payload = 'No text part - Nincs szoveges resz'
pid = str(os.getpid())
sys.stderr.write(SELF + '[' + pid + '] Exception in html2text: %s; %s; charset=%s\n' % (str(type(error)), str(error), str(charset)))
bf = open('/tmp/' + SELF + '_bodyFile.' + pid, 'w')
# Only the non-convertable (broken) HTML
#bf.write(msg.as_string())
# The whole original message
bf.write(output)
bf.close()
# Creating two MIME parts keeping the character set
part1 = MIMEText(text_payload.encode(str(charset), 'replace'), 'plain', charset)
part2 = MIMEText(html_payload.encode(str(charset), 'replace'), 'html', charset)
part1['Content-Disposition'] = 'inline'
part2['Content-Disposition'] = 'inline'
part1['Content-Description'] = 'Plaintext Version of Message'
part2['Content-Description'] = 'HTML Version of Message'
# Attaching the parts to the new message
new_msg.preamble = 'This is a MIME-formatted message. If you see this text it means that your\nE-mail software does not support MIME-formatted messages.\n'
new_msg.attach(part1)
new_msg.attach(part2)
corrected += ['HTMLONLY']
return new_msg
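# Sketch of the resulting message layout (added comment): a text/html-only
# message leaves check_htmlonly() as
#
#     multipart/alternative
#         text/plain   (html2text rendering of the body)
#         text/html    (cleaned-up original body)
#
# so that plain-text mail clients still receive a readable part.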
def initFilter():
# No variables for this module yes
###TODO e.g. DEFAULT_CHARSET, path for exception body files
#courier.config.applyModuleConfig(SELF, globals())
sys.stderr.write('Initialized the "' + SELF + '" ' + __VERSION__ + ' python filter\n')
def doFilter(bodyFile, controlFileList):
corrected = []
try:
xf = XFilter(SELF, bodyFile, controlFileList)
except XFilterError:
sys.stderr.write(SELF + ': Loop + exit\n')
return ''
pid = str(os.getpid())
# Representing an email message:
# https://docs.python.org/2/library/email.message.html
msg = xf.getMessage()
if debug:
to = msg['to']
else:
tolist = courier.control.getRecipientsData(controlFileList)
if tolist is not None:
to = tolist[0][0]
check_preamble(msg, corrected)
check_msgid(msg, corrected)
check_mimeversion(msg, corrected)
for part in msg.walk():
charset = part.get_charset() or part.get_param('charset') or DEFAULT_CHARSET
check_encoding(part, corrected)
check_addresses(part, corrected, charset)
check_headers(part, corrected, charset)
msg = check_htmlonly(msg, corrected)
if corrected:
msg.set_param('corrected', ','.join(corrected), 'X-Email-Correct')
msg.set_param('version', __VERSION__, 'X-Email-Correct')
xf.setMessage(msg)
try:
xf.submit()
except Exception as error:
sys.stderr.write(SELF + '[' + pid + '] Exception in XFilter.submit: %s; %s\n' % (str(type(error)), str(error)))
bf = open('/tmp/' + SELF + '_bodyFile2.' + pid, 'w')
bf.write(msg.as_string())
bf.close()
sys.stderr.write(SELF + '[' + pid + '] To: ' + to + ' corrected=' + ','.join(corrected) + '\n')
elif debug:
sys.stderr.write(SELF + '[' + pid + '] To: ' + to + ' correct\n')
return ''
if __name__ == '__main__':
debug = True
initFilter()
doFilter(sys.argv[1], sys.argv[2:])
| mit | -9,201,266,120,644,209,000 | 36.146245 | 205 | 0.632581 | false |
gift-surg/GIFT-Grab | src/tests/files/conftest.py | 1 | 2581 | from pytest import fixture
from pygiftgrab import Codec, ColourSpace
def pytest_addoption(parser):
parser.addoption('--colour-space', action='store', type=str, required=True,
help='Colour space specification (BGRA or I420)')
parser.addoption('--filepath', action='store', type=str, required=True,
help='Video file to use')
parser.addoption('--frame-rate', action='store', type=float, required=True,
help='Frame rate of video file')
parser.addoption('--frame-count', action='store', type=int, required=True,
help='No. of frames in video file')
parser.addoption('--frame-width', action='store', type=int, required=True,
help='Width of frames in video file')
parser.addoption('--frame-height', action='store', type=int, required=True,
help='Height of frames in video file')
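# Example invocation (sketch; the test file name and option values below are
# assumptions):
#
#     py.test tests/files --colour-space=I420 --filepath=data/video.mp4 \
#         --frame-rate=30 --frame-count=900 --frame-width=1920 --frame-height=1080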
@fixture(scope='session')
def colour_space(request):
colour_space = request.config.getoption('--colour-space')
case_insensitive = colour_space.lower()
if case_insensitive == 'bgra':
return ColourSpace.BGRA
elif case_insensitive == 'i420':
return ColourSpace.I420
elif case_insensitive == 'uyvy':
return ColourSpace.UYVY
else:
raise RuntimeError('Could not recognise colour space ' +
colour_space)
@fixture(scope='session')
def filepath(request):
filepath = request.config.getoption('--filepath')
if not filepath:
raise RuntimeError('Filepath cannot be empty')
else:
return filepath
@fixture(scope='session')
def frame_rate(request):
frame_rate = float(request.config.getoption('--frame-rate'))
if frame_rate <= 0:
raise RuntimeError('Frame rate must be positive')
else:
return frame_rate
@fixture(scope='session')
def frame_count(request):
frame_count = int(request.config.getoption('--frame-count'))
if frame_count <= 0:
raise RuntimeError('Frame count must be positive')
else:
return frame_count
@fixture(scope='session')
def frame_width(request):
frame_width = int(request.config.getoption('--frame-width'))
if frame_width <= 0:
raise RuntimeError('Frame width must be positive')
else:
return frame_width
@fixture(scope='session')
def frame_height(request):
frame_height = int(request.config.getoption('--frame-height'))
if frame_height <= 0:
raise RuntimeError('Frame height must be positive')
else:
return frame_height | bsd-3-clause | -3,027,531,557,023,826,400 | 32.532468 | 79 | 0.642774 | false |
kzys/buildbot | buildbot/test/unit/test_util.py | 1 | 1861 | # -*- test-case-name: buildbot.test.test_util -*-
from twisted.trial import unittest
from buildbot import util
class Foo(util.ComparableMixin):
compare_attrs = ["a", "b"]
def __init__(self, a, b, c):
self.a, self.b, self.c = a,b,c
class Bar(Foo, util.ComparableMixin):
compare_attrs = ["b", "c"]
class Compare(unittest.TestCase):
def testCompare(self):
f1 = Foo(1, 2, 3)
f2 = Foo(1, 2, 4)
f3 = Foo(1, 3, 4)
b1 = Bar(1, 2, 3)
self.failUnless(f1 == f2)
self.failIf(f1 == f3)
self.failIf(f1 == b1)
class test_checkRepoURL(unittest.TestCase):
def assertUrl(self, real_url, expected_url):
new_url = util.remove_userpassword(real_url)
self.assertEqual(expected_url, new_url)
def test_url_with_no_user_and_password(self):
self.assertUrl('http://myurl.com/myrepo', 'http://myurl.com/myrepo')
def test_url_with_user_and_password(self):
self.assertUrl('http://myuser:[email protected]/myrepo', 'http://myurl.com/myrepo')
def test_another_url_with_no_user_and_password(self):
self.assertUrl('http://myurl2.com/myrepo2', 'http://myurl2.com/myrepo2')
def test_another_url_with_user_and_password(self):
self.assertUrl('http://myuser2:[email protected]/myrepo2', 'http://myurl2.com/myrepo2')
def test_with_different_protocol_without_user_and_password(self):
self.assertUrl('ssh://myurl3.com/myrepo3', 'ssh://myurl3.com/myrepo3')
def test_with_different_protocol_with_user_and_password(self):
self.assertUrl('ssh://myuser3:[email protected]/myrepo3', 'ssh://myurl3.com/myrepo3')
def test_file_path(self):
self.assertUrl('/home/me/repos/my-repo', '/home/me/repos/my-repo')
def test_win32file_path(self):
self.assertUrl('c:\\repos\\my-repo', 'c:\\repos\\my-repo')
| gpl-2.0 | 2,140,252,974,185,657,300 | 32.232143 | 96 | 0.639979 | false |
bcj/AttrDict | tests/test_common.py | 1 | 29231 | # encoding: UTF-8
"""
Common tests that apply to multiple Attr-derived classes.
"""
import copy
from collections import namedtuple, Mapping, ItemsView, KeysView, ValuesView
from itertools import chain
import pickle
from sys import version_info
from nose.tools import (assert_equals, assert_not_equals,
assert_true, assert_false, assert_raises)
import six
from attrdict.mixins import Attr
Options = namedtuple(
'Options',
('cls', 'constructor', 'mutable', 'iter_methods', 'view_methods',
'recursive')
)
class AttrImpl(Attr):
"""
An implementation of Attr.
"""
def __init__(self, items=None, sequence_type=tuple):
if items is None:
items = {}
elif not isinstance(items, Mapping):
items = dict(items)
self._mapping = items
self._sequence_type = sequence_type
def _configuration(self):
"""
The configuration for an attrmap instance.
"""
return self._sequence_type
def __getitem__(self, key):
"""
Access a value associated with a key.
"""
return self._mapping[key]
def __len__(self):
"""
Check the length of the mapping.
"""
return len(self._mapping)
def __iter__(self):
"""
Iterated through the keys.
"""
return iter(self._mapping)
def __getstate__(self):
"""
Serialize the object.
"""
return (self._mapping, self._sequence_type)
def __setstate__(self, state):
"""
Deserialize the object.
"""
mapping, sequence_type = state
self._mapping = mapping
self._sequence_type = sequence_type
@classmethod
def _constructor(cls, mapping, configuration):
"""
A standardized constructor.
"""
return cls(mapping, sequence_type=configuration)
def test_attr():
"""
Tests for an class that implements Attr.
"""
for test in common(AttrImpl, mutable=False):
yield test
def test_attrmap():
"""
Run AttrMap against the common tests.
"""
from attrdict.mapping import AttrMap
for test in common(AttrMap, mutable=True):
yield test
def test_attrdict():
"""
Run AttrDict against the common tests.
"""
from attrdict.dictionary import AttrDict
view_methods = (2, 7) <= version_info < (3,)
def constructor(items=None, sequence_type=tuple):
"""
Build a new AttrDict.
"""
if items is None:
items = {}
return AttrDict._constructor(items, sequence_type)
for test in common(AttrDict, constructor=constructor,
mutable=True, iter_methods=True,
view_methods=view_methods, recursive=False):
yield test
def test_attrdefault():
"""
Run AttrDefault against the common tests.
"""
from attrdict.default import AttrDefault
def constructor(items=None, sequence_type=tuple):
"""
Build a new AttrDefault.
"""
if items is None:
items = {}
return AttrDefault(None, items, sequence_type)
for test in common(AttrDefault, constructor=constructor, mutable=True):
yield test
def common(cls, constructor=None, mutable=False, iter_methods=False,
view_methods=False, recursive=True):
"""
Iterates over tests common to multiple Attr-derived classes
cls: The class that is being tested.
mutable: (optional, False) Whether the object is supposed to be
mutable.
iter_methods: (optional, False) Whether the class implements
iter<keys,values,items> under Python 2.
view_methods: (optional, False) Whether the class implements
view<keys,values,items> under Python 2.
recursive: (optional, True) Whether recursive assignment works.
"""
tests = (
item_access, iteration, containment, length, equality,
item_creation, item_deletion, sequence_typing, addition,
to_kwargs, pickling,
)
mutable_tests = (
pop, popitem, clear, update, setdefault, copying, deepcopying,
)
if constructor is None:
constructor = cls
options = Options(cls, constructor, mutable, iter_methods, view_methods,
recursive)
if mutable:
tests = chain(tests, mutable_tests)
for test in tests:
test.description = test.__doc__.format(cls=cls.__name__)
yield test, options
def item_access(options):
"""Access items in {cls}."""
mapping = options.constructor(
{
'foo': 'bar',
'_lorem': 'ipsum',
six.u('👻'): 'boo',
3: 'three',
'get': 'not the function',
'sub': {'alpha': 'bravo'},
'bytes': b'bytes',
'tuple': ({'a': 'b'}, 'c'),
'list': [{'a': 'b'}, {'c': 'd'}],
}
)
# key that can be an attribute
assert_equals(mapping['foo'], 'bar')
assert_equals(mapping.foo, 'bar')
assert_equals(mapping('foo'), 'bar')
assert_equals(mapping.get('foo'), 'bar')
# key that cannot be an attribute
assert_equals(mapping[3], 'three')
assert_raises(TypeError, getattr, mapping, 3)
assert_equals(mapping(3), 'three')
assert_equals(mapping.get(3), 'three')
# key that cannot be an attribute (sadly)
assert_equals(mapping[six.u('👻')], 'boo')
if six.PY2:
assert_raises(UnicodeEncodeError, getattr, mapping, six.u('👻'))
else:
assert_raises(AttributeError, getattr, mapping, six.u('👻'))
assert_equals(mapping(six.u('👻')), 'boo')
assert_equals(mapping.get(six.u('👻')), 'boo')
# key that represents a hidden attribute
assert_equals(mapping['_lorem'], 'ipsum')
assert_raises(AttributeError, lambda: mapping._lorem)
assert_equals(mapping('_lorem'), 'ipsum')
assert_equals(mapping.get('_lorem'), 'ipsum')
# key that represents an attribute that already exists
assert_equals(mapping['get'], 'not the function')
assert_not_equals(mapping.get, 'not the function')
assert_equals(mapping('get'), 'not the function')
assert_equals(mapping.get('get'), 'not the function')
# does recursion work
assert_raises(AttributeError, lambda: mapping['sub'].alpha)
assert_equals(mapping.sub.alpha, 'bravo')
assert_equals(mapping('sub').alpha, 'bravo')
assert_raises(AttributeError, lambda: mapping.get('sub').alpha)
# bytes
assert_equals(mapping['bytes'], b'bytes')
assert_equals(mapping.bytes, b'bytes')
assert_equals(mapping('bytes'), b'bytes')
assert_equals(mapping.get('bytes'), b'bytes')
# tuple
assert_equals(mapping['tuple'], ({'a': 'b'}, 'c'))
assert_equals(mapping.tuple, ({'a': 'b'}, 'c'))
assert_equals(mapping('tuple'), ({'a': 'b'}, 'c'))
assert_equals(mapping.get('tuple'), ({'a': 'b'}, 'c'))
assert_raises(AttributeError, lambda: mapping['tuple'][0].a)
assert_equals(mapping.tuple[0].a, 'b')
assert_equals(mapping('tuple')[0].a, 'b')
assert_raises(AttributeError, lambda: mapping.get('tuple')[0].a)
assert_true(isinstance(mapping['tuple'], tuple))
assert_true(isinstance(mapping.tuple, tuple))
assert_true(isinstance(mapping('tuple'), tuple))
assert_true(isinstance(mapping.get('tuple'), tuple))
assert_true(isinstance(mapping['tuple'][0], dict))
assert_true(isinstance(mapping.tuple[0], options.cls))
assert_true(isinstance(mapping('tuple')[0], options.cls))
assert_true(isinstance(mapping.get('tuple')[0], dict))
assert_true(isinstance(mapping['tuple'][1], str))
assert_true(isinstance(mapping.tuple[1], str))
assert_true(isinstance(mapping('tuple')[1], str))
assert_true(isinstance(mapping.get('tuple')[1], str))
# list
assert_equals(mapping['list'], [{'a': 'b'}, {'c': 'd'}])
assert_equals(mapping.list, ({'a': 'b'}, {'c': 'd'}))
assert_equals(mapping('list'), ({'a': 'b'}, {'c': 'd'}))
assert_equals(mapping.get('list'), [{'a': 'b'}, {'c': 'd'}])
assert_raises(AttributeError, lambda: mapping['list'][0].a)
assert_equals(mapping.list[0].a, 'b')
assert_equals(mapping('list')[0].a, 'b')
assert_raises(AttributeError, lambda: mapping.get('list')[0].a)
assert_true(isinstance(mapping['list'], list))
assert_true(isinstance(mapping.list, tuple))
assert_true(isinstance(mapping('list'), tuple))
assert_true(isinstance(mapping.get('list'), list))
assert_true(isinstance(mapping['list'][0], dict))
assert_true(isinstance(mapping.list[0], options.cls))
assert_true(isinstance(mapping('list')[0], options.cls))
assert_true(isinstance(mapping.get('list')[0], dict))
assert_true(isinstance(mapping['list'][1], dict))
assert_true(isinstance(mapping.list[1], options.cls))
assert_true(isinstance(mapping('list')[1], options.cls))
assert_true(isinstance(mapping.get('list')[1], dict))
# Nonexistent key
assert_raises(KeyError, lambda: mapping['fake'])
assert_raises(AttributeError, lambda: mapping.fake)
assert_raises(AttributeError, lambda: mapping('fake'))
assert_equals(mapping.get('fake'), None)
assert_equals(mapping.get('fake', 'bake'), 'bake')
def iteration(options):
"Iterate over keys/values/items in {cls}"
raw = {'foo': 'bar', 'lorem': 'ipsum', 'alpha': 'bravo'}
mapping = options.constructor(raw)
expected_keys = frozenset(('foo', 'lorem', 'alpha'))
expected_values = frozenset(('bar', 'ipsum', 'bravo'))
expected_items = frozenset(
(('foo', 'bar'), ('lorem', 'ipsum'), ('alpha', 'bravo'))
)
assert_equals(set(iter(mapping)), expected_keys)
actual_keys = mapping.keys()
actual_values = mapping.values()
actual_items = mapping.items()
if six.PY2:
for collection in (actual_keys, actual_values, actual_items):
assert_true(isinstance(collection, list))
assert_equals(frozenset(actual_keys), expected_keys)
assert_equals(frozenset(actual_values), expected_values)
assert_equals(frozenset(actual_items), expected_items)
if options.iter_methods:
actual_keys = mapping.iterkeys()
actual_values = mapping.itervalues()
actual_items = mapping.iteritems()
for iterable in (actual_keys, actual_values, actual_items):
assert_false(isinstance(iterable, list))
assert_equals(frozenset(actual_keys), expected_keys)
assert_equals(frozenset(actual_values), expected_values)
assert_equals(frozenset(actual_items), expected_items)
if options.view_methods:
actual_keys = mapping.viewkeys()
actual_values = mapping.viewvalues()
actual_items = mapping.viewitems()
# These views don't actually extend from collections.*View
for iterable in (actual_keys, actual_values, actual_items):
assert_false(isinstance(iterable, list))
assert_equals(frozenset(actual_keys), expected_keys)
assert_equals(frozenset(actual_values), expected_values)
assert_equals(frozenset(actual_items), expected_items)
# What happens if mapping isn't a dict
from attrdict.mapping import AttrMap
mapping = options.constructor(AttrMap(raw))
actual_keys = mapping.viewkeys()
actual_values = mapping.viewvalues()
actual_items = mapping.viewitems()
# These views don't actually extend from collections.*View
for iterable in (actual_keys, actual_values, actual_items):
assert_false(isinstance(iterable, list))
assert_equals(frozenset(actual_keys), expected_keys)
assert_equals(frozenset(actual_values), expected_values)
assert_equals(frozenset(actual_items), expected_items)
else: # methods are actually views
assert_true(isinstance(actual_keys, KeysView))
assert_equals(frozenset(actual_keys), expected_keys)
assert_true(isinstance(actual_values, ValuesView))
assert_equals(frozenset(actual_values), expected_values)
assert_true(isinstance(actual_items, ItemsView))
assert_equals(frozenset(actual_items), expected_items)
# make sure empty iteration works
assert_equals(tuple(options.constructor().items()), ())
def containment(options):
"Check whether {cls} contains keys"
mapping = options.constructor(
{'foo': 'bar', frozenset((1, 2, 3)): 'abc', 1: 2}
)
empty = options.constructor()
assert_true('foo' in mapping)
assert_false('foo' in empty)
assert_true(frozenset((1, 2, 3)) in mapping)
assert_false(frozenset((1, 2, 3)) in empty)
assert_true(1 in mapping)
assert_false(1 in empty)
assert_false('banana' in mapping)
assert_false('banana' in empty)
def length(options):
"Get the length of an {cls} instance"
assert_equals(len(options.constructor()), 0)
assert_equals(len(options.constructor({'foo': 'bar'})), 1)
assert_equals(len(options.constructor({'foo': 'bar', 'baz': 'qux'})), 2)
def equality(options):
"Equality checks for {cls}"
empty = {}
mapping_a = {'foo': 'bar'}
mapping_b = {'lorem': 'ipsum'}
constructor = options.constructor
assert_true(constructor(empty) == empty)
assert_false(constructor(empty) != empty)
assert_false(constructor(empty) == mapping_a)
assert_true(constructor(empty) != mapping_a)
assert_false(constructor(empty) == mapping_b)
assert_true(constructor(empty) != mapping_b)
assert_false(constructor(mapping_a) == empty)
assert_true(constructor(mapping_a) != empty)
assert_true(constructor(mapping_a) == mapping_a)
assert_false(constructor(mapping_a) != mapping_a)
assert_false(constructor(mapping_a) == mapping_b)
assert_true(constructor(mapping_a) != mapping_b)
assert_false(constructor(mapping_b) == empty)
assert_true(constructor(mapping_b) != empty)
assert_false(constructor(mapping_b) == mapping_a)
assert_true(constructor(mapping_b) != mapping_a)
assert_true(constructor(mapping_b) == mapping_b)
assert_false(constructor(mapping_b) != mapping_b)
assert_true(constructor(empty) == constructor(empty))
assert_false(constructor(empty) != constructor(empty))
assert_false(constructor(empty) == constructor(mapping_a))
assert_true(constructor(empty) != constructor(mapping_a))
assert_false(constructor(empty) == constructor(mapping_b))
assert_true(constructor(empty) != constructor(mapping_b))
assert_false(constructor(mapping_a) == constructor(empty))
assert_true(constructor(mapping_a) != constructor(empty))
assert_true(constructor(mapping_a) == constructor(mapping_a))
assert_false(constructor(mapping_a) != constructor(mapping_a))
assert_false(constructor(mapping_a) == constructor(mapping_b))
assert_true(constructor(mapping_a) != constructor(mapping_b))
assert_false(constructor(mapping_b) == constructor(empty))
assert_true(constructor(mapping_b) != constructor(empty))
assert_false(constructor(mapping_b) == constructor(mapping_a))
assert_true(constructor(mapping_b) != constructor(mapping_a))
assert_true(constructor(mapping_b) == constructor(mapping_b))
assert_false(constructor(mapping_b) != constructor(mapping_b))
assert_true(constructor((('foo', 'bar'),)) == {'foo': 'bar'})
def item_creation(options):
"Add a key-value pair to an {cls}"
if not options.mutable:
# Assignment shouldn't add to the dict
mapping = options.constructor()
try:
mapping.foo = 'bar'
except TypeError:
pass # may fail if setattr modified
else:
pass # may assign, but shouldn't assign to dict
def item():
"""
Attempt to add an item.
"""
mapping['foo'] = 'bar'
assert_raises(TypeError, item)
assert_false('foo' in mapping)
else:
mapping = options.constructor()
# key that can be an attribute
mapping.foo = 'bar'
assert_equals(mapping.foo, 'bar')
assert_equals(mapping['foo'], 'bar')
assert_equals(mapping('foo'), 'bar')
assert_equals(mapping.get('foo'), 'bar')
mapping['baz'] = 'qux'
assert_equals(mapping.baz, 'qux')
assert_equals(mapping['baz'], 'qux')
assert_equals(mapping('baz'), 'qux')
assert_equals(mapping.get('baz'), 'qux')
# key that cannot be an attribute
assert_raises(TypeError, setattr, mapping, 1, 'one')
assert_true(1 not in mapping)
mapping[2] = 'two'
assert_equals(mapping[2], 'two')
assert_equals(mapping(2), 'two')
assert_equals(mapping.get(2), 'two')
# key that represents a hidden attribute
def add_foo():
"add _foo to mapping"
mapping._foo = '_bar'
assert_raises(TypeError, add_foo)
assert_false('_foo' in mapping)
mapping['_baz'] = 'qux'
def get_baz():
"get the _foo attribute"
return mapping._baz
assert_raises(AttributeError, get_baz)
assert_equals(mapping['_baz'], 'qux')
assert_equals(mapping('_baz'), 'qux')
assert_equals(mapping.get('_baz'), 'qux')
# key that represents an attribute that already exists
def add_get():
"add get to mapping"
mapping.get = 'attribute'
assert_raises(TypeError, add_get)
assert_false('get' in mapping)
mapping['get'] = 'value'
assert_not_equals(mapping.get, 'value')
assert_equals(mapping['get'], 'value')
assert_equals(mapping('get'), 'value')
assert_equals(mapping.get('get'), 'value')
# rewrite a value
mapping.foo = 'manchu'
assert_equals(mapping.foo, 'manchu')
assert_equals(mapping['foo'], 'manchu')
assert_equals(mapping('foo'), 'manchu')
assert_equals(mapping.get('foo'), 'manchu')
mapping['bar'] = 'bell'
assert_equals(mapping.bar, 'bell')
assert_equals(mapping['bar'], 'bell')
assert_equals(mapping('bar'), 'bell')
assert_equals(mapping.get('bar'), 'bell')
if options.recursive:
recursed = options.constructor({'foo': {'bar': 'baz'}})
recursed.foo.bar = 'qux'
recursed.foo.alpha = 'bravo'
assert_equals(recursed, {'foo': {'bar': 'qux', 'alpha': 'bravo'}})
def item_deletion(options):
"Remove a key-value from to an {cls}"
if not options.mutable:
mapping = options.constructor({'foo': 'bar'})
# could be a TypeError or an AttributeError
try:
del mapping.foo
except TypeError:
pass
except AttributeError:
pass
else:
raise AssertionError('deletion should fail')
def item(mapping):
"""
Attempt to del an item
"""
del mapping['foo']
assert_raises(TypeError, item, mapping)
assert_equals(mapping, {'foo': 'bar'})
assert_equals(mapping.foo, 'bar')
assert_equals(mapping['foo'], 'bar')
else:
mapping = options.constructor(
{'foo': 'bar', 'lorem': 'ipsum', '_hidden': True, 'get': 'value'}
)
del mapping.foo
assert_false('foo' in mapping)
del mapping['lorem']
assert_false('lorem' in mapping)
def del_hidden():
"delete _hidden"
del mapping._hidden
try:
del_hidden()
except KeyError:
pass
except TypeError:
pass
else:
assert_false("Test raised the appropriate exception")
# assert_raises(TypeError, del_hidden)
assert_true('_hidden' in mapping)
del mapping['_hidden']
assert_false('hidden' in mapping)
def del_get():
"delete get"
del mapping.get
assert_raises(TypeError, del_get)
assert_true('get' in mapping)
assert_true(mapping.get('get'), 'value')
del mapping['get']
assert_false('get' in mapping)
assert_true(mapping.get('get', 'banana'), 'banana')
def sequence_typing(options):
"Does {cls} respect sequence type?"
data = {'list': [{'foo': 'bar'}], 'tuple': ({'foo': 'bar'},)}
tuple_mapping = options.constructor(data)
assert_true(isinstance(tuple_mapping.list, tuple))
assert_equals(tuple_mapping.list[0].foo, 'bar')
assert_true(isinstance(tuple_mapping.tuple, tuple))
assert_equals(tuple_mapping.tuple[0].foo, 'bar')
list_mapping = options.constructor(data, sequence_type=list)
assert_true(isinstance(list_mapping.list, list))
assert_equals(list_mapping.list[0].foo, 'bar')
assert_true(isinstance(list_mapping.tuple, list))
assert_equals(list_mapping.tuple[0].foo, 'bar')
none_mapping = options.constructor(data, sequence_type=None)
assert_true(isinstance(none_mapping.list, list))
assert_raises(AttributeError, lambda: none_mapping.list[0].foo)
assert_true(isinstance(none_mapping.tuple, tuple))
assert_raises(AttributeError, lambda: none_mapping.tuple[0].foo)
def addition(options):
"Adding {cls} to other mappings."
left = {
'foo': 'bar',
'mismatch': False,
'sub': {'alpha': 'beta', 'a': 'b'},
}
right = {
'lorem': 'ipsum',
'mismatch': True,
'sub': {'alpha': 'bravo', 'c': 'd'},
}
merged = {
'foo': 'bar',
'lorem': 'ipsum',
'mismatch': True,
'sub': {'alpha': 'bravo', 'a': 'b', 'c': 'd'}
}
opposite = {
'foo': 'bar',
'lorem': 'ipsum',
'mismatch': False,
'sub': {'alpha': 'beta', 'a': 'b', 'c': 'd'}
}
constructor = options.constructor
assert_raises(TypeError, lambda: constructor() + 1)
assert_raises(TypeError, lambda: 1 + constructor())
assert_equals(constructor() + constructor(), {})
assert_equals(constructor() + {}, {})
assert_equals({} + constructor(), {})
assert_equals(constructor(left) + constructor(), left)
assert_equals(constructor(left) + {}, left)
assert_equals({} + constructor(left), left)
assert_equals(constructor() + constructor(left), left)
assert_equals(constructor() + left, left)
assert_equals(left + constructor(), left)
assert_equals(constructor(left) + constructor(right), merged)
assert_equals(constructor(left) + right, merged)
assert_equals(left + constructor(right), merged)
assert_equals(constructor(right) + constructor(left), opposite)
assert_equals(constructor(right) + left, opposite)
assert_equals(right + constructor(left), opposite)
# test sequence type changes
data = {'sequence': [{'foo': 'bar'}]}
assert_true(isinstance((constructor(data) + {}).sequence, tuple))
assert_true(
isinstance((constructor(data) + constructor()).sequence, tuple)
)
assert_true(isinstance((constructor(data, list) + {}).sequence, list))
# assert_true(
# isinstance((constructor(data, list) + constructor()).sequence, tuple)
# )
assert_true(isinstance((constructor(data, list) + {}).sequence, list))
assert_true(
isinstance(
(constructor(data, list) + constructor({}, list)).sequence,
list
)
)
def to_kwargs(options):
"**{cls}"
def return_results(**kwargs):
"Return result passed into a function"
return kwargs
expected = {'foo': 1, 'bar': 2}
assert_equals(return_results(**options.constructor()), {})
assert_equals(return_results(**options.constructor(expected)), expected)
def check_pickle_roundtrip(source, options, **kwargs):
"""
serialize then deserialize a mapping, ensuring the result and initial
objects are equivalent.
"""
source = options.constructor(source, **kwargs)
data = pickle.dumps(source)
loaded = pickle.loads(data)
assert_true(isinstance(loaded, options.cls))
assert_equals(source, loaded)
return loaded
def pickling(options):
"Pickle {cls}"
empty = check_pickle_roundtrip(None, options)
assert_equals(empty, {})
mapping = check_pickle_roundtrip({'foo': 'bar'}, options)
assert_equals(mapping, {'foo': 'bar'})
# make sure sequence_type is preserved
raw = {'list': [{'a': 'b'}], 'tuple': ({'a': 'b'},)}
as_tuple = check_pickle_roundtrip(raw, options)
assert_true(isinstance(as_tuple['list'], list))
assert_true(isinstance(as_tuple['tuple'], tuple))
assert_true(isinstance(as_tuple.list, tuple))
assert_true(isinstance(as_tuple.tuple, tuple))
as_list = check_pickle_roundtrip(raw, options, sequence_type=list)
assert_true(isinstance(as_list['list'], list))
assert_true(isinstance(as_list['tuple'], tuple))
assert_true(isinstance(as_list.list, list))
assert_true(isinstance(as_list.tuple, list))
as_raw = check_pickle_roundtrip(raw, options, sequence_type=None)
assert_true(isinstance(as_raw['list'], list))
assert_true(isinstance(as_raw['tuple'], tuple))
assert_true(isinstance(as_raw.list, list))
assert_true(isinstance(as_raw.tuple, tuple))
def pop(options):
"Popping from {cls}"
mapping = options.constructor({'foo': 'bar', 'baz': 'qux'})
assert_raises(KeyError, lambda: mapping.pop('lorem'))
assert_equals(mapping.pop('lorem', 'ipsum'), 'ipsum')
assert_equals(mapping, {'foo': 'bar', 'baz': 'qux'})
assert_equals(mapping.pop('baz'), 'qux')
assert_false('baz' in mapping)
assert_equals(mapping, {'foo': 'bar'})
assert_equals(mapping.pop('foo', 'qux'), 'bar')
assert_false('foo' in mapping)
assert_equals(mapping, {})
def popitem(options):
"Popping items from {cls}"
expected = {'foo': 'bar', 'lorem': 'ipsum', 'alpha': 'beta'}
actual = {}
mapping = options.constructor(dict(expected))
for _ in range(3):
key, value = mapping.popitem()
assert_equals(expected[key], value)
actual[key] = value
assert_equals(expected, actual)
assert_raises(AttributeError, lambda: mapping.foo)
assert_raises(AttributeError, lambda: mapping.lorem)
assert_raises(AttributeError, lambda: mapping.alpha)
assert_raises(KeyError, mapping.popitem)
def clear(options):
"clear the {cls}"
mapping = options.constructor(
{'foo': 'bar', 'lorem': 'ipsum', 'alpha': 'beta'}
)
mapping.clear()
assert_equals(mapping, {})
assert_raises(AttributeError, lambda: mapping.foo)
assert_raises(AttributeError, lambda: mapping.lorem)
assert_raises(AttributeError, lambda: mapping.alpha)
def update(options):
"update a {cls}"
mapping = options.constructor({'foo': 'bar', 'alpha': 'bravo'})
mapping.update(alpha='beta', lorem='ipsum')
assert_equals(mapping, {'foo': 'bar', 'alpha': 'beta', 'lorem': 'ipsum'})
mapping.update({'foo': 'baz', 1: 'one'})
assert_equals(
mapping,
{'foo': 'baz', 'alpha': 'beta', 'lorem': 'ipsum', 1: 'one'}
)
assert_equals(mapping.foo, 'baz')
assert_equals(mapping.alpha, 'beta')
assert_equals(mapping.lorem, 'ipsum')
assert_equals(mapping(1), 'one')
def setdefault(options):
"{cls}.setdefault"
mapping = options.constructor({'foo': 'bar'})
assert_equals(mapping.setdefault('foo', 'baz'), 'bar')
assert_equals(mapping.foo, 'bar')
assert_equals(mapping.setdefault('lorem', 'ipsum'), 'ipsum')
assert_equals(mapping.lorem, 'ipsum')
assert_true(mapping.setdefault('return_none') is None)
assert_true(mapping.return_none is None)
assert_equals(mapping.setdefault(1, 'one'), 'one')
assert_equals(mapping[1], 'one')
assert_equals(mapping.setdefault('_hidden', 'yes'), 'yes')
assert_raises(AttributeError, lambda: mapping._hidden)
assert_equals(mapping['_hidden'], 'yes')
assert_equals(mapping.setdefault('get', 'value'), 'value')
assert_not_equals(mapping.get, 'value')
assert_equals(mapping['get'], 'value')
def copying(options):
"copying a {cls}"
mapping_a = options.constructor({'foo': {'bar': 'baz'}})
mapping_b = copy.copy(mapping_a)
mapping_c = mapping_b
mapping_b.foo.lorem = 'ipsum'
assert_equals(mapping_a, mapping_b)
assert_equals(mapping_b, mapping_c)
mapping_c.alpha = 'bravo'
def deepcopying(options):
"deepcopying a {cls}"
mapping_a = options.constructor({'foo': {'bar': 'baz'}})
mapping_b = copy.deepcopy(mapping_a)
mapping_c = mapping_b
mapping_b['foo']['lorem'] = 'ipsum'
assert_not_equals(mapping_a, mapping_b)
assert_equals(mapping_b, mapping_c)
mapping_c.alpha = 'bravo'
assert_not_equals(mapping_a, mapping_b)
assert_equals(mapping_b, mapping_c)
assert_false('lorem' in mapping_a.foo)
assert_equals(mapping_a.setdefault('alpha', 'beta'), 'beta')
assert_equals(mapping_c.alpha, 'bravo')
| mit | -2,616,914,232,908,486,700 | 30.07766 | 79 | 0.615787 | false |
joachimmetz/python-gflags | gflags/flags_modules_for_testing/module_bar.py | 1 | 4958 | #!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Auxiliary module for testing gflags.py.
The purpose of this module is to define a few flags. We want to make
sure the unit tests for gflags.py involve more than one module.
"""
__author__ = '[email protected] (Alex Salcianu)'
import gflags
FLAGS = gflags.FLAGS
def DefineFlags(flag_values=FLAGS):
"""Defines some flags.
Args:
flag_values: The FlagValues object we want to register the flags
with.
"""
# The 'tmod_bar_' prefix (short for 'test_module_bar') ensures there
# is no name clash with the existing flags.
gflags.DEFINE_boolean('tmod_bar_x', True, 'Boolean flag.',
flag_values=flag_values)
gflags.DEFINE_string('tmod_bar_y', 'default', 'String flag.',
flag_values=flag_values)
gflags.DEFINE_boolean('tmod_bar_z', False,
'Another boolean flag from module bar.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_t', 4, 'Sample int flag.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_u', 5, 'Sample int flag.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_v', 6, 'Sample int flag.',
flag_values=flag_values)
def RemoveOneFlag(flag_name, flag_values=FLAGS):
"""Removes the definition of one flag from gflags.FLAGS.
Note: if the flag is not defined in gflags.FLAGS, this function does
not do anything (in particular, it does not raise any exception).
Motivation: We use this function for cleanup *after* a test: if
there was a failure during a test and not all flags were declared,
we do not want the cleanup code to crash.
Args:
flag_name: A string, the name of the flag to delete.
flag_values: The FlagValues object we remove the flag from.
"""
if flag_name in flag_values.FlagDict():
flag_values.__delattr__(flag_name)
def NamesOfDefinedFlags():
"""Returns: List of names of the flags declared in this module."""
return ['tmod_bar_x',
'tmod_bar_y',
'tmod_bar_z',
'tmod_bar_t',
'tmod_bar_u',
'tmod_bar_v']
def RemoveFlags(flag_values=FLAGS):
"""Deletes the flag definitions done by the above DefineFlags().
Args:
flag_values: The FlagValues object we remove the flags from.
"""
for flag_name in NamesOfDefinedFlags():
RemoveOneFlag(flag_name, flag_values=flag_values)
def GetModuleName():
"""Uses gflags._GetCallingModule() to return the name of this module.
For checking that _GetCallingModule works as expected.
Returns:
A string, the name of this module.
"""
# Calling the protected _GetCallingModule generates a lint warning,
# but we do not have any other alternative to test that function.
return gflags._GetCallingModule() # pylint: disable=protected-access
def ExecuteCode(code, global_dict):
"""Executes some code in a given global environment.
For testing of _GetCallingModule.
Args:
code: A string, the code to be executed.
global_dict: A dictionary, the global environment that code should
be executed in.
"""
# Indeed, using exec generates a lint warning. But some user code
# actually uses exec, and we have to test for it ...
exec code in global_dict # pylint: disable=exec-used
def DisclaimKeyFlags():
"""Disclaims flags declared in this module."""
gflags.DISCLAIM_key_flags()
| bsd-3-clause | 1,752,783,587,478,691,600 | 35.455882 | 72 | 0.705728 | false |
cloudbase/maas | src/provisioningserver/testing/fakeapi.py | 1 | 6224 | # Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Fake Provisioning API.
:class:`FakeSynchronousProvisioningAPI` is intended to be useful in a Django
environment, or similar, where the Provisioning API is being used via
xmlrpclib.ServerProxy for example.
:class:`FakeAsynchronousProvisioningAPI` is intended to be used in a Twisted
environment, where all functions return :class:`defer.Deferred`s.
"""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
"FakeAsynchronousProvisioningAPI",
"FakeSynchronousProvisioningAPI",
]
from base64 import b64encode
from functools import wraps
from provisioningserver.interfaces import IProvisioningAPI
from provisioningserver.utils import filter_dict
from twisted.internet import defer
from zope.interface import implementer
from zope.interface.interface import Method
class FakeProvisioningDatabase(dict):
def __missing__(self, key):
self[key] = {"name": key}
return self[key]
def select(self, keys):
"""Select a subset of this mapping."""
return filter_dict(self, frozenset(keys))
def delete(self, keys):
"""Delete a subset of this mapping."""
for key in keys:
if key in self:
del self[key]
def dump(self):
"""Dump this mapping.
Keys are assumed to be immutable, and values are assumed to have a
`copy` method, like a `dict` for example.
"""
return {
key: value.copy()
for key, value in self.items()
}
@implementer(IProvisioningAPI)
class FakeProvisioningAPIBase:
# TODO: Referential integrity might be a nice thing.
def __init__(self):
super(FakeProvisioningAPIBase, self).__init__()
self.distros = FakeProvisioningDatabase()
self.profiles = FakeProvisioningDatabase()
self.nodes = FakeProvisioningDatabase()
# This records nodes that start/stop commands have been issued
# for. If a node has been started, its name maps to 'start'; if
# it has been stopped, its name maps to 'stop' (whichever
# happened most recently).
self.power_status = {}
def add_distro(self, name, initrd, kernel):
self.distros[name]["initrd"] = initrd
self.distros[name]["kernel"] = kernel
return name
def add_profile(self, name, distro):
self.profiles[name]["distro"] = distro
return name
def add_node(self, name, hostname, profile, power_type, preseed_data):
self.nodes[name]["hostname"] = hostname
self.nodes[name]["profile"] = profile
self.nodes[name]["mac_addresses"] = []
self.nodes[name]["ks_meta"] = {
"MAAS_PRESEED": b64encode(preseed_data),
}
self.nodes[name]["power_type"] = power_type
return name
def modify_distros(self, deltas):
for name, delta in deltas.items():
distro = self.distros[name]
distro.update(delta)
def modify_profiles(self, deltas):
for name, delta in deltas.items():
profile = self.profiles[name]
profile.update(delta)
def modify_nodes(self, deltas):
for name, delta in deltas.items():
node = self.nodes[name]
node.update(delta)
def get_distros_by_name(self, names):
return self.distros.select(names)
def get_profiles_by_name(self, names):
return self.profiles.select(names)
def get_nodes_by_name(self, names):
return self.nodes.select(names)
def delete_distros_by_name(self, names):
return self.distros.delete(names)
def delete_profiles_by_name(self, names):
return self.profiles.delete(names)
def delete_nodes_by_name(self, names):
return self.nodes.delete(names)
def get_distros(self):
return self.distros.dump()
def get_profiles(self):
return self.profiles.dump()
def get_nodes(self):
return self.nodes.dump()
def start_nodes(self, names):
for name in names:
self.power_status[name] = 'start'
def stop_nodes(self, names):
for name in names:
self.power_status[name] = 'stop'
PAPI_METHODS = {
name: getattr(FakeProvisioningAPIBase, name)
for name in IProvisioningAPI.names(all=True)
if isinstance(IProvisioningAPI[name], Method)
}
def sync_xmlrpc_func(func):
"""Decorate a function so that it acts similarly to a synchronously
accessed remote XML-RPC call.
All method calls return synchronously.
"""
@wraps(func)
def wrapper(*args, **kwargs):
assert len(kwargs) == 0, (
"The Provisioning API is meant to be used via XML-RPC, "
"for now, so its methods are prevented from use with "
"keyword arguments, which XML-RPC does not support.")
# TODO: Convert exceptions into Faults.
return func(*args)
return wrapper
# Generate a synchronous variant.
FakeSynchronousProvisioningAPI = type(
b"FakeSynchronousProvisioningAPI", (FakeProvisioningAPIBase,), {
name: sync_xmlrpc_func(func) for name, func in PAPI_METHODS.items()
})
def async_xmlrpc_func(func):
"""Decorate a function so that it acts similarly to an asynchronously
accessed remote XML-RPC call.
All method calls return asynchronously, via a :class:`defer.Deferred`.
"""
@wraps(func)
def wrapper(*args, **kwargs):
assert len(kwargs) == 0, (
"The Provisioning API is meant to be used via XML-RPC, "
"for now, so its methods are prevented from use with "
"keyword arguments, which XML-RPC does not support.")
# TODO: Convert exceptions into Faults.
return defer.execute(func, *args)
return wrapper
# Generate an asynchronous variant.
FakeAsynchronousProvisioningAPI = type(
b"FakeAsynchronousProvisioningAPI", (FakeProvisioningAPIBase,), {
name: async_xmlrpc_func(func) for name, func in PAPI_METHODS.items()
})
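

if __name__ == '__main__':
    # Minimal usage sketch, assuming only the fakes defined above; the distro,
    # profile and node names here ("precise", "node-1", ...) are arbitrary
    # example values, not anything mandated by the real Provisioning API.
    papi = FakeSynchronousProvisioningAPI()
    papi.add_distro("precise", "initrd.gz", "vmlinuz")
    papi.add_profile("maas-precise", "precise")
    papi.add_node("node-1", "node-1.local", "maas-precise", "virsh", b"preseed")
    papi.start_nodes(["node-1"])
    print(papi.get_nodes())
    print(papi.power_status)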
| agpl-3.0 | 4,621,239,559,335,503,000 | 29.509804 | 76 | 0.64653 | false |
sassoftware/catalog-service | catalogService/rest/drivers/openstack/openstackclient.py | 1 | 23160 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from catalogService import errors
from catalogService.rest import baseDriver
from catalogService.rest.models import images
from catalogService.rest.models import instances
try:
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.client import Client as KeystoneClient
from keystoneclient.session import Session as KeystoneSession
from novaclient.v1_1.client import Client as NovaClient
from glanceclient import Client as GlanceClient
import logging
logging.getLogger('iso8601.iso8601').setLevel(logging.ERROR)
except ImportError:
NovaClient = None #pyflakes=ignore
class OpenStack_Image(images.BaseImage):
"OpenStack Image"
NOVA_PORT = 5000
CATALOG_NEW_FLOATING_IP = "new floating ip-"
CATALOG_NEW_FLOATING_IP_DESC = "[New floating IP in {pool}]"
# This is provided by the nova api
#class OpenStack_InstanceTypes(instances.InstanceTypes):
# "OpenStack Instance Types"
#
# idMap = [
# ('xenent.small', "Small"),
# ('xenent.medium', "Medium"),
# ]
# Nova address
# Nova port
# Glance address (until apis are integrated)
# Glance port
_configurationDescriptorXmlData = """<?xml version='1.0' encoding='UTF-8'?>
<descriptor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.rpath.org/permanent/descriptor-1.0.xsd descriptor-1.0.xsd">
<metadata>
<displayName>OpenStack Configuration</displayName>
<descriptions>
<desc>Configure OpenStack</desc>
</descriptions>
</metadata>
<dataFields>
<field>
<name>name</name>
<descriptions>
<desc>Nova Server</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/novaServerName.html'/>
</field>
<field>
<name>nova_port</name>
<descriptions>
<desc>Nova Port</desc>
</descriptions>
<type>int</type>
<constraints>
<descriptions>
<desc>Valid ports are integers between 1 and 65535</desc>
</descriptions>
<range><min>1</min><max>65535</max></range>
</constraints>
<required>true</required>
<default>%(nova_port)s</default>
<help href='configuration/novaPortNumber.html'/>
</field>
<field>
<name>alias</name>
<descriptions>
<desc>Name</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/alias.html'/>
</field>
<field>
<name>description</name>
<descriptions>
<desc>Full Description</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/description.html'/>
</field>
<field>
<name>project_name</name>
<descriptions>
<desc>Project Name</desc>
</descriptions>
<type>str</type>
<required>true</required>
<help href='configuration/project_name.html'/>
</field>
</dataFields>
</descriptor>""" % dict(nova_port=NOVA_PORT, )
# User Name
# Auth Token
_credentialsDescriptorXmlData = """<?xml version='1.0' encoding='UTF-8'?>
<descriptor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.rpath.org/permanent/descriptor-1.0.xsd descriptor-1.0.xsd">
<metadata>
<displayName>OpenStack User Credentials</displayName>
<descriptions>
<desc>User Credentials for OpenStack</desc>
</descriptions>
</metadata>
<dataFields>
<field>
<name>username</name>
<descriptions>
<desc>User Name</desc>
</descriptions>
<type>str</type>
<constraints>
<descriptions>
<desc>Field must contain between 1 and 32 characters</desc>
</descriptions>
<length>32</length>
</constraints>
<required>true</required>
</field>
<field>
<name>password</name>
<descriptions>
<desc>Password</desc>
</descriptions>
<type>str</type>
<constraints>
<descriptions>
<desc>Field must contain between 1 and 40 characters</desc>
</descriptions>
<length>40</length>
</constraints>
<required>true</required>
<password>true</password>
</field>
</dataFields>
</descriptor>
"""
# http://glance.openstack.org/client.html
# http://pypi.python.org/pypi/python-novaclient
class ConsolidatedClient(object):
def __init__(self, keystone_client, nova_client, glance_client):
self.keystone = keystone_client
self.nova = nova_client
self.glance = glance_client
class OpenStackClient(baseDriver.BaseDriver):
Image = OpenStack_Image
cloudType = 'openstack'
configurationDescriptorXmlData = _configurationDescriptorXmlData
credentialsDescriptorXmlData = _credentialsDescriptorXmlData
RBUILDER_BUILD_TYPE = 'RAW_HD_IMAGE'
NovaClientClass = NovaClient
KEYSTONE_API_VERSION = '2.0'
GLANCE_CLIENT_VERSION = '2'
@classmethod
def isDriverFunctional(cls):
return cls.NovaClientClass is not None
getImageIdFromMintImage = baseDriver.BaseDriver._getImageIdFromMintImage_local
def _authUrl(self, server, port, secure=True):
return "%s://%s:%s" % ('https' if secure else 'http', server, port)
def _secureToInsecureFallback(self, callback, *args, **kwargs):
kwSecure = kwargs.copy()
kwSecure['secure'] = True
try:
# try calling the callback with secure=True
return callback(self, *args, **kwSecure)
except Exception, eSecure:
eSecure_type, eSecure_val, eSecure_trace = sys.exc_info()
kwInsecure = kwargs.copy()
kwInsecure['secure'] = False
# try calling the callback with secure=False
try:
return callback(self, *args, **kwInsecure)
except Exception, eInsecure:
# if insecure version also fails, transparently raise the secure exception
raise eSecure_type, eSecure_val, eSecure_trace
def drvCreateCloudClient(self, credentials):
cloudConfig = self.getTargetConfiguration()
server = cloudConfig['name']
port = cloudConfig['nova_port']
projectName = cloudConfig['project_name']
try:
session = KeystoneSession()
def authenticate(self, **kwargs):
secure = kwargs.pop('secure')
authUrl = self._authUrl(server, port, secure=secure)
keystoneCli = KeystoneClient(self.KEYSTONE_API_VERSION,
tenant_name=projectName,
auth_url=authUrl,
username=credentials['username'],
password=credentials['password'],
session=session)
auth = v2_auth.Password(
keystoneCli.auth_url,
username=credentials['username'],
password=credentials['password'])
session.auth = auth
keystoneCli.authenticate()
auth.auth_ref = keystoneCli.auth_ref
return keystoneCli
keystoneCli = self._secureToInsecureFallback(authenticate)
novaCli = self.NovaClientClass(auth_token=keystoneCli.auth_token,
project_id=projectName,
auth_url=keystoneCli.auth_url,
session=session)
endpoint = session.get_endpoint(service_type="image")
glanceCli = GlanceClient(self.GLANCE_CLIENT_VERSION,
endpoint=endpoint,
project_id=projectName,
token=keystoneCli.auth_token,
session=session)
clients = ConsolidatedClient(keystoneCli, novaCli, glanceCli)
except Exception, e:
raise errors.PermissionDenied(message =
"Error initializing client: %s" % (e, ))
return clients
def drvVerifyCloudConfiguration(self, dataDict):
serverName = dataDict['name']
serverPort = dataDict['nova_port']
def verify(self, **kwargs):
secure = kwargs.pop('secure')
self._verifyServerUrl(self._authUrl(serverName, serverPort, secure=secure))
self._secureToInsecureFallback(verify)
def terminateInstances(self, instanceIds):
running_instances = self.getInstances(instanceIds)
for server in running_instances:
server.delete() # there is no terminate method in novaclient
insts = instances.BaseInstances()
insts.extend(running_instances)
# Set state
for inst in insts:
inst.setState("Terminating")
return insts
def terminateInstance(self, instanceId):
return self.terminateInstances([instanceId])
def _get_flavors(self):
objlist = self.client.nova.flavors.list()
objlist.sort(key=lambda x: (x.vcpus, x.ram, x.disk))
return objlist
def _get_availability_zones(self):
objlist = self.client.nova.availability_zones.list(detailed=False)
objlist = [ x for x in objlist if x.zoneState.get('available') ]
objlist.sort(key=lambda x: x.zoneName)
return objlist
def drvPopulateImageDeploymentDescriptor(self, descr, extraArgs=None):
descr.setDisplayName("OpenStack Launch Parameters")
descr.addDescription("OpenStack Launch Parameters")
self.drvImageDeploymentDescriptorCommonFields(descr)
self._imageDeploymentSpecifcDescriptorFields(descr, extraArgs=extraArgs)
return self._drvPopulateDescriptorFromTarget(descr)
def drvPopulateLaunchDescriptor(self, descr, extraArgs=None):
descr.setDisplayName("OpenStack Launch Parameters")
descr.addDescription("OpenStack Launch Parameters")
self.drvLaunchDescriptorCommonFields(descr)
self._launchSpecificDescriptorFields(descr, extraArgs=extraArgs)
return self._drvPopulateDescriptorFromTarget(descr)
def _drvPopulateDescriptorFromTarget(self, descr):
pass
def _retriveSSHKeyPairs(self, descr):
keyPairs = [ descr.ValueWithDescription(x[0], descriptions = x[1])
for x in self._cliGetKeyPairs() ]
if not keyPairs:
raise errors.ParameterError("No OpenStack SSH key pairs defined, please create one")
return keyPairs
def _launchSpecificDescriptorFields(self, descr, extraArgs=None):
avzones = self._get_availability_zones()
descr.addDataField("availabilityZone",
descriptions = [
("Availability Zone", None),
(u"Zone de disponibilit\u00e9", "fr_FR")],
help = [
("launch/availabilityZones.html", None)],
default = [ avzones[0].zoneName ],
required=True,
type = descr.EnumeratedType([
descr.ValueWithDescription(x.zoneName, descriptions = x.zoneName)
for x in avzones ]
))
targetFlavors = self._get_flavors()
if not targetFlavors:
raise errors.ParameterError("No instance flavors defined")
flavors = [ descr.ValueWithDescription(str(f.id),
descriptions = "%s (VCPUs: %d, RAM: %d MB)" % (f.name, f.vcpus, f.ram)) for f in targetFlavors ]
descr.addDataField('flavor',
descriptions = 'Flavor',
required = True,
help = [
('launch/flavor.html', None)
],
type = descr.EnumeratedType(flavors),
default=flavors[0].key,
)
networks = self._cliGetNetworks()
descr.addDataField('network',
descriptions = 'Network',
required = True,
help = [
('launch/network.html', None)
],
type = descr.EnumeratedType(
descr.ValueWithDescription(x.id, descriptions = x.label)
for x in networks),
default=[networks[0].id],
)
descr.addDataField("keyName",
descriptions = [ ("SSH Key Pair", None), ("Paire de clefs", "fr_FR") ],
help = [
("launch/keyPair.html", None)
],
type = descr.EnumeratedType(self._retriveSSHKeyPairs(descr))
)
fpList = self._cliGetFloatingIps()
descr.addDataField('floatingIp',
descriptions = 'Floating IP',
required = True,
help = [
('launch/floatingIp.html', None)
],
type = descr.EnumeratedType(
descr.ValueWithDescription(x['id'], descriptions = x['label'])
for x in fpList),
default=fpList[0]['id'],
)
return descr
def _cliGetFloatingIps(self):
cli = self.client.nova
pools = cli.floating_ip_pools.list()
objs = cli.floating_ips.list()
unassigned = [
dict(
id=CATALOG_NEW_FLOATING_IP + x.name,
label=CATALOG_NEW_FLOATING_IP_DESC.format(pool=x.name),
pool=x.name)
for x in pools ]
for obj in objs:
if obj.instance_id:
continue
unassigned.append(dict(id=obj.id,
label= "%s in pool %s" % (obj.ip, obj.pool),
pool=obj.pool,
ip=obj.ip))
unassigned.sort(key=lambda x: x.get('ip'))
return unassigned
def _cliGetKeyPairs(self):
try:
rs = self.client.nova.keypairs.list()
except:
raise
return [ (x.id, x.name) for x in rs ]
def _cliGetNetworks(self):
networks = self.client.nova.networks.list()
networks.sort(key=lambda x: x.label.lower())
if not networks:
raise errors.ParameterError("No networks defined, please create one")
return networks
def _imageDeploymentSpecifcDescriptorFields(self, descr, **kwargs):
pass
# TODO: remove when novaclient has caught up to v1.1.
# This pulls a resource id from from a resource ref url
def _get_id_from_ref(self, resource_ref):
return resource_ref.split('/')[-1]
@classmethod
def _idFromRef(cls, ref):
if ref is None:
return ref
if isinstance(ref, int):
return str(ref)
# Grab the last part of the URL and return it
return os.path.basename(ref)
def drvGetInstances(self, instanceIds, force=False):
client = self.client.nova
cloudAlias = self.getCloudAlias()
instanceList = instances.BaseInstances()
images = self.getAllImages()
# Hash images so we can quickly return a ref
imagesMap = dict((self._idFromRef(image.opaqueId), image)
for image in images if hasattr(image, 'opaqueId'))
servers = sorted(client.servers.list(), key=self.sortKey)
for server in servers:
instanceId = str(server.id)
imageId = None
imgobj = server.image
if imgobj:
imageRef = self._idFromRef(imgobj['id'])
image = imagesMap.get(imageRef)
if image:
imageId = image.id
publicDnsName = privateDnsName = None
if server.addresses.values():
addrList = server.addresses.values()[0]
floatingAddrs = [ x['addr'] for x in addrList if x['OS-EXT-IPS:type'] == 'floating' ]
fixedAddrs = [ x['addr'] for x in addrList if x['OS-EXT-IPS:type'] == 'fixed' ]
if floatingAddrs:
publicDnsName = floatingAddrs[0]
if fixedAddrs:
privateDnsName = fixedAddrs[0]
inst = self._nodeFactory.newInstance(id = instanceId,
imageId = imageId,
instanceId = instanceId,
instanceName = server.name,
instanceDescription = server.name,
dnsName = 'UNKNOWN',
publicDnsName = publicDnsName,
privateDnsName = privateDnsName,
state = server.status,
launchTime = server.created if hasattr(server, 'created') else None,
cloudName = self.cloudName,
cloudAlias = cloudAlias)
instanceList.append(inst)
instanceList.sort(key = lambda x: (x.getState(), x.getInstanceId()))
return self.filterInstances(instanceIds, instanceList)
@classmethod
def _getServerAddressByType(cls, server, addressType):
if not server.addresses:
return None
addrlist = server.addresses.get(addressType)
if not addrlist:
return None
return addrlist[0]['addr']
def getLaunchInstanceParameters(self, image, descriptorData):
params = baseDriver.BaseDriver.getLaunchInstanceParameters(self,
image, descriptorData)
getField = descriptorData.getField
srUuid = getField('storageRepository')
params['srUuid'] = srUuid
return params
def deployImageProcess(self, job, image, auth, **params):
# RCE-1751: always redeploy.
if 0 and image.getIsDeployed():
self._msg(job, "Image is already deployed")
return image.getImageId()
ppop = params.pop
imageName = ppop('imageName')
cloudConfig = self.getTargetConfiguration()
nameLabel = image.getLongName()
nameDescription = image.getBuildDescription()
self._deployImage(job, image, auth, imageName=imageName)
self._msg(job, 'Image deployed')
return image.getImageId()
def launchInstanceProcess(self, job, image, auth, **launchParams):
ppop = launchParams.pop
instanceName = ppop('instanceName')
instanceDescription = ppop('instanceDescription')
flavorRef = ppop('flavor')
networkRef = ppop('network')
zoneRef = ppop('availabilityZone')
floatingIp = ppop('floatingIp')
if floatingIp.startswith(CATALOG_NEW_FLOATING_IP):
poolName = floatingIp[len(CATALOG_NEW_FLOATING_IP):]
floatingIp = self.client.nova.floating_ips.create(pool=poolName)
else:
floatingIp = self.client.nova.floating_ips.get(floatingIp)
keyName = ppop('keyName', None)
cloudConfig = self.getTargetConfiguration()
nameLabel = image.getLongName()
nameDescription = image.getBuildDescription()
imageName = image.getBaseFileName()
if not image.getIsDeployed():
imageId = self._deployImage(job, image, auth, imageName=imageName)
else:
imageId = getattr(image, 'opaqueId')
job.addHistoryEntry('Launching')
instId = self._launchInstanceOnTarget(job, instanceName, imageId,
flavorRef, keyName, floatingIp, zoneRef, networkRef)
return [ instId ]
@classmethod
def sortKey(cls, x):
return x.id
def getImagesFromTarget(self, imageIdsFilter):
cloudAlias = self.getCloudAlias()
client = self.client.nova
ret = []
images = sorted(client.images.list(detailed=True), key=self.sortKey)
for image in images:
# image.id is numeric
imageId = str(image.id)
imageName = image.name
img = self._nodeFactory.newImage(
id = imageId,
imageId = imageId,
isDeployed = True,
is_rBuilderImage = False,
shortName = imageName,
productName = imageName,
longName = imageName,
cloudName = self.cloudName,
cloudAlias = cloudAlias)
img.opaqueId = self._getLinkRel(image, 'self')
ret.append(img)
return self.filterImages(imageIdsFilter, ret)
@classmethod
def _getLinkRelFromList(cls, list, rel):
for link in list:
if link['rel'] == rel:
return link['href']
return None
@classmethod
def _getLinkRel(cls, obj, rel):
return cls._getLinkRelFromList(obj.links, rel)
def _getImageDiskFormat(self):
return 'raw'
def _getImageContainerFormat(self):
return 'bare'
def _importImage(self, job, imageMetadata, fileObj):
job.addHistoryEntry('Creating image')
glanceImage = self.client.glance.images.create(**imageMetadata)
job.addHistoryEntry('Uploading image content')
self.client.glance.images.upload(glanceImage.id, fileObj)
return str(glanceImage.id)
def _deployImageFromFile(self, job, image, path, *args, **kwargs):
imageName = kwargs.get('imageName', image.getShortName())
try:
job.addHistoryEntry('Uncompressing image')
logger = lambda *x: self._msg(job, *x)
archive = baseDriver.Archive(path, logger)
archive.extract()
archiveMembers = list(archive)
assert len(archiveMembers) == 1
member = archiveMembers[0]
fobj = archive.extractfile(member)
job.addHistoryEntry('Importing image')
imageDiskFormat = self._getImageDiskFormat()
imageContainerFormat = self._getImageContainerFormat()
imageMetadata = {'name':imageName, 'disk_format':imageDiskFormat,
'container_format':imageContainerFormat}
imageId = self._importImage(job, imageMetadata, fobj)
finally:
pass
return imageId
def _launchInstanceOnTarget(self, job, name, imageRef, flavorRef, keyName, floatingIp, zoneRef, networkRef):
client = self.client.nova
server = client.servers.create(name, imageRef, flavorRef,
key_name=keyName, nics=[{'net-id' : networkRef}],
availability_zone=zoneRef)
for i in range(20):
if server.status == 'ACTIVE':
break
job.addHistoryEntry('Waiting for server to become active')
time.sleep(2*i + 1)
server = client.servers.get(server)
server.add_floating_ip(floatingIp)
return str(server.id)
| apache-2.0 | 6,831,616,599,087,930,000 | 36.056 | 156 | 0.60095 | false |
openjck/apod-scraper | scraper.py | 1 | 6524 | #!/usr/bin/env python
import bleach
from bs4 import BeautifulSoup
from collections import OrderedDict
from dateutil import parser
import regex
import requests
import scraperwiki
import urlparse
class Page:
def __init__(self, path, basename, encoding):
self.path = path
self.basename = basename
self.encoding = encoding
self.url = path + basename
class Archive(Page):
def __init__(self, path, basename, encoding):
Page.__init__(self, path, basename, encoding)
@property
def links(self):
link_re = 'ap[0-9]+\.html'
soup = make_soup(self.url, self.encoding, parser='html.parser')
return soup.find_all(href=regex.compile(link_re))
class Entry(Page):
def __init__(self, path, basename, encoding, link):
Page.__init__(self, path, basename, encoding)
self.link = link
@property
def entry_url(self):
return self.url
@property
def date(self):
date_raw = self.link.previous_sibling[:-3]
date = parser.parse(date_raw).strftime('%Y-%m-%d')
return unicode(date, 'UTF-8')
@property
def title(self):
return self.link.text
@property
def credit(self):
soup = self.get_soup()
html = str(soup)
# The credit information is always below the title. Sometimes the title
# on the picture page is slightly different from the title on the index
# page, however, so fuzzy matching is used here to account for any
# differences.
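        # (A small sketch of the third-party `regex` module's fuzzy syntax,
        # unrelated to this page's markup: regex.search('(?:colour){e<=1}', 'color')
        # matches because up to one character error is tolerated.)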
match = regex.search('<b>\s*?(?:{0}){{e<={1}}}\s*?<(?:\/b|br.*?)>(.*?)<p>'.format(regex.escape(self.link.text.encode('UTF-8')), int(float(len(self.link.text)) * 0.25)), html, regex.DOTALL | regex.IGNORECASE)
if not match:
# If the above fails for some reason, one last attempt will be made
# to locate the credit information by searching between the title
# and the explanation.
match = regex.search('<b>.*?<(?:\/b|br.*?)>(.*?)<p>.*?<(?:b|h3)>\s*?Explanation(?::)?\s*?<\/(?:b|h3)>(?::)?', html, regex.DOTALL | regex.IGNORECASE)
if match:
# Remove all tags except the anchor tags, and remove all excess
# whitespace characters.
credit = ' '.join(bleach.clean(match.group(1), tags=['a'], attributes={'a': ['href']}, strip=True).split())
else:
credit = '';
return credit
@property
def explanation(self):
soup = self.get_soup()
html = str(soup)
match = regex.search('<(?:b|h3)>\s*?Explanation(?::)?\s*?<\/(?:b|h3)>(?::)?(.*?)<p>', html, regex.DOTALL | regex.IGNORECASE)
if match:
explanation = ' '.join(bleach.clean(match.group(1), tags=['a'], attributes={'a': ['href']}, strip=True).split())
else:
explanation = ''
return explanation
@property
def picture_thumbnail_url(self):
soup = self.get_soup()
picture_thumbail_link = soup.find('img', src=regex.compile('image/'))
# Check if there is a smaller version of the picture on the page.
if picture_thumbail_link:
picture_thumbnail_url = self.path + picture_thumbail_link['src']
else:
picture_thumbnail_url = ''
return unicode(picture_thumbnail_url, 'UTF-8')
@property
def picture_url(self):
soup = self.get_soup()
picture_link = soup.find('a', href=regex.compile(self.path.replace('.', '\.') + 'image/'))
# Check if there is a higher-resolution link to the picture.
if picture_link:
picture_url = picture_link['href']
else:
picture_url = ''
return unicode(picture_url, 'UTF-8')
@property
def video_url(self):
soup = self.get_soup()
video_link = soup.find('iframe')
if video_link:
video_url = video_link['src']
else:
video_url = ''
return unicode(video_url, 'UTF-8')
# Cache the soup.
def get_soup(self):
if not hasattr(self, 'soup'):
self.soup = make_soup(self.url, self.encoding, True, self.path)
return self.soup
def make_soup(url, encoding, absolute=False, base='', parser='lxml'):
html = requests.get(url)
if parser:
soup = BeautifulSoup(html.content, parser, from_encoding=encoding)
else:
soup = BeautifulSoup(html.content, from_encoding=encoding)
# Make all links absolute.
# http://stackoverflow.com/a/4468467/715866
if absolute:
for a in soup.find_all('a', href=True):
a['href'] = urlparse.urljoin(base, a['href'])
return soup
def save(url, date, title, credit, explanation, picture_thumbnail_url, picture_url, video_url, data_version):
data = OrderedDict()
data['url'] = url;
data['date'] = date;
data['title'] = title;
data['credit'] = credit;
data['explanation'] = explanation;
data['picture_thumbnail_url'] = picture_thumbnail_url;
data['picture_url'] = picture_url;
data['video_url'] = video_url;
data_versions = OrderedDict()
data_versions['url'] = url;
data_versions['data_version'] = data_version;
scraperwiki.sql.save(['url'], data)
scraperwiki.sql.save(['url'], data_versions, table_name='data_versions')
def table_exists(table):
try:
scraperwiki.sql.select('* FROM %s' % table)
return True
except:
return False
def main():
# Change this number when the scraping algorithm changes. All pages will be
# re-scraped.
version = '1.1.1'
path = 'http://apod.nasa.gov/apod/'
site_encoding = 'windows-1252'
archive = Archive(path, 'archivepix.html', site_encoding)
versions = table_exists('data_versions')
for link in archive.links:
entry = Entry(path, link['href'], site_encoding, link)
if versions:
result = scraperwiki.sql.select('url, data_version FROM data_versions WHERE url = "%s" LIMIT 1' % entry.entry_url)
# Only scrape and save the page if it contains a picture or video and
# if it has not already been scraped at this version.
if (not versions or not result or result[0]['data_version'] != version) and (entry.picture_thumbnail_url or entry.video_url):
save(entry.entry_url, entry.date, entry.title, entry.credit, entry.explanation, entry.picture_thumbnail_url, entry.picture_url, entry.video_url, data_version=version)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,320,870,269,722,252,000 | 31.949495 | 215 | 0.601625 | false |
kyxw007/MovieRename | kyxw007/FileTool.py | 1 | 1084 | # coding=utf-8
import os
import Utils, Rename
root_dir = "/Volumes/XiaoMi-usb0/下载"
tool = Rename.RenameTool()
def get_suffix(file_name):
index = file_name.rfind('.')
return file_name[index:len(file_name)]
def folder_rename(root_dir):
file_list = os.listdir(root_dir)
for file_name in filter(Utils.hasChinese, file_list):
print("老文件名:", file_name)
tool.find_fitness_movie(file_name)
new_file_name = tool.new_filename + get_suffix(file_name)
print("新文件名:", new_file_name)
os.rename(root_dir + "/" + file_name, root_dir + "/" + new_file_name)
def single_rename(path, file_name):
print("老文件名:", file_name)
tool.find_fitness_movie(file_name)
new_file_name = tool.new_filename + get_suffix(file_name)
print("新文件名:", new_file_name)
os.rename(path + "/" + file_name, path + "/" + new_file_name)
single_rename("/Volumes/XiaoMi-usb0/下载", "火星救援.The Martian.2015.评分[8.4].主演[马特·达蒙].导演[雷德利·斯科特].Mp4Ba")
# folder_rename(root_dir)
| apache-2.0 | 1,555,840,494,825,192,000 | 27.628571 | 101 | 0.645709 | false |
srmnitc/tis-tools | runscripts/make_q4q6_dist/make_q4q6_dist.py | 1 | 13555 | import os
import sys
import subprocess as sub
import numpy as np
import time
import logging
import tistools_helpers.tistools_helpers as tistools_helpers
#SRM:set up logger for the general error messages
logger = logging.getLogger(__name__)
handler = logging.FileHandler("analysis.log")
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.propagate = False
#workdir
workdir = '/home/users/menonsqr/storage/HCP19/tis_run'
seedfileaddress = '/home/users/menonsqr/SeedHCP19/seed.dat'
binary = '/home/users/menonsqr/tis-tools.git/trunk/runscripts/make_q4q6_dist/orderparameter/main'
tstcluster = 413
maxconfs=10
#create helpers class
helpers = tistools_helpers.tistools_helpers()
#class for seed
class Seed(object):
def __init__(self,seedfileaddress):
#self.atoms=np.empty([natoms,5])
self.seedfile=seedfileaddress
self.exists=True
#read the positions of seed atoms
def ReadSeed(self,read=True,idlist=None):
self.seedids = []
if read==True:
if os.path.isfile(self.seedfile) and os.path.getsize(self.seedfile) > 0:
for line in open(self.seedfile):
self.seedids.append(int(line.strip()))
self.atoms=np.empty([len(self.seedids),7])
else:
self.exists=False
else:
self.seedids=idlist
if len(self.seedids)>0:
self.atoms=np.empty([len(self.seedids),7])
else:
self.exists=False
#populate the positions of seed atoms
def PopulateSeed(self,filename,read=True):
if read==True:
atoms = read_alles(filename)
else:
atoms = filename
#add the atoms and positions
k = 0
for i in range(len(atoms)):
if atoms[i][0] in self.seedids:
self.atoms[k][0]=atoms[i][0]
self.atoms[k][1]=atoms[i][1]
self.atoms[k][2]=atoms[i][2]
self.atoms[k][3]=atoms[i][3]
self.atoms[k][4]=atoms[i][4]
self.atoms[k][5]=atoms[i][5]
self.atoms[k][6]=atoms[i][6]
k+=1
def CalculateDistances(self,otheratoms):
loo = []
for atom in self.atoms:
dist = []
for oatom in otheratoms.atoms:
#print 'seed'
#print seedatom[0]
a = oatom[1]
b = oatom[2]
c = oatom[3]
distance = np.sqrt((a-atom[1])**2 + (b-atom[2])**2 + (c-atom[3])**2 )
dist.append(distance)
mindist=min(dist)
#print mindist
#print (mindist<1e-5)
if mindist<1e-5:
#print "oh"
mindist=0.00
atom[4]=mindist
loo.append(mindist)
return loo
#function to read dump files
def read_alles(filename,filetype="dump"):
if (filetype=="dump"):
        #the first nine header lines are skipped
        #after that, columns 0, 3, 4 and 5 (atom id, x, y, z) are read
count = 0
data = []
print "reading atom file"
for line in open(filename,'r'):
data.append(line)
boxsizelist = []
natoms = int(data[3])
#atoms values are as follows
# 0 : id
# 1,2,3 : x,y,z
# 4 : whichever distance value
# 5,6 : avg q4 and q6 respectively
# #total of seven parameters
atoms = np.empty([natoms,7])
i = 0
for line in data:
if (count==5) or (count==6) or (count==7):
raw = line.split()
boxsizelist.append(float(raw[0]))
boxsizelist.append(float(raw[1]))
elif (count>8):
raw = line.split()
atoms[i][0] = int(raw[0])
atoms[i][1] = float(raw[3])
atoms[i][2] = float(raw[4])
atoms[i][3] = float(raw[5])
atoms[i][4] = 99999.00
atoms[i][5] = 99999.00
atoms[i][6] = 99999.00
#atoms[i][4] = False
i+=1
count+=1
#print atoms
#print boxsizelist
return atoms
#main function that is to be called
def MakeStructureHistogram(pathtype,manual=False,gzip=False):
"""
special function to make histograms
hardcoded. Remove at some point.
"""
tmpfile = 'my_tmp'
snapshots=1
#set up histograms
distance1 = []
distance2 = []
distance3 = []
distance4 = []
distance5 = []
distance6 = []
distance7 = []
alle = []
if manual==False:
interfacelist = helpers.generate_intflist()
else:
interfacelist = helpers.read_intflist()
for interface in interfacelist:
if snapshots>maxconfs:
break
interface = interface.strip()
intfpath = os.path.join(os.getcwd(),"tis","la",interface)
intfpath = intfpath.strip()
pathpath = os.path.join(intfpath,pathtype+".dat")
pathpath = pathpath.strip()
pathlist = []
filenamelist = []
#we get the list of all paths that needs to be analysed
for path in open(pathpath,'r'):
pathlist.append(path)
print "finalised paths"
#may the analysis start
for path in pathlist:
if snapshots>maxconfs:
break
path = path.strip()
pathpath= os.path.join(intfpath,path)
identifier = interface+path
#we are in the folder now
#we have to read the actual trajectory
actualtraj = os.path.join(workdir,'tis','la',interface,path)
data = helpers.combine_paths_return(actualtraj,gzip=gzip)
print "read paths"
#we have the data on standby
#time to read the output raw data histo file.
histofile = os.path.join(pathpath,(identifier+'.histo.list'))
histodataslices = []
histodata = []
count=0
#print "ll"
#print histofile
if os.path.exists(histofile):
#print histofile
#print "kkll"
for line in open(histofile,'r'):
histodata.append(line.strip())
count+=1
if count==12:
histodataslices.append(histodata)
histodata = []
count =0
else:
continue
print "read histolists"
#loooping over each slice in the trajectory
for i in range(len(histodataslices)):
#print snapshots
bccids = map(int,histodataslices[i][3].split())
fccids = map(int,histodataslices[i][5].split())
hcpids = map(int,histodataslices[i][7].split())
udfids = map(int,histodataslices[i][9].split())
surids = map(int,histodataslices[i][11].split())
nucsize = len(bccids)+len(fccids)+len(hcpids)+len(udfids)
#print fccids
#print nucsize
#check if the guy should be part of histo, and which histo
if (nucsize <= tstcluster+3) and (nucsize >= tstcluster-3):
if snapshots>maxconfs:
break
snapshots+=1
print "value found"
tmpfile = os.path.join(os.getcwd(),identifier+'.temp')
outfile = open(tmpfile,'w')
for j in range(len(data[i])):
outfile.write(data[i][j])
outfile.flush()
outfile.close()
print "applying op"
#apply order parameter and read histo stuff
cmd = [binary,tmpfile]
proc = sub.Popen(cmd, stdin=sub.PIPE,stdout=sub.PIPE,stderr=sub.PIPE)
out,err = proc.communicate()
proc.wait()
print "reading the atoms"
#read the slice
#modify read alles to read in the q4 q6 too.- done
atoms = read_alles(tmpfile)
os.system(('rm %s')% tmpfile)
print "setting up seed"
#set up the seed classes
seed = Seed(seedfileaddress)
seed.ReadSeed()
seed.PopulateSeed(atoms,read=False)
#delete the seed particles from the lists
bccids = [x for x in bccids if x not in seed.seedids]
fccids = [x for x in fccids if x not in seed.seedids]
hcpids = [x for x in hcpids if x not in seed.seedids]
udfids = [x for x in udfids if x not in seed.seedids]
#set up surface class
surface = Seed('dummy')
surface.ReadSeed(read=False,idlist=surids)
if surface.exists:
surface.PopulateSeed(atoms,read=False)
#find udf ids in surface
#udfsurids = [x for x in udfids if x in surids]
udfsurids = [x for x in hcpids if x in surids]
#udfcoreids = [x for x in udfids if x not in surids]
udfcoreids = [x for x in hcpids if x not in surids]
print "populating seeds"
#set up UDF class
udfsur = Seed('dummy')
udfsur.ReadSeed(read=False,idlist=udfsurids)
if udfsur.exists:
udfsur.PopulateSeed(atoms,read=False)
udfcore = Seed('dummy')
udfcore.ReadSeed(read=False,idlist=udfcoreids)
if udfcore.exists:
udfcore.PopulateSeed(atoms,read=False)
print "reading q4q6files"
qlist = []
for line in open('result1.dat','r'):
line = line.strip()
raw = line.split()
dummy = [int(raw[0]),float(raw[1]),float(raw[2])]
qlist.append(dummy)
print "trimming q4q6files"
qlistcore = [ pos for pos in qlist if pos[0] in udfcoreids ]
print "assingning pos values to atoms"
for atomito in udfcore.atoms:
for pos in qlistcore:
if atomito[0]==pos[0]:
atomito[5]=pos[1]
atomito[6]=pos[2]
break
print "calculating distances"
#seeds are populated. Now find distance of each atom to the surface.
udfcore.CalculateDistances(surface)
print "making distance lists"
#now add the points to the arrays.
for atomcito in udfcore.atoms:
if atomcito[4]<=1.0:
distance1.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=2.0:
distance2.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=3.0:
distance3.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=4.0:
distance4.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=5.0:
distance5.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=6.0:
distance6.append([atomcito[5],atomcito[6]])
elif atomcito[4]<=7.0:
distance7.append([atomcito[5],atomcito[6]])
else:
print "jsjsjsj"
alle.append([atomcito[5],atomcito[6]])
print "finished slice"
print snapshots
#write out the files
print "writing distance lists"
fout = open('distance1.dat','w')
for i in range(len(distance1)):
fout.write(("%f %f\n")%(distance1[i][0],distance1[i][1]))
fout.close()
fout = open('distance2.dat','w')
for i in range(len(distance2)):
fout.write(("%f %f\n")%(distance2[i][0],distance2[i][1]))
fout.close()
fout = open('distance3.dat','w')
for i in range(len(distance3)):
fout.write(("%f %f\n")%(distance3[i][0],distance3[i][1]))
fout.close()
fout = open('distance4.dat','w')
for i in range(len(distance4)):
fout.write(("%f %f\n")%(distance4[i][0],distance4[i][1]))
fout.close()
fout = open('distance5.dat','w')
for i in range(len(distance5)):
fout.write(("%f %f\n")%(distance5[i][0],distance5[i][1]))
fout.close()
fout = open('distance6.dat','w')
for i in range(len(distance6)):
fout.write(("%f %f\n")%(distance6[i][0],distance6[i][1]))
fout.close()
fout = open('distance7.dat','w')
for i in range(len(distance7)):
fout.write(("%f %f\n")%(distance7[i][0],distance7[i][1]))
fout.close()
fout = open('alle.dat','w')
for i in range(len(alle)):
fout.write(("%f %f\n")%(alle[i][0],alle[i][1]))
fout.close()
print "finishing up"
if __name__=='__main__':
MakeStructureHistogram('AB',manual=False,gzip=True)
| gpl-3.0 | 3,505,470,412,904,452,600 | 33.579082 | 115 | 0.507562 | false |
galbiati/video-representations | models/ATWModel.py | 1 | 3142 | import tensorflow as tf
from tensorflow.python.framework import ops
# LSTMStateTuple is under the contrib namespace in the TF 1.x releases this model targets.
import tensorflow.contrib.rnn as tfrnn
import numpy as np
from model import Model
class ATWModel(Model):
"""
ATWModel implements a variant on the E-T-D model from model.Model()
Instead of doing next frame prediction, ATW attempts to encode the entire
sequence, then reproduce the video from only the final latent vectors.
__init__ args:
:encoder is a function that returns a batch of image encodings (rank 2 tensor)
:cell is a recurrent neural network cell that can be passed to tf.nn.rnn_cell.dynamic_rnn
:decoder is a function that returns a batch of decoded images (rank 4 tensor)
:latent_size is the size of the latent space
:activation is the activation function for the LSTM cells
:batchsize is the size of batches (necessary for proper reshaping)
:seqlen is the length of sequences (necessary for proper reshaping)
"""
def __init__(self, encoder, cell, decoder,
latent_size, activation,
batchsize, seqlen):
self.latent_size = latent_size
self.encoder = lambda inputs: encoder(inputs, latent_size=latent_size)
self.cell_fw = cell(num_units=latent_size, activation=activation)
self.cell_bw = cell(num_units=latent_size, activation=activation)
self.decoder = decoder
self.batchsize = batchsize
self.seqlen = seqlen
self.stacked_shape = (batchsize*seqlen, 60, 80, 3)
self.sequence_shape = (batchsize, seqlen, 60, 80, 3)
def build(self, inputs, reuse=False):
with tf.variable_scope('encoder', reuse=reuse):
inputs = self.stack(inputs)
encoded = self.encoder(inputs)
encoded = self.unstack(encoded)
with tf.variable_scope('lstm', reuse=reuse):
# initialize hidden state with ones instead of zeros to ensure pass-through at start
initial_state = tfrnn.LSTMStateTuple(
tf.ones((self.batchsize, self.latent_size)),
tf.zeros((self.batchsize, self.latent_size))
)
# encoder pass
_, seeds = tf.nn.dynamic_rnn(
self.cell_fw, encoded,
initial_state=initial_state,
sequence_length=[self.seqlen]*self.batchsize,
dtype=tf.float32, swap_memory=True,
)
# decoder pass
def rnn_step(next_tuple, next_elem):
input, state = next_tuple
output, next_state = self.cell_fw(input, state)
return (output, next_state)
state = seeds
next_input = state[1]
elems = np.arange(self.seqlen)
outputs, states = tf.scan(
rnn_step, elems, (next_input, state),
swap_memory=True
)
transitioned = tf.transpose(outputs, (1, 0, 2))
transitioned_ = self.stack(transitioned)
with tf.variable_scope('encoder', reuse=True):
decoded = self.decoder(transitioned_)
decoded = self.unstack(decoded)
return encoded, transitioned, decoded
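

# A minimal wiring sketch, assuming hypothetical `conv_encoder`/`conv_decoder`
# networks and a prepared `video_batch` tensor shaped like `sequence_shape`;
# none of these names come from this module:
#
#   model = ATWModel(encoder=conv_encoder, cell=tf.nn.rnn_cell.LSTMCell,
#                    decoder=conv_decoder, latent_size=256,
#                    activation=tf.nn.tanh, batchsize=16, seqlen=30)
#   encoded, transitioned, decoded = model.build(video_batch)
#   loss = tf.losses.mean_squared_error(video_batch, decoded)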
| mit | -998,173,920,038,844,300 | 37.790123 | 96 | 0.610121 | false |
streampref/wcimport | tool/query/endseq.py | 1 | 5096 | # -*- coding: utf-8 -*-
'''
Queries for experiments with ENDSEQ operator
'''
import os
from tool.attributes import get_move_attribute_list, get_place_attribute_list
from tool.experiment import SLI, RAN, ALGORITHM, \
CQL_ALG, QUERY, Q_MOVE, Q_PLACE
from tool.io import get_query_dir, write_to_txt, get_out_file, get_env_file
from tool.query.stream import get_register_stream, REG_Q_OUTPUT_STR, REG_Q_STR
# =============================================================================
# Query using ENDSEQ operator
# =============================================================================
ENDSEQ_QUERY = '''
SELECT SUBSEQUENCE END POSITION
FROM SEQUENCE IDENTIFIED BY player_id
[RANGE {ran} SECOND, SLIDE {sli} SECOND] FROM s;
'''
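# For instance, rendering the template above with ran=10 and sli=1 produces:
#   SELECT SUBSEQUENCE END POSITION
#   FROM SEQUENCE IDENTIFIED BY player_id
#   [RANGE 10 SECOND, SLIDE 1 SECOND] FROM s;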
# =============================================================================
# CQL Queries
# =============================================================================
# Query to get sequence from stream
CQL_Z = '''
SELECT SEQUENCE IDENTIFIED BY player_id
[RANGE {ran} SECOND, SLIDE {sli} SECOND]
FROM s;
'''
# Query equivalent to ENDSEQ operator
CQL_EQUIV = '''
SELECT _pos - {ran} + 1 AS _pos, {att} FROM z WHERE _pos >= {ran}
'''
def gen_endseq_query(configuration, experiment_conf):
'''
Generate ENDSEQ query
'''
query_dir = get_query_dir(configuration, experiment_conf)
filename = query_dir + os.sep + 'endseq.cql'
query = ENDSEQ_QUERY.format(ran=experiment_conf[RAN],
sli=experiment_conf[SLI])
write_to_txt(filename, query)
def gen_cql_z_query(query_dir, experiment_conf):
'''
    Generate the Z relation query using the RANGE and SLIDE parameters
'''
query = CQL_Z.format(ran=experiment_conf[RAN],
sli=experiment_conf[SLI])
filename = query_dir + os.sep + 'z.cql'
write_to_txt(filename, query)
def gen_cql_final_query(query_dir, experiment_conf):
'''
    Generate the final CQL query
'''
filename = query_dir + os.sep + 'equiv.cql'
if os.path.isfile(filename):
return
range_value = experiment_conf[RAN]
if experiment_conf[QUERY] == Q_MOVE:
att_list = get_move_attribute_list('z.')
elif experiment_conf[QUERY] == Q_PLACE:
att_list = get_place_attribute_list('z.')
att_str = ', '.join(att_list)
pos_query_list = []
for position in range(1, range_value + 1):
pos_query = CQL_EQUIV.format(att=att_str, ran=position)
pos_query_list.append(pos_query)
query = '\nUNION\n'.join(pos_query_list) + ';'
out_file = open(filename, 'w')
out_file.write(query)
out_file.close()
def gen_cql_queries(configuration, experiment_conf):
'''
Generate CQL queries
'''
query_dir = get_query_dir(configuration, experiment_conf)
gen_cql_z_query(query_dir, experiment_conf)
gen_cql_final_query(query_dir, experiment_conf)
def gen_all_queries(configuration, experiment_list):
'''
Generate all queries
'''
for exp_conf in experiment_list:
if exp_conf[ALGORITHM] == CQL_ALG:
gen_cql_queries(configuration, exp_conf)
else:
gen_endseq_query(configuration, exp_conf)
def gen_endseq_env(configuration, experiment_conf, output):
'''
Generate environment for ENDSEQ
'''
text = get_register_stream(experiment_conf)
# Get query filename
query_dir = get_query_dir(configuration, experiment_conf)
filename = query_dir + os.sep + 'endseq.cql'
# Register query
if output:
# Get output filename
out_file = get_out_file(configuration, experiment_conf)
text += REG_Q_OUTPUT_STR.format(qname='endseq', qfile=filename,
ofile=out_file)
else:
text += REG_Q_STR.format(qname='endseq', qfile=filename)
# Get environment filename
filename = get_env_file(configuration, experiment_conf)
write_to_txt(filename, text)
def gen_cql_env(configuration, experiment_conf, output):
'''
Generate environment for CQL
'''
text = get_register_stream(experiment_conf)
query_dir = get_query_dir(configuration, experiment_conf)
# Environment files for equivalent CQL queries
filename = query_dir + os.sep + 'z.cql'
text += REG_Q_STR.format(qname='z', qfile=filename)
# Final equivalent query
filename = query_dir + os.sep + 'equiv.cql'
if output:
# Get output filename
out_file = get_out_file(configuration, experiment_conf)
text += REG_Q_OUTPUT_STR.format(qname='equiv', qfile=filename,
ofile=out_file)
else:
text += REG_Q_STR.format(qname='equiv', qfile=filename)
filename = get_env_file(configuration, experiment_conf)
write_to_txt(filename, text)
def gen_all_env(configuration, experiment_list, output=False):
'''
Generate all environments
'''
for exp_conf in experiment_list:
if exp_conf[ALGORITHM] == CQL_ALG:
gen_cql_env(configuration, exp_conf, output)
else:
gen_endseq_env(configuration, exp_conf, output)
| gpl-3.0 | 4,162,606,464,994,467 | 31.877419 | 79 | 0.601256 | false |
libsh-archive/sh | test/regress/ceil.cpp.py | 1 | 1153 | #!/usr/bin/python
import shtest, sys, common
from common import *
from math import *
def ceil_test(p, types=[]):
if is_array(p):
result = [ceil(a) for a in p]
else:
result = [ceil(p)]
return shtest.make_test(result, [p], types)
def insert_into(test):
test.add_test(ceil_test((3.0, 3.1, 3.5, 3.9)))
test.add_test(ceil_test((-3.0, -3.1, -3.5, -3.9)))
test.add_test(ceil_test(-0.75))
test.add_test(ceil_test(0.75))
test.add_test(ceil_test(0.2))
test.add_test(ceil_test(-0.2))
test.add_test(ceil_test((-0.0, 0.0)))
test.add_test(ceil_test((-12345.6787, 12324.4838)))
test.add_test(ceil_test((1234567890123456789.0, )))
test.add_test(ceil_test((-1234567890123456789.0, )))
# Test ceil in stream programs
test = shtest.StreamTest('ceil', 1)
test.add_call(shtest.Call(shtest.Call.call, 'ceil', 1))
insert_into(test)
test.output_header(sys.stdout)
test.output(sys.stdout, False)
# Test ceil in immediate mode
test = shtest.ImmediateTest('ceil_im', 1)
test.add_call(shtest.Call(shtest.Call.call, 'ceil', 1))
insert_into(test)
test.output(sys.stdout, False)
test.output_footer(sys.stdout)
| lgpl-2.1 | -1,104,598,066,792,645,400 | 28.564103 | 56 | 0.657415 | false |
jhermann/rituals | src/rituals/util/antglob.py | 1 | 6871 | # -*- coding: utf-8 -*-
# pylint: disable=too-few-public-methods
""" Recursive globbing with ant-style syntax.
"""
#
# The MIT License (MIT)
#
# Original source (2014-02-17) from https://github.com/zacherates/fileset.py
# Copyright (c) 2012 Aaron Maenpaa
#
# Modifications at https://github.com/jhermann/rituals
# Copyright ⓒ 2015 Jürgen Hermann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
from ._compat import string_types
# TODO: allow '?'
# TODO: matching for Windows? (need to canonize to forward slashes in 'root')
__all__ = ['FileSet', 'includes', 'excludes']
def glob2re(part):
"""Convert a path part to regex syntax."""
return "[^/]*".join(
re.escape(bit).replace(r'\[\^', '[^').replace(r'\[', '[').replace(r'\]', ']')
for bit in part.split("*")
)
def parse_glob(pattern):
"""Generate parts of regex transformed from glob pattern."""
if not pattern:
return
bits = pattern.split("/")
dirs, filename = bits[:-1], bits[-1]
for dirname in dirs:
if dirname == "**":
yield "(|.+/)"
else:
yield glob2re(dirname) + "/"
yield glob2re(filename)
def compile_glob(spec):
"""Convert the given glob `spec` to a compiled regex."""
parsed = "".join(parse_glob(spec))
regex = "^{0}$".format(parsed)
return re.compile(regex)
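

# For example, compile_glob('**/*.py') compiles to ^(|.+/)[^/]*\.py$ ,
# which matches both 'setup.py' and 'pkg/sub/module.py'.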
class Pattern():
"""A single pattern for either inclusion or exclusion."""
def __init__(self, spec, inclusive):
"""Create regex-based pattern matcher from glob `spec`."""
self.compiled = compile_glob(spec.rstrip('/'))
self.inclusive = inclusive
self.is_dir = spec.endswith('/')
def __str__(self):
"""Return inclusiveness indicator and original glob pattern."""
return ('+' if self.inclusive else '-') + self.compiled.pattern
def matches(self, path):
"""Check this pattern against given `path`."""
return bool(self.compiled.match(path))
class FileSet():
""" Ant-style file and directory matching.
Produces an iterator of all of the files that match the provided patterns.
Note that directory matches must end with a slash, and if they're exclusions,
they won't be scanned (which prunes anything in that directory that would
otherwise match).
Directory specifiers:
** matches zero or more directories.
/ path separator.
File specifiers:
* glob style wildcard.
[chars] inclusive character sets.
[^chars] exclusive character sets.
Examples:
**/*.py recursively match all python files.
foo/**/*.py recursively match all python files in the 'foo' directory.
*.py match all the python files in the current directory.
*/*.txt match all the text files in top-level directories.
foo/**/* all files under directory 'foo'.
*/ top-level directories.
foo/ the directory 'foo' itself.
**/foo/ any directory named 'foo'.
**/.* hidden files.
**/.*/ hidden directories.
"""
def __init__(self, root, patterns):
if isinstance(patterns, string_types):
patterns = [patterns]
self.root = root
self.patterns = [i if hasattr(i, 'inclusive') else includes(i) for i in patterns]
def __repr__(self):
return "<FileSet at {0} {1}>".format(repr(self.root), ' '.join(str(i) for i in self. patterns))
def included(self, path, is_dir=False):
"""Check patterns in order, last match that includes or excludes `path` wins. Return `None` on undecided."""
inclusive = None
for pattern in self.patterns:
if pattern.is_dir == is_dir and pattern.matches(path):
inclusive = pattern.inclusive
#print('+++' if inclusive else '---', path, pattern)
return inclusive
def __iter__(self):
for path in self.walk():
yield path
def __or__(self, other):
return set(self) | set(other)
def __ror__(self, other):
return self | other
def __and__(self, other):
return set(self) & set(other)
def __rand__(self, other):
return self & other
def walk(self, **kwargs):
""" Like `os.walk` and taking the same keyword arguments,
but generating paths relative to the root.
Starts in the fileset's root and filters based on its patterns.
If ``with_root=True`` is passed in, the generated paths include
the root path.
"""
lead = ''
if 'with_root' in kwargs and kwargs.pop('with_root'):
lead = self.root.rstrip(os.sep) + os.sep
for base, dirs, files in os.walk(self.root, **kwargs):
prefix = base[len(self.root):].lstrip(os.sep)
bits = prefix.split(os.sep) if prefix else []
for dirname in dirs[:]:
path = '/'.join(bits + [dirname])
inclusive = self.included(path, is_dir=True)
if inclusive:
yield lead + path + '/'
elif inclusive is False:
dirs.remove(dirname)
for filename in files:
path = '/'.join(bits + [filename])
if self.included(path):
yield lead + path
def includes(pattern):
"""A single inclusive glob pattern."""
return Pattern(pattern, inclusive=True)
def excludes(pattern):
"""A single exclusive glob pattern."""
return Pattern(pattern, inclusive=False)
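# Usage sketch (added for illustration; the root path and patterns below are
# hypothetical). Patterns are checked in order and the last match wins, so the
# "build/" exclusion prunes that directory before its contents are walked.
if __name__ == '__main__':
    demo_set = FileSet('.', [includes('**/*.py'), excludes('build/')])
    for relative_path in demo_set:
        print(relative_path)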
| gpl-2.0 | 4,840,534,147,977,032,000 | 33.862944 | 116 | 0.600903 | false |
DigiThinkIT/stem | test/unit/version.py | 1 | 8982 | """
Unit tests for the stem.version.Version parsing and class.
"""
import unittest
import stem.util.system
import stem.version
from stem.version import Version
try:
# added in python 3.3
from unittest.mock import patch
except ImportError:
from mock import patch
TOR_VERSION_OUTPUT = """Mar 22 23:09:37.088 [notice] Tor v0.2.2.35 \
(git-73ff13ab3cc9570d). This is experimental software. Do not rely on it for \
strong anonymity. (Running on Linux i686)
Tor version 0.2.2.35 (git-73ff13ab3cc9570d)."""
class TestVersion(unittest.TestCase):
@patch('stem.util.system.call')
@patch.dict(stem.version.VERSION_CACHE)
def test_get_system_tor_version(self, call_mock):
call_mock.return_value = TOR_VERSION_OUTPUT.splitlines()
version = stem.version.get_system_tor_version()
self.assert_versions_match(version, 0, 2, 2, 35, None, 'git-73ff13ab3cc9570d')
self.assertEqual('73ff13ab3cc9570d', version.git_commit)
call_mock.assert_called_once_with('tor --version')
self.assertEqual({'tor': version}, stem.version.VERSION_CACHE)
def test_parsing(self):
"""
Tests parsing by the Version class constructor.
"""
    # valid versions with various numbers of components to the version
version = Version('0.1.2.3-tag')
self.assert_versions_match(version, 0, 1, 2, 3, 'tag', None)
version = Version('0.1.2.3')
self.assert_versions_match(version, 0, 1, 2, 3, None, None)
version = Version('0.1.2-tag')
self.assert_versions_match(version, 0, 1, 2, None, 'tag', None)
version = Version('0.1.2')
self.assert_versions_match(version, 0, 1, 2, None, None, None)
# checks an empty tag
version = Version('0.1.2.3-')
self.assert_versions_match(version, 0, 1, 2, 3, '', None)
version = Version('0.1.2-')
self.assert_versions_match(version, 0, 1, 2, None, '', None)
    # check with extra information
version = Version('0.1.2.3-tag (git-73ff13ab3cc9570d)')
self.assert_versions_match(version, 0, 1, 2, 3, 'tag', 'git-73ff13ab3cc9570d')
self.assertEqual('73ff13ab3cc9570d', version.git_commit)
version = Version('0.1.2.3-tag ()')
self.assert_versions_match(version, 0, 1, 2, 3, 'tag', '')
version = Version('0.1.2 (git-73ff13ab3cc9570d)')
self.assert_versions_match(version, 0, 1, 2, None, None, 'git-73ff13ab3cc9570d')
# checks invalid version strings
self.assertRaises(ValueError, stem.version.Version, '')
self.assertRaises(ValueError, stem.version.Version, '1.2.3.4nodash')
self.assertRaises(ValueError, stem.version.Version, '1.2.3.a')
self.assertRaises(ValueError, stem.version.Version, '1.2.a.4')
self.assertRaises(ValueError, stem.version.Version, '1x2x3x4')
self.assertRaises(ValueError, stem.version.Version, '12.3')
self.assertRaises(ValueError, stem.version.Version, '1.-2.3')
def test_comparison(self):
"""
    Tests comparison between Version instances.
"""
# check for basic incrementing in each portion
self.assert_version_is_greater('1.1.2.3-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.2.2.3-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.1.3.3-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.1.2.4-tag', '0.1.2.3-tag')
self.assert_version_is_greater('0.1.2.3-ugg', '0.1.2.3-tag')
self.assert_version_is_equal('0.1.2.3-tag', '0.1.2.3-tag')
# check with common tags
self.assert_version_is_greater('0.1.2.3-beta', '0.1.2.3-alpha')
self.assert_version_is_greater('0.1.2.3-rc', '0.1.2.3-beta')
# checks that a missing patch level equals zero
self.assert_version_is_equal('0.1.2', '0.1.2.0')
self.assert_version_is_equal('0.1.2-tag', '0.1.2.0-tag')
# checks for missing patch or status
self.assert_version_is_greater('0.1.2.3-tag', '0.1.2.3')
self.assert_version_is_greater('0.1.2.3-tag', '0.1.2-tag')
self.assert_version_is_greater('0.1.2.3-tag', '0.1.2')
self.assert_version_is_equal('0.1.2.3', '0.1.2.3')
self.assert_version_is_equal('0.1.2', '0.1.2')
def test_nonversion_comparison(self):
"""
Checks that we can be compared with other types.
    In python 3 only equality comparisons work; greater than and less than
comparisons result in a TypeError.
"""
test_version = Version('0.1.2.3')
self.assertNotEqual(test_version, None)
self.assertNotEqual(test_version, 5)
def test_string(self):
"""
Tests the Version -> string conversion.
"""
# checks conversion with various numbers of arguments
self.assert_string_matches('0.1.2.3-tag')
self.assert_string_matches('0.1.2.3')
self.assert_string_matches('0.1.2')
def test_requirements_greater_than(self):
"""
Checks a VersionRequirements with a single greater_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version('0.2.2.36'))
self.assertTrue(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.37') >= requirements)
self.assertTrue(Version('0.2.3.36') >= requirements)
self.assertFalse(Version('0.2.2.35') >= requirements)
self.assertFalse(Version('0.2.1.38') >= requirements)
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version('0.2.2.36'), False)
self.assertFalse(Version('0.2.2.35') >= requirements)
self.assertFalse(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.37') >= requirements)
def test_requirements_less_than(self):
"""
Checks a VersionRequirements with a single less_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.less_than(Version('0.2.2.36'))
self.assertTrue(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.35') >= requirements)
self.assertTrue(Version('0.2.1.38') >= requirements)
self.assertFalse(Version('0.2.2.37') >= requirements)
self.assertFalse(Version('0.2.3.36') >= requirements)
requirements = stem.version._VersionRequirements()
requirements.less_than(Version('0.2.2.36'), False)
self.assertFalse(Version('0.2.2.37') >= requirements)
self.assertFalse(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.35') >= requirements)
def test_requirements_in_range(self):
"""
Checks a VersionRequirements with a single in_range rule.
"""
requirements = stem.version._VersionRequirements()
requirements.in_range(Version('0.2.2.36'), Version('0.2.2.38'))
self.assertFalse(Version('0.2.2.35') >= requirements)
self.assertTrue(Version('0.2.2.36') >= requirements)
self.assertTrue(Version('0.2.2.37') >= requirements)
self.assertFalse(Version('0.2.2.38') >= requirements)
# rule for 'anything in the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.in_range(Version('0.2.2.0'), Version('0.2.3.0'))
    for index in range(0, 100):
self.assertTrue(Version('0.2.2.%i' % index) >= requirements)
def test_requirements_multiple_rules(self):
"""
Checks a VersionRequirements is the logical 'or' when it has multiple rules.
"""
# rule to say 'anything but the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version('0.2.3.0'))
requirements.less_than(Version('0.2.2.0'), False)
self.assertTrue(Version('0.2.3.0') >= requirements)
self.assertFalse(Version('0.2.2.0') >= requirements)
    for index in range(0, 100):
self.assertFalse(Version('0.2.2.%i' % index) >= requirements)
def assert_versions_match(self, version, major, minor, micro, patch, status, extra):
"""
Asserts that the values for a types.Version instance match the given
values.
"""
self.assertEqual(major, version.major)
self.assertEqual(minor, version.minor)
self.assertEqual(micro, version.micro)
self.assertEqual(patch, version.patch)
self.assertEqual(status, version.status)
self.assertEqual(extra, version.extra)
if extra is None:
self.assertEqual(None, version.git_commit)
def assert_version_is_greater(self, first_version, second_version):
"""
    Asserts that the parsed version of the first version is greater than the
second (also checking the inverse).
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(version1 > version2, True)
self.assertEqual(version1 < version2, False)
def assert_version_is_equal(self, first_version, second_version):
"""
Asserts that the parsed version of the first version equals the second.
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(version1, version2)
def assert_string_matches(self, version):
"""
Parses the given version string then checks that its string representation
matches the input.
"""
self.assertEqual(version, str(Version(version)))
| lgpl-3.0 | -1,618,298,573,717,635,800 | 34.223529 | 86 | 0.676353 | false |
cadrev/Titanic-Prediction | data-munging.py | 1 | 3412 | #
# Title : Data munging(AKA cleaning) the Titanic Data
# Author : Felan Carlo Garcia
#
# Notes:
# -- Code is based on the Kaggle Python Tutorial
# -- data cleaning prior to implementing a machine learning algorithm.
import numpy as np
import pandas as pd
def processdata(filename, outputname):
df = pd.read_csv(filename,header=0)
  # Make new columns 'Gender' and 'EmbarkedNum' to convert the string
  # information into integer values.
# We do this because general machine learning algorithms do not
# work on string values.
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
df['EmbarkedNum'] = df['Embarked'].map({'S': 0, 'C': 1, 'Q': 1}).astype(int)
# Executing the code:
  # --print df[df['Age'].isnull()]['Sex']-- shows that the titanic data contains
# some null values of the ages of the passengers.
# In this case, we can either drop the row or we can assign an arbitrary
# value to fill the missing data.
# For this code, arbitrary age data is obtained by using the median
# age data of the passengers. We make a new column 'AgeFill' and place
# the median data on the missing values instead of directly modifying
# the 'Age' column
df['AgeFill'] = df['Age']
for i in range(0, 2):
for j in range(0, 3):
median = df[(df['Gender'] == i) & (df['Pclass'] == j+1)]['Age'].dropna().median()
df.loc[ (df.Age.isnull()) & (df.Gender == i) & (df.Pclass == j+1),'AgeFill'] = median
# We add a new column 'AgeIsNull' to know which records has a missing
# values previously.
# We then interpolate the missing values from the 'Fare' column.
df['AgeIsNull'] = pd.isnull(df.Age).astype(int)
df['Fare'] = df['Fare'].interpolate()
# ------------- Feature Engineering Part --------------------
# Feature Engineering is the process of using domain/expert
# knowledge of the data to create features that make machine
# learning algorithms work better.
#
# In this case, studying the data shows that women and children
# have higher survival rates compared to men. Thus we add
# two additional features: 'Female' and 'Children', in an attempt
# to assist our learning model in its prediction.
# At the same time we add features Age*Class and FamilySize
  # as additional engineered features that may help our learning
# model
df['Children'] = df['AgeFill'].map(lambda x: 1 if x < 6.0 else 0)
df['Female'] = df['Gender'].map(lambda x: 1 if x == 0 else 0)
df['FamilySize'] = df['SibSp'] + df['Parch']
df['Age*Class'] = df['AgeFill'] * df['Pclass']
# Since most machine learning algorithms don't work on strings,
# we drop the columns in our pandas dataframe containing object
# datatypes.
# The code:
# --print df.dtypes[df.dtypes.map(lambda x: x=='object')]--
# will show which columns are made of object datatypes.
#
# In this case these are the following columns containing
# object.string:
# Age, Name, Sex, Ticket, Cabin, Embarked, Fare
#
# We drop the following objects columns along with the other data
# since they wont likely contribute to our machine learning
# prediction
df = df.drop(['Age','Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
df.to_csv(outputname, sep=',', index=False)
return df
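# Added illustration (toy data, not the Titanic set): the Gender x Pclass median
# imputation used above, shown on a minimal frame for a single group.
def _demo_age_imputation():
  toy = pd.DataFrame({'Gender': [0, 0, 1], 'Pclass': [1, 1, 1], 'Age': [30.0, None, 40.0]})
  toy['AgeFill'] = toy['Age']
  median = toy[(toy['Gender'] == 0) & (toy['Pclass'] == 1)]['Age'].dropna().median()
  toy.loc[(toy.Age.isnull()) & (toy.Gender == 0) & (toy.Pclass == 1), 'AgeFill'] = median
  return toy  # the missing age in row 1 is filled with the group median, 30.0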
def main():
print processdata('titanic-data-shuffled.csv', 'final-data.csv')
if __name__ == '__main__':
main()
| mit | 6,481,765,875,969,964,000 | 36.911111 | 91 | 0.663247 | false |
bfirsh/docker-py | setup.py | 1 | 2327 | #!/usr/bin/env python
import codecs
import os
import sys
from setuptools import setup, find_packages
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
'requests >= 2.5.2, != 2.11.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
'docker-pycreds >= 0.2.1'
]
if sys.platform == 'win32':
requirements.append('pypiwin32 >= 219')
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
# While not imported explicitly, the ipaddress module is required for
# ssl_match_hostname to verify hosts match with certificates via
# ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
}
version = None
exec(open('docker/version.py').read())
with open('./test-requirements.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
long_description = ''
try:
with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
long_description = readme_rst.read()
except IOError:
# README.rst is only generated on release. Its absence should not prevent
# setup.py from working properly.
pass
setup(
name="docker",
version=version,
description="A Python library for the Docker Engine API.",
long_description=long_description,
url='https://github.com/docker/docker-py',
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
maintainer='Joffrey F',
maintainer_email='[email protected]',
)
| apache-2.0 | -7,816,205,614,684,412,000 | 30.026667 | 78 | 0.646755 | false |
NERC-CEH/ecomaps | ecomaps/controllers/wmsviz.py | 1 | 9385 | # Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
#
"""
Controller for the 'View' tab - allowing the display of WMC map layers
@author C Byrom Feb 08, Modified D Lowe, May 09
"""
import logging
import urllib2
import urlparse
from cStringIO import StringIO
import xml.sax.saxutils as saxutils
#from ows_server.models import Utilities
from paste.httpexceptions import HTTPNotFound
from paste.request import parse_querystring
import ecomaps.lib.utils as utils
# ecomaps imports
from ecomaps.model import selectedItem
from ecomaps.lib.base import BaseController, response, config, request, c, session, render, abort
from ecomaps.lib.base import app_globals as g
from ecomaps.lib.wmc_util import GetWebMapContext, GetWebMapCapabilities, GetLegend, GetLegendUrl, GetFeatureInfo, openURL, GetResponse, parseEndpointString, getQueryParameter
from ecomaps.lib.build_figure import build_figure
from ecomaps.lib.status_builder import StatusBuilder
from ecomaps.lib.base import request
log = logging.getLogger(__name__)
class WmsvizController(BaseController):
_pilImageFormats = {
'image/png': 'PNG',
'image/jpg': 'JPEG',
'image/jpeg': 'JPEG',
'image/gif': 'GIF',
'image/tiff': 'TIFF'
}
indexTemplate = 'wmsviz.html'
def index(self):
"""
Default controller method to handle the initial requests to the page
"""
log.debug('entered wmsviz controller index action')
return HTTPNotFound
g.helpIcon='layout/icons/help.png' #needs to go in config
self.inputs=dict(parse_querystring(request.environ))
log.info(self.inputs)
c.wmcURL = ""
# check if all we're doing is removing a view item
if 'removeItem' in self.inputs:
return self.removeViewItem(self.inputs['removeItem'])
# check if we're doing an AJAX callback to get some WMC data
if 'REQUEST' in self.inputs:
if self.inputs['REQUEST'] == 'GetWebMapContext':
wmc= GetWebMapContext(self)
log.debug("finished wmsviz controller index action, req = GetWebMapContext")
return wmc
if self.inputs['REQUEST'] == 'GetWebMapCapabilities':
wmcDoc = GetWebMapCapabilities(self.inputs['ENDPOINT'])
response.headers['Content-Type'] = 'text/xml'
log.debug("finished wmsviz controller index action, req = GetWebMapCapabilities")
return wmcDoc
elif self.inputs['REQUEST'] == 'GetLegend':
resp = GetLegend(self)
log.debug("finished wmsviz controller index action, req = GetLegend")
return resp
elif self.inputs['REQUEST'] == 'GetLegendUrl':
resp = GetLegendUrl(self)
log.debug("finished wmsviz controller index action, req = GetLegendUrl")
return resp
if self.inputs['REQUEST'] == 'GetDisplayOptions':
jsonTxt = GetResponse(self.inputs['URL'])
response.headers['Content-Type'] = 'application/json'
log.debug("finished wmsviz controller index action, req = GetDisplayOptions")
return jsonTxt
if self.inputs['REQUEST'] == 'GetAxisConfig':
respText = GetResponse(self.inputs['URL'])
response.headers['Content-Type'] = 'text/xml'
return respText
if self.inputs['REQUEST'] == 'proxy':
# Client is requesting to use server as a proxy. Only forward the request if the
# request parameter value is for an acceptable request type.
url = self.inputs['URL']
requestType = getQueryParameter(url, 'request')
if requestType.lower() == 'getfeatureinfo':
try:
info = GetFeatureInfo(url)
except Exception, exc:
log.info("REQUEST:proxy Error making request to %s: %s" % (self.inputs['URL'], exc.__str__()))
info = "<p>Information is not available for this layer or position.</p>"
log.debug("finished wmsviz controller index action, req = GetFeatureInfo")
return "<FeatureInfo>" + saxutils.escape(info) + "</FeatureInfo>"
else:
log.info("Proxy forwarding refused for request of type %s to URL %s" % (requestType, url))
return None
#get server information from config file
g.server=config['app_conf']['serverurl']
statusBuilder = StatusBuilder()
status = statusBuilder.getCurrentStatus('wmsviz')
initialSetup = self._buildInitialSetup(self.inputs.get('ENDPOINT'))
session.save()
log.info('SAVED SESSION')
c.initialSetupJSON = utils.toJSON(initialSetup)
c.initialStatus = utils.toJSON(status)
log.debug("request.params = %s" % (request.params,))
log.debug("request.headers = %s" % (request.headers,))
log.debug("finished wmsviz controller index action")
return render(self.indexTemplate)
def _buildInitialSetup(self, endpointParam):
initialSetup = []
if endpointParam != None:
for ep in self.inputs['ENDPOINT'].split(','):
endpoint = {}
o = urlparse.urlparse(ep)
if o.path.find(':') > 0:
path = o.path[:o.path.find(':')]
url = "%(scheme)s://%(hostname)s%(port)s%(path)s" % {
'scheme' : o.scheme if o.scheme != None else '',
'hostname' : o.hostname if o.hostname != None else '',
'port' : ':' + str(o.port) if o.port != None else '',
'path': path,
}
layers = o.path[o.path.find(':')+1:].split('|')
endpoint['layers'] = layers
else:
url = ep
layers = ""
endpoint['url'] = url
initialSetup.append(endpoint)
return initialSetup
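    # Example of the ENDPOINT parameter handled above (hypothetical URL):
    # "http://example.org/wms:layer1|layer2" is split into
    # {'url': "http://example.org/wms", 'layers': ['layer1', 'layer2']},
    # while an endpoint whose path has no ':' is kept whole and gets no layer list.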
def addViewItem(self,endpoint):
"""
Add a selected item to the session
- if this is the first item, then display the selections tab
@param endpoint: WMC endpoint
"""
item = selectedItem.SelectedItem(None, None, None, endpoint)
selections = [item,]
# avoid duplicates
if 'viewItems' in session:
for selection in session['viewItems']:
if selection.wmcURL != endpoint:
selections.append(selection)
session['viewItems'] = selections
session.save()
def removeViewItem(self,endpoint):
"""
Remove view item from session data
- NB, do this by rebuilding the session data without the input data included
@param endpoint: The WMC endpoint of the view item to remove
"""
selections = []
for selection in session['viewItems']:
if selection.wmcURL != endpoint:
selections.append(selection)
# if the new list is empty, remove the session variable
if len(selections) == 0:
del session['viewItems']
c.UpdatePageTabs=1
else:
session['viewItems'] = selections
session.save()
def removeAllViewItems(self):
"""
Remove all old view items - clears out old endpoints
"""
session['viewItems']=[]
session.save()
def get_figure(self):
log.debug("running wmsvis.get_figure")
# Use .copy() on params to get a *writeable* MultiDict instance
params = request.params.copy()
log.debug("params = %s" % (params,))
# The response headers must be strings, not unicode, for modwsgi -
# ensure that the format is a string, omitting any non-ASCII
# characters.
format = params.pop('figFormat', 'image/png')
formatStr = format.encode('ascii', 'ignore')
finalImage = build_figure(params)
buffer = StringIO()
finalImage.save(buffer, self._pilImageFormats[formatStr])
response.headers['Content-Type'] = formatStr
# Remove headers that prevent browser caching, otherwise IE will not
# allow the image to be saved in its original format.
if 'Cache-Control' in response.headers:
del response.headers['Cache-Control']
if 'Pragma' in response.headers:
del response.headers['Pragma']
return buffer.getvalue()
| gpl-2.0 | 131,777,740,538,252,640 | 36.242063 | 175 | 0.558977 | false |
EtienneCmb/tensorpac | tensorpac/methods/meth_erpac.py | 1 | 5370 | """Individual methods for assessing ERPAC."""
import numpy as np
from scipy.stats import chi2
from joblib import Parallel, delayed
from tensorpac.gcmi import nd_mi_gg
from tensorpac.config import CONFIG
def pearson(x, y, st='i...j, k...j->ik...'):
"""Pearson correlation for multi-dimensional arrays.
Parameters
----------
x, y : array_like
Compute pearson correlation between the multi-dimensional arrays
x and y.
    st : string | 'i...j, k...j->ik...'
The string to pass to the np.einsum function.
Returns
-------
cov: array_like
The pearson correlation array.
"""
n = x.shape[-1]
# Distribution center :
mu_x = x.mean(-1, keepdims=True)
mu_y = y.mean(-1, keepdims=True)
# Distribution deviation :
s_x = x.std(-1, ddof=n - 1, keepdims=True)
s_y = y.std(-1, ddof=n - 1, keepdims=True)
# Compute correlation coefficient :
cov = np.einsum(st, x, y)
mu_xy = np.einsum(st, mu_x, mu_y)
cov -= n * mu_xy
cov /= np.einsum(st, s_x, s_y)
return cov
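def _pearson_demo():
    """Added usage sketch (random toy data): correlating 2 signals with 3 signals
    across 100 samples returns a (2, 3) array of correlation coefficients."""
    rng = np.random.RandomState(0)
    x, y = rng.rand(2, 100), rng.rand(3, 100)
    return pearson(x, y)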
def erpac(pha, amp):
"""Event-Related Phase Amplitude Coupling.
    This function computes the correlation coefficient between a circular and a
    linear random variable at each time point and across trials. Adapted from
    the circ_corrcc function of the Circular Statistics Toolbox for
    Matlab by Philipp Berens, 2009 :cite:`berens2009circstat`. This function is
an adaptation of Voytek, 2013 :cite:`voytek2013method` for tensors.
Parameters
----------
pha, amp : array_like
Respectively the arrays of phases of shape (n_pha, ..., n_epochs) and
the array of amplitudes of shape (n_amp, ..., n_epochs).
Returns
-------
rho : array_like
Array of correlation coefficients of shape (n_amp, n_pha, ...)
pval : array_like
Array of p-values of shape (n_amp, n_pha, ...).
References
----------
Voytek et al. 2013 :cite:`voytek2013method`
"""
# Compute correlation coefficient for sin and cos independently
n = pha.shape[-1]
sa, ca = np.sin(pha), np.cos(pha)
rxs = pearson(amp, sa)
rxc = pearson(amp, ca)
rcs = pearson(sa, ca, st='i...j, k...j->i...')
rcs = rcs[np.newaxis, ...]
# Compute angular-linear correlation (equ. 27.47)
rho = np.sqrt((rxc**2 + rxs**2 - 2 * rxc * rxs * rcs) / (1 - rcs**2))
# Compute pvalue :
pval = 1. - chi2.cdf(n * rho**2, 2)
return rho, pval
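def _erpac_demo():
    """Added usage sketch (random toy data, not a real analysis): one phase signal
    and two amplitude signals over 200 trials give rho and pval of shape (2, 1)."""
    rng = np.random.RandomState(0)
    pha = 2 * np.pi * rng.rand(1, 200)
    amp = rng.rand(2, 200)
    return erpac(pha, amp)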
def ergcpac(pha, amp, smooth=None, n_jobs=-1):
"""Event Related PAC using the Gaussian Copula Mutual Information.
This function assumes that phases and amplitudes have already been
    prepared, i.e. phases should be represented on the unit circle
(np.c_[np.sin(pha), np.cos(pha)]) and both inputs should also have been
copnormed.
Parameters
----------
pha, amp : array_like
Respectively arrays of phases of shape (n_pha, n_times, 2, n_epochs)
and the array of amplitudes of shape (n_amp, n_times, 1, n_epochs).
Returns
-------
erpac : array_like
Array of correlation coefficients of shape (n_amp, n_pha, n_times)
References
----------
Ince et al. 2017 :cite:`ince2017statistical`
"""
# get shapes
(n_pha, n_times, _, n_epochs), n_amp = pha.shape, amp.shape[0] # noqa
# compute mutual information across trials
ergcpac = np.zeros((n_amp, n_pha, n_times))
if isinstance(smooth, int):
# define the temporal smoothing vector
vec = np.arange(smooth, n_times - smooth, 1)
times = [slice(k - smooth, k + smooth + 1) for k in vec]
# move time axis to avoid to do it inside parallel
pha, amp = np.moveaxis(pha, 1, -2), np.moveaxis(amp, 1, -2)
# function to run in parallel across times
def _fcn(t): # noqa
_erpac = np.zeros((n_amp, n_pha), dtype=float)
xp, xa = pha[..., t, :], amp[..., t, :]
for a in range(n_amp):
_xa = xa.reshape(n_amp, 1, -1)
for p in range(n_pha):
_xp = xp.reshape(n_pha, 2, -1)
_erpac[a, p] = nd_mi_gg(_xp[p, ...], _xa[a, ...])
return _erpac
# run the function across time points
_ergcpac = Parallel(n_jobs=n_jobs, **CONFIG['JOBLIB_CFG'])(delayed(
_fcn)(t) for t in times)
# reconstruct the smoothed array
for a in range(n_amp):
for p in range(n_pha):
mean_vec = np.zeros((n_times,), dtype=float)
for t, _gc in zip(times, _ergcpac):
ergcpac[a, p, t] += _gc[a, p]
mean_vec[t] += 1
ergcpac[a, p, :] /= mean_vec
else:
for a in range(n_amp):
for p in range(n_pha):
ergcpac[a, p, ...] = nd_mi_gg(pha[p, ...], amp[a, ...])
return ergcpac
def swap_erpac_trials(pha):
"""Swap trials across the last dimension."""
tr_ = np.random.permutation(pha.shape[-1])
return pha[..., tr_]
def _ergcpac_perm(pha, amp, smooth=None, n_jobs=-1, n_perm=200):
def _ergcpac_single_perm():
p = swap_erpac_trials(pha)
return ergcpac(p, amp, smooth=smooth, n_jobs=1)
out = Parallel(n_jobs=n_jobs, **CONFIG['JOBLIB_CFG'])(delayed(
_ergcpac_single_perm)() for _ in range(n_perm))
return np.stack(out)
| bsd-3-clause | -5,151,203,058,610,020,000 | 32.987342 | 79 | 0.577281 | false |
tobiasgoecke/django-messages | tests/settings.py | 1 | 1335 | import os.path
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django_messages'
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SITE_ID = 1
SECRET_KEY = '+zzix-&k$afk-k0d0s7v01w0&15z#ne$71qf28#e$$c*@g742z'
ROOT_URLCONF = "urls"
DEBUG = True
STATIC_URL = '/static/'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'database.db'),
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
| bsd-3-clause | -2,423,822,688,440,526,300 | 25.7 | 71 | 0.632959 | false |
bruckhaus/challenges | python_challenges/project_euler/p012_highly_divisible.py | 1 | 2097 | __author__ = 'tilmannbruckhaus'
import numpy
import sys
class HighlyDivisible:
# Highly divisible triangular number
# Problem 12
# The sequence of triangle numbers is generated by adding the natural numbers.
# So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
# The first ten terms would be:
#
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#
# Let us list the factors of the first seven triangle numbers:
#
# 1: 1
# 3: 1,3
# 6: 1,2,3,6
# 10: 1,2,5,10
# 15: 1,3,5,15
# 21: 1,3,7,21
# 28: 1,2,4,7,14,28
# We can see that 28 is the first triangle number to have over five divisors.
#
# What is the value of the first triangle number to have over five hundred divisors?
#
# See: http://www.wikihow.com/Determine-the-Number-of-Divisors-of-an-Integer
def __init__(self):
pass
@staticmethod
def find(limit):
index = 1
candidate = 1
while True:
factors = HighlyDivisible.factor(candidate)
if HighlyDivisible.num_divisors(factors) > limit:
return candidate
index += 1
candidate += index
@staticmethod
def factor(candidate):
factors = []
for i in range(2, int(numpy.ma.sqrt(candidate)) + 1):
exponent = 0
while candidate % i == 0:
# i is a factor
exponent += 1
candidate /= i
if exponent > 0:
factors.append([i, exponent])
if candidate > 1:
# we are left with a prime:
factors.append([candidate, 1])
return factors
@staticmethod
def num_divisors(factors):
num_divisors = 1
for (divisor, exponent) in factors:
# see wikiHow link above
num_divisors *= exponent + 1
return num_divisors
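# Worked example (added for illustration): 28 = 2**2 * 7, so by the rule above its
# divisor count is (2 + 1) * (1 + 1) = 6, matching the six divisors 1, 2, 4, 7, 14, 28.
#   HighlyDivisible.factor(28)                      -> [[2, 2], [7, 1]]
#   HighlyDivisible.num_divisors([[2, 2], [7, 1]])  -> 6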
if __name__ == '__main__':
count = 500
result = HighlyDivisible.find(count)
print "\nThe value of the first triangle number to have over", count, "divisors is", result
| mit | -1,834,102,411,324,664,800 | 27.337838 | 95 | 0.556986 | false |
Ultimaker/Cura | plugins/VersionUpgrade/VersionUpgrade21to22/VersionUpgrade21to22.py | 1 | 18773 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import configparser #To get version numbers from config files.
from typing import Dict, Iterable, List, Optional, Set, Tuple
from UM.VersionUpgrade import VersionUpgrade # Superclass of the plugin.
from . import MachineInstance # To upgrade machine instances.
from . import Preferences #To upgrade preferences.
from . import Profile # To upgrade profiles.
## Which machines have material-specific profiles in the new version?
#
# These are the 2.1 machine identities with "has_machine_materials": true in
# their definitions in Cura 2.2. So these are the machines for which profiles
# need to split into multiple profiles, one for each material and variant.
#
# Each machine has the materials and variants listed in which it needs to
# split, since those might be different per machine.
#
# This should contain the definition as they are stated in the profiles. The
# inheritance structure cannot be found at this stage, since the definitions
# may have changed in later versions than 2.2.
_machines_with_machine_quality = {
"ultimaker2plus": {
"materials": { "generic_abs", "generic_cpe", "generic_pla", "generic_pva", "generic_cpe_plus", "generic_nylon", "generic_pc", "generic_tpu" },
"variants": { "0.25 mm", "0.4 mm", "0.6 mm", "0.8 mm" }
},
"ultimaker2_extended_plus": {
"materials": { "generic_abs", "generic_cpe", "generic_pla", "generic_pva", "generic_cpe_plus", "generic_nylon", "generic_pc", "generic_tpu" },
"variants": { "0.25 mm", "0.4 mm", "0.6 mm", "0.8 mm" }
}
} # type: Dict[str, Dict[str, Set[str]]]
## How to translate material names from the old version to the new.
_material_translations = {
"PLA": "generic_pla",
"ABS": "generic_abs",
"CPE": "generic_cpe",
"CPE+": "generic_cpe_plus",
"Nylon": "generic_nylon",
"PC": "generic_pc",
"TPU": "generic_tpu",
} # type: Dict[str, str]
## How to translate material names for in the profile names.
_material_translations_profiles = {
"PLA": "pla",
"ABS": "abs",
"CPE": "cpe",
"CPE+": "cpep",
"Nylon": "nylon",
"PC": "pc",
"TPU": "tpu",
} # type: Dict[str, str]
## How to translate printer names from the old version to the new.
_printer_translations = {
"ultimaker2plus": "ultimaker2_plus"
} # type: Dict[str, str]
_printer_translations_profiles = {
"ultimaker2plus": "um2p", #Does NOT get included in PLA profiles!
"ultimaker2_extended_plus": "um2ep" #Has no profiles for CPE+, Nylon, PC and TPU!
} # type: Dict[str, str]
## How to translate profile names from the old version to the new.
#
# This must have an entry for every built-in profile, since it also serves
# as a set for which profiles were built-in.
_profile_translations = {
"Low Quality": "low",
"Normal Quality": "normal",
"High Quality": "high",
"Ulti Quality": "high", #This one doesn't have an equivalent. Map it to high.
"abs_0.25_normal": "um2p_abs_0.25_normal",
"abs_0.4_fast": "um2p_abs_0.4_fast",
"abs_0.4_high": "um2p_abs_0.4_high",
"abs_0.4_normal": "um2p_abs_0.4_normal",
"abs_0.6_normal": "um2p_abs_0.6_normal",
"abs_0.8_normal": "um2p_abs_0.8_normal",
"cpe_0.25_normal": "um2p_cpe_0.25_normal",
"cpe_0.4_fast": "um2p_cpe_0.4_fast",
"cpe_0.4_high": "um2p_cpe_0.4_high",
"cpe_0.4_normal": "um2p_cpe_0.4_normal",
"cpe_0.6_normal": "um2p_cpe_0.6_normal",
"cpe_0.8_normal": "um2p_cpe_0.8_normal",
"cpep_0.4_draft": "um2p_cpep_0.4_draft",
"cpep_0.4_normal": "um2p_cpep_0.4_normal",
"cpep_0.6_draft": "um2p_cpep_0.6_draft",
"cpep_0.6_normal": "um2p_cpep_0.6_normal",
"cpep_0.8_draft": "um2p_cpep_0.8_draft",
"cpep_0.8_normal": "um2p_cpep_0.8_normal",
"nylon_0.25_high": "um2p_nylon_0.25_high",
"nylon_0.25_normal": "um2p_nylon_0.25_normal",
"nylon_0.4_fast": "um2p_nylon_0.4_fast",
"nylon_0.4_normal": "um2p_nylon_0.4_normal",
"nylon_0.6_fast": "um2p_nylon_0.6_fast",
"nylon_0.6_normal": "um2p_nylon_0.6_normal",
"nylon_0.8_draft": "um2p_nylon_0.8_draft",
"nylon_0.8_normal": "um2p_nylon_0.8_normal",
"pc_0.25_high": "um2p_pc_0.25_high",
"pc_0.25_normal": "um2p_pc_0.25_normal",
"pc_0.4_fast": "um2p_pc_0.4_fast",
"pc_0.4_normal": "um2p_pc_0.4_normal",
"pc_0.6_fast": "um2p_pc_0.6_fast",
"pc_0.6_normal": "um2p_pc_0.6_normal",
"pc_0.8_draft": "um2p_pc_0.8_draft",
"pc_0.8_normal": "um2p_pc_0.8_normal",
"pla_0.25_normal": "pla_0.25_normal", #Note that the PLA profiles don't get the um2p_ prefix, though they are for UM2+.
"pla_0.4_fast": "pla_0.4_fast",
"pla_0.4_high": "pla_0.4_high",
"pla_0.4_normal": "pla_0.4_normal",
"pla_0.6_normal": "pla_0.6_normal",
"pla_0.8_normal": "pla_0.8_normal",
"tpu_0.25_high": "um2p_tpu_0.25_high",
"tpu_0.4_normal": "um2p_tpu_0.4_normal",
"tpu_0.6_fast": "um2p_tpu_0.6_fast"
} # type: Dict[str, str]
## Settings that are no longer in the new version.
_removed_settings = {
"fill_perimeter_gaps",
"support_area_smoothing"
} # type: Set[str]
## How to translate setting names from the old version to the new.
_setting_name_translations = {
"remove_overlapping_walls_0_enabled": "travel_compensate_overlapping_walls_0_enabled",
"remove_overlapping_walls_enabled": "travel_compensate_overlapping_walls_enabled",
"remove_overlapping_walls_x_enabled": "travel_compensate_overlapping_walls_x_enabled",
"retraction_hop": "retraction_hop_enabled",
"skin_overlap": "infill_overlap",
"skirt_line_width": "skirt_brim_line_width",
"skirt_minimal_length": "skirt_brim_minimal_length",
"skirt_speed": "skirt_brim_speed",
"speed_support_lines": "speed_support_infill",
"speed_support_roof": "speed_support_interface",
"support_roof_density": "support_interface_density",
"support_roof_enable": "support_interface_enable",
"support_roof_extruder_nr": "support_interface_extruder_nr",
"support_roof_line_distance": "support_interface_line_distance",
"support_roof_line_width": "support_interface_line_width",
"support_roof_pattern": "support_interface_pattern"
} # type: Dict[str, str]
## Custom profiles become quality_changes. This dictates which quality to base
# the quality_changes profile on.
#
# Which quality profile to base the quality_changes on depends on the machine,
# material and nozzle.
#
# If a current configuration is missing, fall back to "normal".
_quality_fallbacks = {
"ultimaker2_plus": {
"ultimaker2_plus_0.25": {
"generic_abs": "um2p_abs_0.25_normal",
"generic_cpe": "um2p_cpe_0.25_normal",
#No CPE+.
"generic_nylon": "um2p_nylon_0.25_normal",
"generic_pc": "um2p_pc_0.25_normal",
"generic_pla": "pla_0.25_normal",
"generic_tpu": "um2p_tpu_0.25_high"
},
"ultimaker2_plus_0.4": {
"generic_abs": "um2p_abs_0.4_normal",
"generic_cpe": "um2p_cpe_0.4_normal",
"generic_cpep": "um2p_cpep_0.4_normal",
"generic_nylon": "um2p_nylon_0.4_normal",
"generic_pc": "um2p_pc_0.4_normal",
"generic_pla": "pla_0.4_normal",
"generic_tpu": "um2p_tpu_0.4_normal"
},
"ultimaker2_plus_0.6": {
"generic_abs": "um2p_abs_0.6_normal",
"generic_cpe": "um2p_cpe_0.6_normal",
"generic_cpep": "um2p_cpep_0.6_normal",
"generic_nylon": "um2p_nylon_0.6_normal",
"generic_pc": "um2p_pc_0.6_normal",
"generic_pla": "pla_0.6_normal",
"generic_tpu": "um2p_tpu_0.6_fast",
},
"ultimaker2_plus_0.8": {
"generic_abs": "um2p_abs_0.8_normal",
"generic_cpe": "um2p_cpe_0.8_normal",
"generic_cpep": "um2p_cpep_0.8_normal",
"generic_nylon": "um2p_nylon_0.8_normal",
"generic_pc": "um2p_pc_0.8_normal",
"generic_pla": "pla_0.8_normal",
#No TPU.
}
}
} # type: Dict[str, Dict[str, Dict[str, str]]]
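# Example of the fallback lookup (added for illustration):
# getQualityFallback("ultimaker2_plus", "ultimaker2_plus_0.4", "generic_pla") returns
# "pla_0.4_normal", while any machine/variant/material combination missing from the
# table above falls back to "normal".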
## How to translate variants of specific machines from the old version to the
# new.
_variant_translations = {
"ultimaker2_plus": {
"0.25 mm": "ultimaker2_plus_0.25",
"0.4 mm": "ultimaker2_plus_0.4",
"0.6 mm": "ultimaker2_plus_0.6",
"0.8 mm": "ultimaker2_plus_0.8"
},
"ultimaker2_extended_plus": {
"0.25 mm": "ultimaker2_extended_plus_0.25",
"0.4 mm": "ultimaker2_extended_plus_0.4",
"0.6 mm": "ultimaker2_extended_plus_0.6",
"0.8 mm": "ultimaker2_extended_plus_0.8"
}
} # type: Dict[str, Dict[str, str]]
## How to translate variant names for in the profile names.
_variant_translations_profiles = {
"0.25 mm": "0.25",
"0.4 mm": "0.4",
"0.6 mm": "0.6",
"0.8 mm": "0.8"
} # type: Dict[str, str]
## Cura 2.2's material profiles use a different naming scheme for variants.
#
# Getting pretty stressed out by this sort of thing...
_variant_translations_materials = {
"ultimaker2_plus": {
"0.25 mm": "ultimaker2_plus_0.25_mm",
"0.4 mm": "ultimaker2_plus_0.4_mm",
"0.6 mm": "ultimaker2_plus_0.6_mm",
"0.8 mm": "ultimaker2_plus_0.8_mm"
},
"ultimaker2_extended_plus": {
"0.25 mm": "ultimaker2_plus_0.25_mm",
"0.4 mm": "ultimaker2_plus_0.4_mm",
"0.6 mm": "ultimaker2_plus_0.6_mm",
"0.8 mm": "ultimaker2_plus_0.8_mm"
}
} # type: Dict[str, Dict[str, str]]
## Converts configuration from Cura 2.1's file formats to Cura 2.2's.
#
# It converts the machine instances and profiles.
class VersionUpgrade21to22(VersionUpgrade):
## Gets the fallback quality to use for a specific machine-variant-material
# combination.
#
# For custom profiles we fall back onto this quality profile, since we
# don't know which quality profile it was based on.
#
# \param machine The machine ID of the user's configuration in 2.2.
# \param variant The variant ID of the user's configuration in 2.2.
# \param material The material ID of the user's configuration in 2.2.
@staticmethod
def getQualityFallback(machine: str, variant: str, material: str) -> str:
if machine not in _quality_fallbacks:
return "normal"
if variant not in _quality_fallbacks[machine]:
return "normal"
if material not in _quality_fallbacks[machine][variant]:
return "normal"
return _quality_fallbacks[machine][variant][material]
## Gets the set of built-in profile names in Cura 2.1.
#
# This is required to test if profiles should be converted to a quality
# profile or a quality-changes profile.
@staticmethod
def builtInProfiles() -> Iterable[str]:
return _profile_translations.keys()
## Gets a set of the machines which now have per-material quality profiles.
#
# \return A set of machine identifiers.
@staticmethod
def machinesWithMachineQuality() -> Dict[str, Dict[str, Set[str]]]:
return _machines_with_machine_quality
## Converts machine instances from format version 1 to version 2.
#
# \param serialised The serialised machine instance in version 1.
# \param filename The supposed file name of the machine instance, without
# extension.
# \return A tuple containing the new filename and the serialised machine
# instance in version 2, or None if the input was not of the correct
# format.
def upgradeMachineInstance(self, serialised: str, filename: str) -> Optional[Tuple[List[str], List[str]]]:
machine_instance = MachineInstance.importFrom(serialised, filename)
if not machine_instance: #Invalid file format.
return None
return machine_instance.export()
## Converts preferences from format version 2 to version 3.
#
# \param serialised The serialised preferences file in version 2.
# \param filename The supposed file name of the preferences file, without
# extension.
# \return A tuple containing the new filename and the serialised
# preferences in version 3, or None if the input was not of the correct
# format.
def upgradePreferences(self, serialised: str, filename: str) -> Optional[Tuple[List[str], List[str]]]:
preferences = Preferences.importFrom(serialised, filename)
if not preferences: #Invalid file format.
return None
return preferences.export()
## Converts profiles from format version 1 to version 2.
#
# \param serialised The serialised profile in version 1.
# \param filename The supposed file name of the profile, without
# extension.
# \return A tuple containing the new filename and the serialised profile
# in version 2, or None if the input was not of the correct format.
def upgradeProfile(self, serialised: str, filename: str) -> Optional[Tuple[List[str], List[str]]]:
profile = Profile.importFrom(serialised, filename)
if not profile: # Invalid file format.
return None
return profile.export()
## Translates a material name for the change from Cura 2.1 to 2.2.
#
# \param material A material name in Cura 2.1.
# \return The name of the corresponding material in Cura 2.2.
@staticmethod
def translateMaterial(material: str) -> str:
if material in _material_translations:
return _material_translations[material]
return material
## Translates a material name for the change from Cura 2.1 to 2.2 in
# quality profile names.
#
# \param material A material name in Cura 2.1.
# \return The name of the corresponding material in the quality profiles
# in Cura 2.2.
@staticmethod
def translateMaterialForProfiles(material: str) -> str:
if material in _material_translations_profiles:
return _material_translations_profiles[material]
return material
## Translates a printer name that might have changed since the last
# version.
#
# \param printer A printer name in Cura 2.1.
# \return The name of the corresponding printer in Cura 2.2.
@staticmethod
def translatePrinter(printer: str) -> str:
if printer in _printer_translations:
return _printer_translations[printer]
return printer #Doesn't need to be translated.
## Translates a printer name for the change from Cura 2.1 to 2.2 in quality
# profile names.
#
# \param printer A printer name in 2.1.
# \return The name of the corresponding printer in Cura 2.2.
@staticmethod
def translatePrinterForProfile(printer: str) -> str:
if printer in _printer_translations_profiles:
return _printer_translations_profiles[printer]
return printer
## Translates a built-in profile name that might have changed since the
# last version.
#
# \param profile A profile name in the old version.
# \return The corresponding profile name in the new version.
@staticmethod
def translateProfile(profile: str) -> str:
if profile in _profile_translations:
return _profile_translations[profile]
return profile #Doesn't need to be translated.
## Updates settings for the change from Cura 2.1 to 2.2.
#
# The keys and values of settings are changed to what they should be in
# the new version. Each setting is changed in-place in the provided
# dictionary. This changes the input parameter.
#
# \param settings A dictionary of settings (as key-value pairs) to update.
# \return The same dictionary.
@staticmethod
def translateSettings(settings: Dict[str, str]) -> Dict[str, str]:
new_settings = {}
for key, value in settings.items():
if key in _removed_settings:
continue
if key == "retraction_combing": #Combing was made into an enum instead of a boolean.
new_settings[key] = "off" if (value == "False") else "all"
continue
if key == "cool_fan_full_layer": #Layer counting was made one-indexed.
new_settings[key] = str(int(value) + 1)
continue
if key in _setting_name_translations:
new_settings[_setting_name_translations[key]] = value
continue
new_settings[key] = value
return new_settings
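    # Illustration (added; setting values chosen arbitrarily): translateSettings(
    #   {"retraction_combing": "False", "cool_fan_full_layer": "3", "skin_overlap": "10"})
    # returns {"retraction_combing": "off", "cool_fan_full_layer": "4", "infill_overlap": "10"},
    # and any key listed in _removed_settings is dropped entirely.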
## Translates a setting name for the change from Cura 2.1 to 2.2.
#
# \param setting The name of a setting in Cura 2.1.
# \return The name of the corresponding setting in Cura 2.2.
@staticmethod
def translateSettingName(setting: str) -> str:
if setting in _setting_name_translations:
return _setting_name_translations[setting]
return setting #Doesn't need to be translated.
## Translates a variant name for the change from Cura 2.1 to 2.2
#
# \param variant The name of a variant in Cura 2.1.
# \param machine The name of the machine this variant is part of in Cura
# 2.2's naming.
# \return The name of the corresponding variant in Cura 2.2.
@staticmethod
def translateVariant(variant: str, machine: str) -> str:
if machine in _variant_translations and variant in _variant_translations[machine]:
return _variant_translations[machine][variant]
return variant
## Translates a variant name for the change from Cura 2.1 to 2.2 in
# material profiles.
#
# \param variant The name of the variant in Cura 2.1.
# \param machine The name of the machine this variant is part of in Cura
# 2.2's naming.
# \return The name of the corresponding variant for in material profiles
# in Cura 2.2.
@staticmethod
def translateVariantForMaterials(variant: str, machine: str) -> str:
if machine in _variant_translations_materials and variant in _variant_translations_materials[machine]:
return _variant_translations_materials[machine][variant]
return variant
## Translates a variant name for the change from Cura 2.1 to 2.2 in quality
# profiles.
#
# \param variant The name of the variant in Cura 2.1.
# \return The name of the corresponding variant for in quality profiles in
# Cura 2.2.
@staticmethod
def translateVariantForProfiles(variant: str) -> str:
if variant in _variant_translations_profiles:
return _variant_translations_profiles[variant]
return variant | lgpl-3.0 | 8,983,434,077,378,631,000 | 41 | 150 | 0.635753 | false |
nathanielvarona/airflow | airflow/jobs/local_task_job.py | 1 | 8884 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import signal
from typing import Optional
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.jobs.base_job import BaseJob
from airflow.models.taskinstance import TaskInstance
from airflow.stats import Stats
from airflow.task.task_runner import get_task_runner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import provide_session
from airflow.utils.state import State
class LocalTaskJob(BaseJob):
"""LocalTaskJob runs a single task instance."""
__mapper_args__ = {'polymorphic_identity': 'LocalTaskJob'}
def __init__(
self,
task_instance: TaskInstance,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
pickle_id: Optional[str] = None,
pool: Optional[str] = None,
*args,
**kwargs,
):
self.task_instance = task_instance
self.dag_id = task_instance.dag_id
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_runner = None
        # terminating state is used so that a job doesn't try to
# terminate multiple times
self.terminating = False
super().__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
# pylint: disable=unused-argument
def signal_handler(signum, frame):
"""Setting kill signal handler"""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.on_kill()
self.task_instance.refresh_from_db()
if self.task_instance.state not in State.finished:
self.task_instance.set_state(State.FAILED)
self.task_instance._run_finished_callback( # pylint: disable=protected-access
error="task received sigterm"
)
raise AirflowException("LocalTaskJob received SIGTERM signal")
# pylint: enable=unused-argument
signal.signal(signal.SIGTERM, signal_handler)
if not self.task_instance.check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=self.id,
pool=self.pool,
):
self.log.info("Task is not able to be run")
return
try:
self.task_runner.start()
heartbeat_time_limit = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# task callback invocation happens either here or in
# self.heartbeat() instead of taskinstance._run_raw_task to
# avoid race conditions
#
# When self.terminating is set to True by heartbeat_callback, this
# loop should not be restarted. Otherwise self.handle_task_exit
# will be invoked and we will end up with duplicated callbacks
while not self.terminating:
# Monitor the task to see if it's done. Wait in a syscall
# (`os.wait`) for as long as possible so we notice the
                # subprocess finishing as quickly as we can
max_wait_time = max(
0, # Make sure this value is never negative,
min(
(
heartbeat_time_limit
- (timezone.utcnow() - self.latest_heartbeat).total_seconds() * 0.75
),
self.heartrate,
),
)
return_code = self.task_runner.return_code(timeout=max_wait_time)
if return_code is not None:
self.handle_task_exit(return_code)
return
self.heartbeat()
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
# This can only really happen if the worker can't read the DB for a long time
time_since_last_heartbeat = (timezone.utcnow() - self.latest_heartbeat).total_seconds()
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.log.error("Heartbeat time limit exceeded!")
raise AirflowException(
"Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s).".format(time_since_last_heartbeat, heartbeat_time_limit)
)
finally:
self.on_kill()
def handle_task_exit(self, return_code: int) -> None:
"""Handle case where self.task_runner exits by itself"""
self.log.info("Task exited with return code %s", return_code)
self.task_instance.refresh_from_db()
# task exited by itself, so we need to check for error file
# in case it failed due to runtime exception/error
error = None
if self.task_instance.state == State.RUNNING:
# This is for a case where the task received a sigkill
# while running
self.task_instance.set_state(State.FAILED)
if self.task_instance.state != State.SUCCESS:
error = self.task_runner.deserialize_run_error()
self.task_instance._run_finished_callback(error=error) # pylint: disable=protected-access
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
if not same_hostname:
self.log.warning(
"The recorded hostname %s " "does not match this instance's hostname " "%s",
ti.hostname,
fqdn,
)
raise AirflowException("Hostname of job runner does not match")
current_pid = self.task_runner.process.pid
same_process = ti.pid == current_pid
if ti.pid is not None and not same_process:
self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid)
raise AirflowException("PID of job runner does not match")
elif self.task_runner.return_code() is None and hasattr(self.task_runner, 'process'):
self.log.warning(
"State of this instance has been externally set to %s. " "Terminating instance.", ti.state
)
self.task_runner.terminate()
if ti.state == State.SUCCESS:
error = None
else:
# if ti.state is not set by taskinstance.handle_failure, then
# error file will not be populated and it must be updated by
                # external source such as the web UI
error = self.task_runner.deserialize_run_error() or "task marked as failed externally"
ti._run_finished_callback(error=error) # pylint: disable=protected-access
self.terminating = True
| apache-2.0 | 6,241,240,399,067,721,000 | 41.711538 | 109 | 0.605583 | false |
ykoga-kyutech/nippou_proj | nippou_app/migrations/0001_initial.py | 1 | 4531 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import django.utils.timezone
import django.contrib.auth.models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('password', models.CharField(verbose_name='password', max_length=128)),
('last_login', models.DateTimeField(blank=True, verbose_name='last login', null=True)),
('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status', default=False)),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], error_messages={'unique': 'A user with that username already exists.'}, unique=True, max_length=30)),
('first_name', models.CharField(blank=True, verbose_name='first name', max_length=30)),
('last_name', models.CharField(blank=True, verbose_name='last name', max_length=30)),
('email', models.EmailField(blank=True, verbose_name='email address', max_length=254)),
('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', verbose_name='staff status', default=False)),
('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active', default=True)),
('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
('user_dev', models.CharField(verbose_name='所属', max_length=512)),
('groups', models.ManyToManyField(help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', blank=True, verbose_name='groups', related_query_name='user', to='auth.Group')),
('user_permissions', models.ManyToManyField(help_text='Specific permissions for this user.', related_name='user_set', blank=True, verbose_name='user permissions', related_query_name='user', to='auth.Permission')),
],
options={
'abstract': False,
'verbose_name_plural': 'users',
'verbose_name': 'user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='nippou_data',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('title', models.CharField(verbose_name='タイトル', max_length=512)),
('text', models.TextField(verbose_name='本文')),
('date', models.DateTimeField(verbose_name='投稿日時', default=datetime.datetime.now)),
('open', models.BooleanField(verbose_name='公開', default=False)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('task_name', models.CharField(verbose_name='タスク名', max_length=512)),
('time_yotei', models.IntegerField(verbose_name='予定時間')),
('time_jitsu', models.IntegerField(verbose_name='実時間')),
('task_y', models.TextField(verbose_name='Y:やったこと')),
('task_w', models.TextField(verbose_name='W:わかったこと')),
('task_t', models.TextField(verbose_name='T:次やること')),
('nippou', models.ForeignKey(to='nippou_app.nippou_data')),
],
),
]
| mit | -7,798,382,819,515,398,000 | 62.557143 | 432 | 0.612272 | false |
sapphon/minecraftpython | src/main/resources/assets/minecraftpython/scripts/base/items.py | 1 | 4641 | IRON_SHOVEL = 'iron_shovel'
IRON_PICKAXE = 'iron_pickaxe'
IRON_AXE = 'iron_axe'
FLINT_AND_STEEL = 'flint_and_steel'
APPLE = 'apple'
BOW = 'bow'
ARROW = 'arrow'
COAL = 'coal'
DIAMOND = 'diamond'
IRON_INGOT = 'iron_ingot'
GOLD_INGOT = 'gold_ingot'
IRON_SWORD = 'iron_sword'
WOODEN_SWORD = 'wooden_sword'
WOODEN_SHOVEL = 'wooden_shovel'
WOODEN_PICKAXE = 'wooden_pickaxe'
WOODEN_AXE = 'wooden_axe'
STONE_SWORD = 'stone_sword'
STONE_SHOVEL = 'stone_shovel'
STONE_PICKAXE = 'stone_pickaxe'
STONE_AXE = 'stone_axe'
DIAMOND_SWORD = 'diamond_sword'
DIAMOND_SHOVEL = 'diamond_shovel'
DIAMOND_PICKAXE = 'diamond_pickaxe'
DIAMOND_AXE = 'diamond_axe'
STICK = 'stick'
BOWL = 'bowl'
MUSHROOM_STEW = 'mushroom_stew'
GOLDEN_SWORD = 'golden_sword'
GOLDEN_SHOVEL = 'golden_shovel'
GOLDEN_PICKAXE = 'golden_pickaxe'
GOLDEN_AXE = 'golden_axe'
STRING = 'string'
FEATHER = 'feather'
GUNPOWDER = 'gunpowder'
WOODEN_HOE = 'wooden_hoe'
STONE_HOE = 'stone_hoe'
IRON_HOE = 'iron_hoe'
DIAMOND_HOE = 'diamond_hoe'
GOLDEN_HOE = 'golden_hoe'
WHEAT_SEEDS = 'wheat_seeds'
WHEAT = 'wheat'
BREAD = 'bread'
LEATHER_HELMET = 'leather_helmet'
LEATHER_CHESTPLATE = 'leather_chestplate'
LEATHER_LEGGINGS = 'leather_leggings'
LEATHER_BOOTS = 'leather_boots'
CHAINMAIL_HELMET = 'chainmail_helmet'
CHAINMAIL_CHESTPLATE = 'chainmail_chestplate'
CHAINMAIL_LEGGINGS = 'chainmail_leggings'
CHAINMAIL_BOOTS = 'chainmail_boots'
IRON_HELMET = 'iron_helmet'
IRON_CHESTPLATE = 'iron_chestplate'
IRON_LEGGINGS = 'iron_leggings'
IRON_BOOTS = 'iron_boots'
DIAMOND_HELMET = 'diamond_helmet'
DIAMOND_CHESTPLATE = 'diamond_chestplate'
DIAMOND_LEGGINGS = 'diamond_leggings'
DIAMOND_BOOTS = 'diamond_boots'
GOLDEN_HELMET = 'golden_helmet'
GOLDEN_CHESTPLATE = 'golden_chestplate'
GOLDEN_LEGGINGS = 'golden_leggings'
GOLDEN_BOOTS = 'golden_boots'
FLINT = 'flint'
PORKCHOP = 'porkchop'
COOKED_PORKCHOP = 'cooked_porkchop'
PAINTING = 'painting'
GOLDEN_APPLE = 'golden_apple'
SIGN = 'sign'
WOODEN_DOOR = 'wooden_door'
BUCKET = 'bucket'
WATER_BUCKET = 'water_bucket'
LAVA_BUCKET = 'lava_bucket'
MINECART = 'minecart'
SADDLE = 'saddle'
IRON_DOOR = 'iron_door'
REDSTONE = 'redstone'
SNOWBALL = 'snowball'
BOAT = 'boat'
LEATHER = 'leather'
MILK_BUCKET = 'milk_bucket'
BRICK = 'brick'
CLAY_BALL = 'clay_ball'
REEDS = 'reeds'
PAPER = 'paper'
BOOK = 'book'
SLIME_BALL = 'slime_ball'
CHEST_MINECART = 'chest_minecart'
FURNACE_MINECART = 'furnace_minecart'
EGG = 'egg'
COMPASS = 'compass'
FISHING_ROD = 'fishing_rod'
CLOCK = 'clock'
GLOWSTONE_DUST = 'glowstone_dust'
FISH = 'fish'
COOKED_FISHED = 'cooked_fished'
DYE = 'dye'
BONE = 'bone'
SUGAR = 'sugar'
CAKE = 'cake'
BED = 'bed'
REPEATER = 'repeater'
COOKIE = 'cookie'
FILLED_MAP = 'filled_map'
SHEARS = 'shears'
MELON = 'melon'
PUMPKIN_SEEDS = 'pumpkin_seeds'
MELON_SEEDS = 'melon_seeds'
BEEF = 'beef'
COOKED_BEEF = 'cooked_beef'
CHICKEN = 'chicken'
COOKED_CHICKEN = 'cooked_chicken'
ROTTEN_FLESH = 'rotten_flesh'
ENDER_PEARL = 'ender_pearl'
BLAZE_ROD = 'blaze_rod'
GHAST_TEAR = 'ghast_tear'
GOLD_NUGGET = 'gold_nugget'
NETHER_WART = 'nether_wart'
POTION = 'potion'
GLASS_BOTTLE = 'glass_bottle'
SPIDER_EYE = 'spider_eye'
FERMENTED_SPIDER_EYE = 'fermented_spider_eye'
BLAZE_POWDER = 'blaze_powder'
MAGMA_CREAM = 'magma_cream'
BREWING_STAND = 'brewing_stand'
CAULDRON = 'cauldron'
ENDER_EYE = 'ender_eye'
SPECKLED_MELON = 'speckled_melon'
SPAWN_EGG = 'spawn_egg'
EXPERIENCE_BOTTLE = 'experience_bottle'
FIRE_CHARGE = 'fire_charge'
WRITABLE_BOOK = 'writable_book'
WRITTEN_BOOK = 'written_book'
EMERALD = 'emerald'
ITEM_FRAME = 'item_frame'
FLOWER_POT = 'flower_pot'
CARROT = 'carrot'
POTATO = 'potato'
BAKED_POTATO = 'baked_potato'
POISONOUS_POTATO = 'poisonous_potato'
MAP = 'map'
GOLDEN_CARROT = 'golden_carrot'
SKULL = 'skull'
CARROT_ON_A_STICK = 'carrot_on_a_stick'
NETHER_STAR = 'nether_star'
PUMPKIN_PIE = 'pumpkin_pie'
FIREWORKS = 'fireworks'
FIREWORK_CHARGE = 'firework_charge'
ENCHANTED_BOOK = 'enchanted_book'
TRIPWIRE_HOOK = 'tripwire_hook'
COMPARATOR = 'comparator'
NETHERBRICK = 'netherbrick'
QUARTZ = 'quartz'
TNT_MINECART = 'tnt_minecart'
HOPPER_MINECART = 'hopper_minecart'
IRON_HORSE_ARMOR = 'iron_horse_armor'
GOLDEN_HORSE_ARMOR = 'golden_horse_armor'
DIAMOND_HORSE_ARMOR = 'diamond_horse_armor'
LEAD = 'lead'
NAME_TAG = 'name_tag'
COMMAND_BLOCK_MINECART = 'command_block_minecart'
RECORD_13 = 'record_13'
RECORD_CAT = 'record_cat'
RECORD_BLOCKS = 'record_blocks'
RECORD_CHIRP = 'record_chirp'
RECORD_FAR = 'record_far'
RECORD_MALL = 'record_mall'
RECORD_MELLOHI = 'record_mellohi'
RECORD_STAL = 'record_stal'
RECORD_STRAD = 'record_strad'
RECORD_WARD = 'record_ward'
RECORD_11 = 'record_11'
RECORD_WAIT = 'record_wait'
CL_00000044 = 'CL_00000044' | gpl-3.0 | 2,584,095,510,397,654,000 | 25.83237 | 49 | 0.721827 | false |
ingadhoc/odoo-help | help_doc_portal/__openerp__.py | 1 | 1517 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'active': False,
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'category': 'Knowledge Management',
'demo_xml': [
],
'depends': ['help_doc', 'portal'],
'description': 'Autoinstallable module for documentation with portal module installed',
'name': 'Odoo Portal Help Documentation',
'test': [],
'data': [
'menu_item.xml',
],
'version': '8.0.0.0.0',
'auto_install': True,
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,180,211,664,426,069,500 | 36.925 | 91 | 0.591958 | false |
rocky/python-spark | example/python2/py2_parser.py | 1 | 20426 | # Copyright (c) 2016-2017 Rocky Bernstein
"""
More complex expression parsing
"""
# from __future__ import print_function
import sys
from spark_parser.ast import AST
from py2_scan import Python2Scanner, ENDMARKER
from spark_parser import GenericASTBuilder
DEFAULT_DEBUG = {'rules': False, 'transition': False, 'reduce' : False,
'errorstack': 'full', 'context': True, 'dups': True}
class PythonParser(GenericASTBuilder):
"""A more complete spark example: a Python 2 Parser.
Note: function parse() comes from GenericASTBuilder
"""
def __init__(self, start='file_input', debug=DEFAULT_DEBUG):
super(PythonParser, self).__init__(AST, start, debug=debug)
self.start = start
self.debug = debug
# Put left-recursive list non-terminals:
# x ::= x y
# x ::=
self.collect = frozenset(('stmts', 'comments', 'dot_names', 'dots',
'comp_op_exprs', 'newline_or_stmts',
'comma_names', 'comma_fpdef_opt_eqtests',)
)
def debug_reduce(self, rule, tokens, parent, i):
"""Customized format and print for our kind of tokens
which gets called in debugging grammar reduce rules
"""
prefix = ' '
if parent and tokens:
p_token = tokens[parent]
if hasattr(p_token, 'line'):
prefix = 'L.%3d.%03d: ' % (p_token.line, p_token.column)
pass
pass
print("%s%s ::= %s" % (prefix, rule[0], ' '.join(rule[1])))
def nonterminal(self, nt, args):
# nonterminal with a (reserved) single word derivation
no_skip = ('pass_stmt', 'continue_stmt', 'break_stmt', 'return_stmt')
has_len = hasattr(args, '__len__')
if nt in self.collect and len(args) > 1:
#
# Collect iterated thingies together.
#
rv = args[0]
for arg in args[1:]:
rv.append(arg)
elif (has_len and len(args) == 1 and
hasattr(args[0], '__len__') and args[0] not in no_skip and
len(args[0]) == 1):
# Remove singleton derivations
rv = GenericASTBuilder.nonterminal(self, nt, args[0])
del args[0] # save memory
elif (has_len and len(args) == 2 and
hasattr(args[1], '__len__') and len(args[1]) == 0):
# Remove trailing epsilon rules, but only when there
# are two items.
if hasattr(args[0], '__len__') and len(args[0]) == 1:
# Remove singleton derivation
rv = args[0]
else:
rv = GenericASTBuilder.nonterminal(self, nt, args[:1])
del args[1] # save memory
else:
rv = GenericASTBuilder.nonterminal(self, nt, args)
return rv
##########################################################
# Python 2 grammar rules. Grammar rule functions
# start with the name p_ and are collected automatically
##########################################################
def p_python_grammar(self, args):
'''
### Note: comment rules that start ## are rules from python26.gr
## We use them to assist checking translation to a SPARK-format grammar.
single_input ::= NEWLINE
single_input ::= simple_stmt
single_input ::= compound_stmt NEWLINE
file_input ::= newline_or_stmts ENDMARKER
newline_or_stmts ::= newline_or_stmt*
# Grammar uses NEWLINE instead of 'sep', but ; does separate statements.
# The grammar is vague on how NEWLINE, INDENT, and DEDENT are computed.
newline_or_stmt ::= sep
newline_or_stmt ::= stmt_plus
newline_or_stmt ::= comment sep
stmts ::= stmt*
stmts ::= stmt sep
stmt_plus ::= stmt+
eval_input ::= testlist newlines ENDMARKER
newlines ::= NEWLINE+
decorator ::= AT dotted_name arglist_opt NEWLINE
arglist_opt ::= arglist?
## arglist ::= (argument ',')*
## (argument [','] | '*' test (',' argument)* [',' '**' test] | '**' test)
arglist ::= argument_commas arglist2
argument_commas ::= argument_commas argument_comma
argument_commas ::=
argument_comma ::= argument COMMA
## (argument [','] | '*' test (',' argument)* [',' '**' test] | '**' test)
arglist2 ::= argument comma_opt
        arglist2 ::= STAR test comma_arguments comma_starstar_test_opt
arglist2 ::= STARSTAR test
comma_arguments ::= comma_argument*
comma_argument ::= COMMA argument
comma_starstar_test_opt ::= COMMA STARSTAR test
comma_starstar_test_opt ::=
## Really [keyword '='] test
## argument ::= test [gen_for] | test '=' test
argument ::= test gen_for_opt
argument ::= test EQUAL test
## list_iter ::= list_for | list_if
list_iter ::= list_for
list_iter ::= list_if
## list_for ::= 'for' exprlist 'in' testlist_safe [list_iter]
list_for ::= FOR exprlist IN testlist_safe list_iter_opt
list_iter_opt ::= list_iter?
## list_if ::= 'if' old_test [list_iter]
list_if ::= IF old_test list_iter_opt
gen_for_opt ::= gen_for?
## gen_iter ::= gen_for | gen_if
gen_iter ::= gen_for
gen_iter ::= gen_if
## gen_for ::= 'for' exprlist 'in' or_test [gen_iter]
gen_for ::= FOR exprlist IN or_test gen_iter_opt
gen_iter_opt ::= gen_iter?
## gen_if ::= 'if' old_test [gen_iter]
gen_if ::= IF old_test gen_iter_opt
## testlist1 ::= test (',' test)*
testlist1 ::= test comma_tests
decorators ::= decorator+
decorated ::= decorators classdef_or_funcdef
classdef_or_funcdef ::= classdef
classdef_or_funcdef ::= funcdef
funcdef ::= DEF NAME parameters COLON suite
parameters ::= LPAREN varargslist_opt RPAREN
varargslist_opt ::= varargslist?
# FILL IN
## varargslist ::= fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME)
## varargslist ::= fpdef ['=' test] (',' fpdef ['=' test])* [',']
varargslist ::= fpdef eq_test_opt comma_fpdef_opt_eqtests comma_opt
## (',' fpdef ['=' test])*
comma_fpdef_opt_eqtests ::= comma_fpdef_opt_eqtests COMMA fpdef eq_test_opt
comma_fpdef_opt_eqtests ::=
star_names ::= star_names STAR NAME star_star_opt
star_names ::= star_names star_star_opt
star_names ::=
eq_tests ::= eq_tests eq_test
eq_tests ::=
eq_test_opt ::= eq_test?
eq_test ::= EQUAL test
        star_star_opt ::= COMMA STARSTAR NAME
star_star_opt ::=
## fpdef ::= NAME | '(' fplist ')'
fpdef ::= NAME
fpdef ::= LPAREN fplist RPAREN
## fplist ::= fpdef (',' fpdef)* [',']
fplist ::= fpdef fplist1 comma_opt
## (',' fpdef)* [',']
fplist1 ::= fplist COMMA fpdef
fplist1 ::=
comma_opt ::= COMMA?
stmt ::= simple_stmt
stmt ::= compound_stmt
simple_stmt ::= small_stmt
small_stmt ::= expr_stmt
small_stmt ::= print_stmt
small_stmt ::= del_stmt
small_stmt ::= pass_stmt
small_stmt ::= flow_stmt
small_stmt ::= import_stmt
small_stmt ::= global_stmt
small_stmt ::= exec_stmt
small_stmt ::= assert_stmt
## expr_stmt ::= testlist (augassign (yield_expr|testlist)
## | ('=' (yield_expr|testlist))*)
expr_stmt ::= testlist AUGASSIGN yield_expr_or_testlist
expr_stmt ::= testlist EQUAL yield_expr_or_testlists
yield_expr_or_testlists ::= yield_expr_or_testlists yield_expr_or_testlist
yield_expr_or_testlists ::= yield_expr_or_testlist
yield_expr_or_testlist ::= yield_expr
yield_expr_or_testlist ::= testlist
## yield_expr ::= 'yield' [testlist]
yield_expr ::= YIELD testlist_opt
print_stmt ::= PRINT test_params_or_redirect
test_params_or_redirect ::= test comma_test_opt comma_opt
# FIXME: go over Not quite right as there is one or more..
test_params_or_redirect ::= REDIRECT test comma_test_opt comma_opt
comma_test_opt ::= COMMA test
comma_test_opt ::=
del_stmt ::= DEL exprlist
pass_stmt ::= PASS
flow_stmt ::= break_stmt
flow_stmt ::= continue_stmt
flow_stmt ::= return_stmt
flow_stmt ::= raise_stmt
flow_stmt ::= yield_stmt
break_stmt ::= BREAK
continue_stmt ::= CONTINUE
# return_stmt ::= 'return' [testlist]
return_stmt ::= RETURN testlist_opt
testlist_opt ::= testlist?
yield_stmt ::= yield_expr
raise_stmt ::= RAISE test_opt3
test_opt3 ::= test COMMA test COMMA test
test_opt3 ::= test COMMA test
test_opt3 ::= test
global_stmt ::= GLOBAL NAME comma_names
comma_names ::= comma_name*
comma_name ::= COMMA NAME
exec_stmt ::= EXEC expr
exec_stmt ::= EXEC expr IN test
exec_stmt ::= EXEC expr IN test COMMA test
assert_stmt ::= ASSERT test
assert_stmt ::= ASSERT test COMMA test
test_opt ::= test?
## exprlist ::= expr (',' expr)* [',']
exprlist ::= expr comma_exprs comma_opt
## (',' expr)*
comma_exprs ::= comma_exprs COMMA expr
comma_exprs ::=
# testlist ::= test (',' test)* [',']
testlist ::= test comma_tests comma_opt
# (',' test)*
comma_tests ::= comma_tests COMMA test
comma_tests ::=
## Backward compatibility cruft to support:
## [ x for x in lambda : True, lambda : False if x() ]
## even while also allowing:
## lambda x : 5 if x else 2
## (But not a mix of the two)
## testlist_safe ::= old_test [(',' old_test)+ [',']]
testlist_safe ::= old_test testlist_safe1_opt
testlist_safe1_opt ::= comma_old_tests comma_opt
testlist_safe1_opt ::=
## (',' old_test)+
comma_old_tests ::= comma_old_tests comma_old_test
comma_old_tests ::= comma_old_test
comma_old_test ::= COMMA old_test
## old_test ::= or_test | old_lambdef
old_test ::= or_test
old_test ::= old_lambdef
## old_lambdef ::= 'lambda' [varargslist] ':' old_test
old_lambdef ::= LAMBDA varargslist_opt COLON old_test
test ::= or_test IF or_test ELSE test
test ::= or_test
test ::= lambdef
or_test ::= and_test or_and_tests
## ('or' and_test)*
or_and_tests ::= or_and_test*
or_and_test ::= OR and_test
## and_test ::= not_test ('and' not_test)*
and_test ::= not_test and_not_tests
## ('and' not_test)*
and_not_tests ::= and_not_tests AND not_test
and_not_tests ::=
## not_test ::= 'not' not_test | comparison
not_test ::= NOT not_test
not_test ::= comparison
## comparison ::= expr (comp_op expr)*
comparison ::= expr comp_op_exprs
## (comp_op expr)*
comp_op_exprs ::= comp_op_exprs comp_op expr
comp_op_exprs ::=
comp_op ::= COMP_OP
comp_op ::= IN
comp_op ::= IS
comp_op ::= IS NOT
# Condensation of this
## expr ::= xor_expr ('|' xor_expr)*
## xor_expr ::= and_expr ('^' and_expr)*
## and_expr ::= shift_expr ('&' shift_expr)*
## shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
## arith_expr ::= term (('+'|'-') term)*
## term ::= factor (('*'|'/'|'%'|'//') factor)*
## We don't care about operator precidence
expr ::= factor binop_arith_exprs
binop_arith_exprs ::= binop_arith_exprs binop factor
binop_arith_exprs ::=
binop ::= BINOP
binop ::= PLUS
binop ::= MINUS
binop ::= STAR
## factor ::= ('+'|'-'|'~') factor | power
factor ::= op_factor factor
factor ::= power
op_factor ::= PLUS
op_factor ::= MINUS
op_factor ::= TILDE
power ::= atom trailers starstar_factor_opt
## atom ::= ('(' [yield_expr|testlist_gexp] ')' | '[' [listmaker] ']'
## | '{' [dictmaker] '}' | '`' testlist1 '`'
## | NAME | NUMBER | STRING+)
atom ::= LPAREN yield_expr_or_testlist_gexp_opt RPAREN
atom ::= LBRACKET listmaker_opt RBRACKET
atom ::= LBRACE dictmaker_opt RBRACE
atom ::= BACKTICK testlist1 BACKTICK
atom ::= NUMBER
atom ::= NAME
atom ::= strings
dictmaker_opt ::= dictmaker?
## [yield_expr|testlist_gexp]
yield_expr_or_testlist_gexp_opt ::= yield_expr
yield_expr_or_testlist_gexp_opt ::= testlist_gexp
yield_expr_or_testlist_gexp_opt ::=
listmaker_opt ::= listmaker?
## listmaker ::= test ( list_for | (',' test)* [','] )
listmaker ::= test list_for_or_comma_tests_comma_opt
list_for_or_comma_tests_comma_opt ::= list_for
list_for_or_comma_tests_comma_opt ::= comma_tests comma_opt
## testlist_gexp ::= test ( gen_for | (',' test)* [','] )
testlist_gexp ::= test gen_for_or_comma_tests_comma_opt
gen_for_or_comma_tests_comma_opt ::= gen_for
gen_for_or_comma_tests_comma_opt ::= comma_tests comma_opt
lambdef ::= LAMBDA varargslist_opt COLON test
trailers ::= trailer*
## trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
trailer ::= LPAREN arglist_opt RPAREN
trailer ::= LBRACKET subscriptlist RBRACKET
trailer ::= DOT NAME
## subscriptlist ::= subscript (',' subscript)* [',']
subscriptlist ::= subscript comma_subscripts comma_opt
## (',' subscript)*
comma_subscripts ::= comma_subscripts comma_subscript
comma_subscripts ::=
## ',' subscript
comma_subscript ::= COMMA subscript
## subscript ::= '.' '.' '.' | test | [test] ':' [test] [sliceop]
subscript ::= DOT DOT DOT
subscript ::= test
subscript ::= test_opt COLON test_opt sliceop_opt
sliceop_opt ::= sliceop?
## sliceop ::= ':' [test]
sliceop ::= COLON test_opt
starstar_factor_opt ::= STARSTAR factor
starstar_factor_opt ::=
## dictmaker ::= test ':' test (',' test ':' test)* [',']
dictmaker ::= test COLON comma_test_colon_tests comma_opt
## (',' test ':' test)*
comma_test_colon_tests ::= comma_test_colon_tests comma_test_colon_test
comma_test_colon_tests ::=
## (',' test ':' test)
comma_test_colon_test ::= COMMA test COLON test
classdef ::= CLASS NAME class_subclass_opt COLON suite
class_subclass_opt ::= LPAREN testlist_opt RPAREN
class_subclass_opt ::=
strings ::= STRING+
sep ::= comments
sep ::= NEWLINE
sep ::= SEMICOLON
comments ::= comment+
comment ::= COMMENT
comment ::= COMMENT NEWLINE
'''
# Import-related grammar
def p_import(self, args):
"""
## import_stmt ::= import_name | import_from
import_stmt ::= import_name
import_stmt ::= import_from
## import_name ::= IMPORT dotted_as_names
import_name ::= IMPORT dotted_as_names
## import_from ::= ('from' ('.'* dotted_name | '.'+)
## 'import' ('*' | '(' import_as_names ')' | import_as_names))
import_from ::= FROM dots_dotted_name_or_dots import_list
import_as_name ::= NAME
import_as_name ::= NAME AS NAME
dotted_as_name ::= dotted_name
dotted_as_name ::= dotted_name AS NAME
dots_dotted_name_or_dots ::= dots dotted_name
dots_dotted_name_or_dots ::= DOT dots
dots ::= DOT*
## 'import' ('*' | '(' import_as_names ')' | import_as_names))
import_list ::= IMPORT STAR
import_list ::= IMPORT LPAREN import_as_names RPAREN
import_list ::= IMPORT import_as_names
## import_as_names ::= import_as_name ((',' import_as_name)+\) [',']
# Note: we don't do the opt comma at the end
import_as_names ::= import_as_name comma_import_as_names
## (',' import_as_name)+
comma_import_as_names ::= comma_import_as_names comma_import_as_name
comma_import_as_names ::=
## ',' import_as_name
comma_import_as_name ::= COMMA import_as_name
comma_dotted_as_names ::= dotted_as_name+
dotted_as_names ::= dotted_as_name comma_dotted_as_names
comma_dotted_as_names ::= comma_dotted_as_names COMMA dotted_as_name
comma_dotted_as_names ::=
dotted_name ::= NAME dot_names
dot_names ::= dot_names DOT NAME
dot_names ::=
"""
def p_compund_stmt(self, args):
"""
compound_stmt ::= if_stmt
compound_stmt ::= while_stmt
compound_stmt ::= for_stmt
compound_stmt ::= try_stmt
compound_stmt ::= with_stmt
compound_stmt ::= funcdef
compound_stmt ::= classdef
compound_stmt ::= decorated
if_stmt ::= IF test COLON suite elif_suites else_suite_opt
if_stmt ::= IF test COLON NEWLINE suite elif_suites else_suite_opt
elif_suites ::= elif_suites ELIF test COLON suite
elif_suites ::=
else_suite_opt ::= ELSE COLON suite
else_suite_opt ::=
## while_stmt ::= 'while' test ':' suite ['else' ':' suite]
while_stmt ::= WHILE test COLON suite else_suite_opt
## for_stmt ::= 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
for_stmt ::= FOR exprlist IN testlist COLON suite else_colon_suite_opt
## ['else' ':' suite]
else_colon_suite_opt ::= ELSE COLON suite
else_colon_suite_opt ::=
## try_stmt ::= ('try' ':' suite
## ((except_clause ':' suite)+
## ['else' ':' suite]
## ['finally' ':' suite] |
## 'finally' ':' suite))
## with_stmt ::= with' test [ with_var ] ':' suite
with_stmt ::= WITH test with_var_opt COLON suite
with_var_opt ::= with_var?
## with_var ::= 'as' expr
with_var ::= AS expr
suite ::= stmt_plus
suite ::= NEWLINE indent stmt_plus NEWLINE DEDENT
suite ::= NEWLINE indent stmt_plus DEDENT
indent ::= INDENT comments
indent ::= INDENT
"""
def parse_python2(python_stmts, start='file_input',
show_tokens=False, parser_debug=DEFAULT_DEBUG, check=False):
assert isinstance(python_stmts, str)
tokens = Python2Scanner().tokenize(python_stmts)
if show_tokens:
for t in tokens:
print(t)
# For heavy grammar debugging:
# parser_debug = {'rules': True, 'transition': True, 'reduce': True,
# 'errorstack': 'full', 'context': True, 'dups': True}
# Normal debugging:
# parser_debug = {'rules': False, 'transition': False, 'reduce': True,
# 'errorstack': 'full', 'context': True, 'dups': True}
parser = PythonParser(start=start, debug=parser_debug)
if check:
parser.check_grammar()
return parser.parse(tokens)
if __name__ == '__main__':
if len(sys.argv) == 1:
for python2_stmts in (
# # "if True: pass",
# """
# while True:
# if False:
# continue
# """,
# "if True: pass",
"""return f()""",
):
print(python2_stmts)
print('-' * 30)
ast = parse_python2(python2_stmts + ENDMARKER,
start='file_input', show_tokens=False, check=True)
print(ast)
print('=' * 30)
else:
python2_stmts = " ".join(sys.argv[1:])
parse_python2(python2_stmts, show_tokens=False, check=True)
| mit | -2,205,583,393,401,705,700 | 31.015674 | 88 | 0.527808 | false |
maru/fiubar | fiubar/config/settings/local.py | 1 | 2473 | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
from django.contrib.messages import constants as message_constants
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = get_secret('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/2.0/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = get_secret('DJANGO_SECRET_KEY',
default='Z&r}+t&ZTLV`*M3`i|50FWCPWfdyuPigh8')
# DATABASE CONFIGURATION
DATABASES['default'] = get_secret('DATABASE_DEFAULT', DATABASES['default'])
MESSAGE_LEVEL = message_constants.DEBUG
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = get_secret('DJANGO_EMAIL_BACKEND',
'django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
INSTALLED_APPS += ['debug_toolbar']
INTERNAL_IPS = ['127.0.0.1', ]
# tricks to have debug toolbar when developing with docker
if get_secret('USE_DOCKER', default='no') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
ACCOUNT_DEFAULT_HTTP_PROTOCOL = get_secret('ACCOUNT_DEFAULT_HTTP_PROTOCOL',
default='http')
# ACCOUNT_ADAPTER = 'fiubar.models.SignupClosedAdapter'
ALLOWED_HOSTS = get_secret('DJANGO_ALLOWED_HOSTS',
default=['127.0.0.1', 'localhost'])
| mit | -7,611,459,301,191,556,000 | 29.158537 | 80 | 0.534169 | false |
NicovincX2/Python-3.5 | Génie logiciel/Architecture logicielle/Patron de conception/Patron de structure/flyweight.py | 1 | 2973 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://codesnipers.com/?q=python-flyweights"""
import weakref
class FlyweightMeta(type):
def __new__(mcs, name, parents, dct):
"""
:param name: class name
:param parents: class parents
:param dct: dict: includes class attributes, class methods,
static methods, etc
:return: new class
"""
# set up instances pool
dct['pool'] = weakref.WeakValueDictionary()
return super(FlyweightMeta, mcs).__new__(mcs, name, parents, dct)
@staticmethod
def _serialize_params(cls, *args, **kwargs):
"""Serialize input parameters to a key.
Simple implementation is just to serialize it as a string
"""
args_list = map(str, args)
args_list.extend([str(kwargs), cls.__name__])
key = ''.join(args_list)
return key
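    # Illustrative note (added comment): with the plain string serialization
    # above, a call like Card2('10', 'h', a=1) maps to the pool key
    # "10h{'a': 1}Card2" -- the positional args, the kwargs repr and the
    # class name joined together.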
def __call__(cls, *args, **kwargs):
key = FlyweightMeta._serialize_params(cls, *args, **kwargs)
pool = getattr(cls, 'pool', {})
instance = pool.get(key)
if not instance:
instance = super(FlyweightMeta, cls).__call__(*args, **kwargs)
pool[key] = instance
return instance
class Card(object):
"""The object pool. Has builtin reference counting"""
_CardPool = weakref.WeakValueDictionary()
"""Flyweight implementation. If the object exists in the
pool just return it (instead of creating a new one)"""
def __new__(cls, value, suit):
obj = Card._CardPool.get(value + suit)
if not obj:
obj = object.__new__(cls)
Card._CardPool[value + suit] = obj
obj.value, obj.suit = value, suit
return obj
# def __init__(self, value, suit):
# self.value, self.suit = value, suit
def __repr__(self):
return "<Card: %s%s>" % (self.value, self.suit)
class Card2(object):
__metaclass__ = FlyweightMeta
def __init__(self, *args, **kwargs):
# print('Init {}: {}'.format(self.__class__, (args, kwargs)))
pass
if __name__ == '__main__':
# comment __new__ and uncomment __init__ to see the difference
c1 = Card('9', 'h')
c2 = Card('9', 'h')
print(c1, c2)
print(c1 == c2, c1 is c2)
print(id(c1), id(c2))
c1.temp = None
c3 = Card('9', 'h')
print(hasattr(c3, 'temp'))
c1 = c2 = c3 = None
c3 = Card('9', 'h')
print(hasattr(c3, 'temp'))
# Tests with metaclass
instances_pool = getattr(Card2, 'pool')
cm1 = Card2('10', 'h', a=1)
cm2 = Card2('10', 'h', a=1)
cm3 = Card2('10', 'h', a=2)
assert (cm1 == cm2) != cm3
assert (cm1 is cm2) is not cm3
assert len(instances_pool) == 2
del cm1
assert len(instances_pool) == 2
del cm2
assert len(instances_pool) == 1
del cm3
assert len(instances_pool) == 0
### OUTPUT ###
# (<Card: 9h>, <Card: 9h>)
# (True, True)
# (31903856, 31903856)
# True
# False
| gpl-3.0 | -3,190,265,489,521,152,500 | 24.62931 | 74 | 0.557686 | false |
drivefast/pycipherwallet | example/http_router.py | 1 | 3024 | import time
import bottle
import bcrypt
import sqlalchemy
from sqlalchemy.sql import text as sql_statement
import cipherwallet.api_router
ROOT = '/path/to/pycipherwallet/example'
@bottle.route('/<folder:re:css>/<filename:re:.*\.css>')
@bottle.route('/<folder:re:js>/<filename:re:.*\.js>')
@bottle.route('/<folder:re:img>/<filename:re:.*\.(png|jpg|ico)>')
def static_css(folder, filename):
return bottle.static_file(folder + "/" + filename, root=ROOT)
@bottle.route('/<filename:re:.*\.html>')
def static(filename):
return bottle.static_file(filename, root=ROOT)
@bottle.route('/js/cipherwallet.js')
def cipherwalletjs():
return bottle.static_file("js/cipherwallet.js", root=ROOT)
@bottle.post('/user/<user_id>')
def create_user(user_id):
"""
This sample web service is created to look similar to what is called with a POST method
by your signup web page when the user presses the "create user" submit button. Form
data is POSTed from the signup page.
If data signup page data was loaded from the mobile app (QR code scanning), we also
register the user to use cipherwallet (QR code scanning) for the logins
    This should mostly be *your* procedure to create a user record, and should work regardless
of whether cipherwallet is active or not
"""
try:
# connect to the database (normally, the cipherwallet sdk will connect to the same database)
# we use a sqlite database here as an example
db_engine = sqlalchemy.create_engine('sqlite:///your.db', echo=True)
db = db_engine.connect()
except:
bottle.abort(503, "Service Unavailable")
# make sure we have valid data
firstname = bottle.request.POST.get('firstname', "").strip()
password1 = bottle.request.POST.get('password1', "").strip()
if (
user_id is None or len(user_id) < 5 or len(user_id) > 64 or
len(firstname) < 1 or len(firstname) > 64 or
len(password1) < 5 or len(password1) > 64
):
bottle.abort(400, "Bad Request")
    # hash the password (you DO store the passwords hashed, don't you?)
password = bcrypt.hashpw(password1, bcrypt.gensalt())
# if the user already exists, delete it
# (obviously, you wouldn't do that on your real website)
db.execute(
sql_statement("DELETE FROM users WHERE email = :user_id;"),
user_id=user_id
)
# now add the user
ret = db.execute(
sql_statement(
"INSERT INTO users(firstname, email, password, created_on) " +
"VALUES (:firstname, :email, :password, :now);"
),
firstname=firstname,
email=user_id,
password=password,
now=time.time()
)
if ret.rowcount:
return {
'firstname': firstname,
'email': user_id,
}
else:
bottle.abort(503, "Service Unavailable")
if __name__ == "__main__":
bottle.debug(True)
bottle.run(host="127.0.0.1", port=8070, reloader=True)
| mit | -8,174,461,802,536,404,000 | 34.576471 | 100 | 0.642526 | false |
zhlinh/leetcode | 0038.Count and Say/solution.py | 1 | 1175 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-02-17
Last_modify: 2016-02-17
******************************************
'''
'''
The count-and-say sequence is the sequence of integers beginning as follows:
1, 11, 21, 1211, 111221, ...
1 is read off as "one 1" or 11.
11 is read off as "two 1s" or 21.
21 is read off as "one 2, then one 1" or 1211.
Given an integer n, generate the nth sequence.
Note: The sequence of integers will be represented as a string.
'''
class Solution(object):
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
curr = "1"
for i in range(n - 1):
count = 1
prev = curr
say = prev[0]
curr = ""
for j in range(1, len(prev)):
if prev[j] == say:
count += 1
else:
curr += str(count) + say
count = 1
say = prev[j]
curr += str(count) + say
return curr
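

# Illustrative usage (added example, not part of the original submission):
# the sequence starts "1", "11", "21", "1211", "111221", "312211", ...
if __name__ == "__main__":
    sol = Solution()
    assert sol.countAndSay(1) == "1"
    assert sol.countAndSay(4) == "1211"
    assert sol.countAndSay(5) == "111221"
    print(sol.countAndSay(6))  # -> 312211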
| apache-2.0 | -3,460,842,501,020,716,500 | 25.111111 | 76 | 0.455319 | false |
vitorio/ocropodium | ocradmin/nodelib/nodes/cuneiform.py | 1 | 1938 | """
Cuneiform Recogniser
"""
from __future__ import absolute_import
import os
import codecs
import shutil
import tempfile
import subprocess as sp
import numpy
from nodetree import node
from . import base
from .. import stages, types, utils
class CuneiformRecognizer(base.CommandLineRecognizerNode):
"""
Recognize an image using Cuneiform.
"""
binary = "cuneiform"
stage = stages.RECOGNIZE
intypes = [numpy.ndarray]
parameters = [
dict(name="single_column", type="bool", value=False)
]
def get_command(self, outfile, image):
"""
Cuneiform command line. Simplified for now.
"""
args = [self.binary, "-f", "hocr", "-o", outfile]
if self._params.get("single_column", False):
args.extend(["--singlecolumn"])
return args + [image]
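    # Illustrative example (added comment): get_command("/tmp/out.hocr",
    # "/tmp/page.png") with single_column enabled would return
    # ["cuneiform", "-f", "hocr", "-o", "/tmp/out.hocr", "--singlecolumn",
    #  "/tmp/page.png"].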
def process(self, binary):
"""
Convert a full page.
"""
hocr = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.close()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as btmp:
btmp.close()
self.write_binary(btmp.name, binary)
args = self.get_command(tmp.name, btmp.name)
self.logger.debug("Running: '%s'", " ".join(args))
proc = sp.Popen(args, stderr=sp.PIPE)
err = proc.stderr.read()
if proc.wait() != 0:
print err
return u"!!! %s CONVERSION ERROR %d: %s !!!" % (
os.path.basename(self.binary).upper(),
proc.returncode, err)
with codecs.open(tmp.name, "r", "utf8") as tread:
hocr = tread.read()
os.unlink(tmp.name)
os.unlink(btmp.name)
utils.set_progress(self.logger, self.progress_func, 100, 100)
return hocr
| apache-2.0 | -6,946,050,462,958,837,000 | 27.925373 | 82 | 0.54644 | false |
AVOXI/b2bua | sippy/SipTransactionManager.py | 1 | 30074 | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from Timeout import Timeout
from SipHeader import SipHeader
from SipResponse import SipResponse
from SipRequest import SipRequest
from SipAddress import SipAddress
from SipRoute import SipRoute
from SipHeader import SipHeader
from datetime import datetime
from hashlib import md5
from traceback import print_exc
from time import time
import sys, socket
class NETS_1918(object):
nets = (('10.0.0.0', 0xffffffffl << 24), ('172.16.0.0', 0xffffffffl << 20), ('192.168.0.0', 0xffffffffl << 16))
nets = [(reduce(lambda z, v: (int(z) << 8l) | int(v), x[0].split('.', 4)) & x[1], x[1]) for x in nets]
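    # The comprehension above precomputes (network, mask) integer pairs for
    # the RFC 1918 private blocks 10.0.0.0/8, 172.16.0.0/12 and
    # 192.168.0.0/16, so check1918() below can test an address with a single
    # bitwise AND per block.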
def check1918(addr):
try:
addr = reduce(lambda x, y: (int(x) << 8l) | int(y), addr.split('.', 4))
for naddr, mask in NETS_1918.nets:
if addr & mask == naddr:
return True
except:
pass
return False
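# Illustrative behaviour of check1918() (added comment):
#   check1918('10.1.2.3')    -> True
#   check1918('192.168.0.5') -> True
#   check1918('8.8.8.8')     -> False
#   check1918('not-an-ip')   -> False  (the bare except swallows parse errors)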
class SipTransactionConsumer(object):
compact = False
cobj = None
def __init__(self, cobj, compact):
self.compact = compact
self.cobj = cobj
def cleanup(self):
self.cobj = None
class SipTransaction(object):
tout = None
tid = None
address = None
data = None
checksum = None
cb_ifver = None
uack = False
compact = False
def cleanup(self):
self.ack = None
self.cancel = None
self.resp_cb = None
self.cancel_cb = None
self.noack_cb = None
self.ack_cb = None
self.r487 = None
self.address = None
self.teA = self.teB = self.teC = self.teD = self.teE = self.teF = self.teG = None
self.tid = None
self.userv = None
self.r408 = None
# Symbolic states names
class SipTransactionState(object):
pass
class TRYING(SipTransactionState):
# Request sent, but no reply received at all
pass
class RINGING(SipTransactionState):
# Provisional reply has been received
pass
class COMPLETED(SipTransactionState):
# Transaction already ended with final reply
pass
class CONFIRMED(SipTransactionState):
# Transaction already ended with final reply and ACK received (server-only)
pass
class TERMINATED(SipTransactionState):
# Transaction ended abnormally (request timeout and such)
pass
class UACK(SipTransactionState):
# UAC wants to generate ACK at its own discretion
pass
class local4remote(object):
global_config = None
cache_r2l = None
cache_r2l_old = None
cache_l2s = None
skt = None
handleIncoming = None
fixed = False
ploss_out_rate = 0.0
pdelay_out_max = 0.0
def __init__(self, global_config, handleIncoming):
if not global_config.has_key('_xmpp_mode') or not global_config['_xmpp_mode']:
from Udp_server import Udp_server, Udp_server_opts
self.Udp_server_opts = Udp_server_opts
self.udp_server_class = Udp_server
else:
from XMPP_server import XMPP_server, XMPP_server_opts
self.Udp_server_opts = XMPP_server_opts
self.udp_server_class = XMPP_server
self.global_config = global_config
self.cache_r2l = {}
self.cache_r2l_old = {}
self.cache_l2s = {}
self.handleIncoming = handleIncoming
try:
            # Python can be compiled with IPv6 support, but if the kernel
            # lacks it we would get an exception creating the socket.
            # Work around that by trying to create a socket and checking
            # whether we get an exception.
socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
except:
socket.has_ipv6 = False
if 'my' in dir(global_config['_sip_address']):
if socket.has_ipv6:
laddresses = (('0.0.0.0', global_config['_sip_port']), ('[::]', global_config['_sip_port']))
else:
laddresses = (('0.0.0.0', global_config['_sip_port']),)
else:
laddresses = ((global_config['_sip_address'], global_config['_sip_port']),)
self.fixed = True
for laddress in laddresses:
sopts = self.Udp_server_opts(laddress, handleIncoming)
sopts.ploss_out_rate = self.ploss_out_rate
sopts.pdelay_out_max = self.pdelay_out_max
server = self.udp_server_class(global_config, sopts)
self.cache_l2s[laddress] = server
def getServer(self, address, is_local = False):
if self.fixed:
return self.cache_l2s.items()[0][1]
if not is_local:
laddress = self.cache_r2l.get(address[0], None)
if laddress == None:
laddress = self.cache_r2l_old.get(address[0], None)
if laddress != None:
self.cache_r2l[address[0]] = laddress
if laddress != None:
#print 'local4remot-1: local address for %s is %s' % (address[0], laddress[0])
return self.cache_l2s[laddress]
if address[0].startswith('['):
family = socket.AF_INET6
lookup_address = address[0][1:-1]
else:
family = socket.AF_INET
lookup_address = address[0]
self.skt = socket.socket(family, socket.SOCK_DGRAM)
ai = socket.getaddrinfo(lookup_address, None, family)
if family == socket.AF_INET:
_address = (ai[0][4][0], address[1])
else:
_address = (ai[0][4][0], address[1], ai[0][4][2], ai[0][4][3])
self.skt.connect(_address)
if family == socket.AF_INET:
laddress = (self.skt.getsockname()[0], self.global_config['_sip_port'])
else:
laddress = ('[%s]' % self.skt.getsockname()[0], self.global_config['_sip_port'])
self.cache_r2l[address[0]] = laddress
else:
laddress = address
server = self.cache_l2s.get(laddress, None)
if server == None:
            sopts = self.Udp_server_opts(laddress, self.handleIncoming)
sopts.ploss_out_rate = self.ploss_out_rate
sopts.pdelay_out_max = self.pdelay_out_max
server = self.udp_server_class(self.global_config, sopts)
self.cache_l2s[laddress] = server
#print 'local4remot-2: local address for %s is %s' % (address[0], laddress[0])
return server
def rotateCache(self):
self.cache_r2l_old = self.cache_r2l
self.cache_r2l = {}
class SipTransactionManager(object):
global_config = None
l4r = None
tclient = None
tserver = None
req_cb = None
l1rcache = None
l2rcache = None
nat_traversal = False
req_consumers = None
provisional_retr = 0
ploss_out_rate = 0.0
pdelay_out_max = 0.0
def __init__(self, global_config, req_cb = None):
self.global_config = global_config
self.l4r = local4remote(global_config, self.handleIncoming)
self.l4r.ploss_out_rate = self.ploss_out_rate
self.l4r.pdelay_out_max = self.pdelay_out_max
self.tclient = {}
self.tserver = {}
self.req_cb = req_cb
self.l1rcache = {}
self.l2rcache = {}
self.req_consumers = {}
Timeout(self.rCachePurge, 32, -1)
def handleIncoming(self, data, address, server, rtime):
if len(data) < 32:
return
rtime = rtime.realt
self.global_config['_sip_logger'].write('RECEIVED message from %s:%d:\n' % address, data, ltime = rtime)
checksum = md5(data).digest()
retrans = self.l1rcache.get(checksum, None)
if retrans == None:
retrans = self.l2rcache.get(checksum, None)
if retrans != None:
userv, data, address = retrans
if data == None:
return
self.transmitData(userv, data, address)
return
if data.startswith('SIP/2.0 '):
try:
resp = SipResponse(data)
tid = resp.getTId(True, True)
except Exception, exception:
print datetime.now(), 'can\'t parse SIP response from %s:%d: %s:' % (address[0], address[1], str(exception))
print '-' * 70
print_exc(file = sys.stdout)
print '-' * 70
print data
print '-' * 70
sys.stdout.flush()
self.l1rcache[checksum] = (None, None, None)
return
if resp.getSCode()[0] < 100 or resp.getSCode()[0] > 999:
print datetime.now(), 'invalid status code in SIP response from %s:%d:' % address
print data
sys.stdout.flush()
self.l1rcache[checksum] = (None, None, None)
return
resp.rtime = rtime
if not self.tclient.has_key(tid):
#print 'no transaction with tid of %s in progress' % str(tid)
self.l1rcache[checksum] = (None, None, None)
return
t = self.tclient[tid]
if self.nat_traversal and resp.countHFs('contact') > 0 and not check1918(t.address[0]):
cbody = resp.getHFBody('contact')
if not cbody.asterisk:
curl = cbody.getUrl()
if check1918(curl.host):
curl.host, curl.port = address
resp.setSource(address)
self.incomingResponse(resp, t, checksum)
else:
if self.req_cb == None:
return
try:
req = SipRequest(data)
tids = req.getTIds()
except Exception, exception:
print datetime.now(), 'can\'t parse SIP request from %s:%d: %s:' % (address[0], address[1], str(exception))
print '-' * 70
print_exc(file = sys.stdout)
print '-' * 70
print data
print '-' * 70
sys.stdout.flush()
self.l1rcache[checksum] = (None, None, None)
return
req.rtime = rtime
via0 = req.getHFBody('via')
ahost, aport = via0.getAddr()
rhost, rport = address
if self.nat_traversal and rport != aport and check1918(ahost):
req.nated = True
if ahost != rhost:
via0.params['received'] = rhost
if via0.params.has_key('rport') or req.nated:
via0.params['rport'] = str(rport)
if self.nat_traversal and req.countHFs('contact') > 0 and req.countHFs('via') == 1:
try:
cbody = req.getHFBody('contact')
except Exception, exception:
print datetime.now(), 'can\'t parse SIP request from %s:%d: %s:' % (address[0], address[1], str(exception))
print '-' * 70
print_exc(file = sys.stdout)
print '-' * 70
print data
print '-' * 70
sys.stdout.flush()
self.l1rcache[checksum] = (None, None, None)
return
if not cbody.asterisk:
curl = cbody.getUrl()
if check1918(curl.host) or curl.port == 0 or curl.host == '255.255.255.255':
curl.host, curl.port = address
req.nated = True
req.setSource(address)
self.incomingRequest(req, checksum, tids, server)
# 1. Client transaction methods
def newTransaction(self, msg, resp_cb = None, laddress = None, userv = None, \
cb_ifver = 1, compact = False):
t = SipTransaction()
t.rtime = time()
t.compact = compact
t.method = msg.getMethod()
t.cb_ifver = cb_ifver
t.tid = msg.getTId(True, True)
if self.tclient.has_key(t.tid):
raise ValueError('BUG: Attempt to initiate transaction with the same TID as existing one!!!')
t.tout = 0.5
t.fcode = None
t.address = msg.getTarget()
if userv == None:
if laddress == None:
t.userv = self.l4r.getServer(t.address)
else:
t.userv = self.l4r.getServer(laddress, is_local = True)
else:
t.userv = userv
t.data = msg.localStr(*t.userv.uopts.laddress, compact = t.compact)
if t.method == 'INVITE':
try:
t.expires = msg.getHFBody('expires').getNum()
if t.expires <= 0:
t.expires = 300
except IndexError:
t.expires = 300
t.needack = True
t.ack = msg.genACK()
t.cancel = msg.genCANCEL()
else:
t.expires = 32
t.needack = False
t.ack = None
t.cancel = None
t.cancelPending = False
t.resp_cb = resp_cb
t.teA = Timeout(self.timerA, t.tout, 1, t)
if resp_cb != None:
t.r408 = msg.genResponse(408, 'Request Timeout')
t.teB = Timeout(self.timerB, 32.0, 1, t)
t.teC = None
t.state = TRYING
self.tclient[t.tid] = t
self.transmitData(t.userv, t.data, t.address)
return t
def cancelTransaction(self, t, reason = None):
        # If we got at least one provisional reply (state == RINGING),
        # start the CANCEL transaction; otherwise defer it
if t.state != RINGING:
t.cancelPending = True
else:
if reason != None:
t.cancel.appendHeader(SipHeader(body = reason))
self.newTransaction(t.cancel, userv = t.userv)
def incomingResponse(self, msg, t, checksum):
# In those two states upper level already notified, only do ACK retransmit
# if needed
if t.state == TERMINATED:
return
if t.state == TRYING:
# Stop timers
if t.teA != None:
t.teA.cancel()
t.teA = None
if t.state in (TRYING, RINGING):
if t.teB != None:
t.teB.cancel()
t.teB = None
if msg.getSCode()[0] < 200:
            # Provisional response - leave everything as is, except that
# change state and reload timeout timer
if t.state == TRYING:
t.state = RINGING
if t.cancelPending:
self.newTransaction(t.cancel, userv = t.userv)
t.cancelPending = False
t.teB = Timeout(self.timerB, t.expires, 1, t)
self.l1rcache[checksum] = (None, None, None)
if t.resp_cb != None:
if t.cb_ifver == 1:
t.resp_cb(msg)
else:
t.resp_cb(msg, t)
else:
# Final response - notify upper layer and remove transaction
if t.resp_cb != None:
if t.cb_ifver == 1:
t.resp_cb(msg)
else:
t.resp_cb(msg, t)
if t.needack:
# Prepare and send ACK if necessary
fcode = msg.getSCode()[0]
tag = msg.getHFBody('to').getTag()
if tag != None:
t.ack.getHFBody('to').setTag(tag)
rAddr = None
if msg.getSCode()[0] >= 200 and msg.getSCode()[0] < 300:
# Some hairy code ahead
if msg.countHFs('contact') > 0:
rTarget = msg.getHFBody('contact').getUrl().getCopy()
else:
rTarget = None
routes = [x.getCopy() for x in msg.getHFBodys('record-route')]
routes.reverse()
if len(routes) > 0:
if not routes[0].getUrl().lr:
if rTarget != None:
routes.append(SipRoute(address = SipAddress(url = rTarget)))
rTarget = routes.pop(0).getUrl()
rAddr = rTarget.getAddr()
else:
rAddr = routes[0].getAddr()
elif rTarget != None:
rAddr = rTarget.getAddr()
if rTarget != None:
t.ack.setRURI(rTarget)
if rAddr != None:
t.ack.setTarget(rAddr)
t.ack.delHFs('route')
t.ack.appendHeaders([SipHeader(name = 'route', body = x) for x in routes])
if fcode >= 200 and fcode < 300:
t.ack.getHFBody('via').genBranch()
if rAddr == None:
rAddr = t.address
if not t.uack:
self.transmitMsg(t.userv, t.ack, rAddr, checksum, t.compact)
else:
t.state = UACK
t.ack_rAddr = rAddr
t.ack_checksum = checksum
self.l1rcache[checksum] = (None, None, None)
t.teG = Timeout(self.timerG, 64, 1, t)
return
else:
self.l1rcache[checksum] = (None, None, None)
del self.tclient[t.tid]
t.cleanup()
def timerA(self, t):
#print 'timerA', t
self.transmitData(t.userv, t.data, t.address)
t.tout *= 2
t.teA = Timeout(self.timerA, t.tout, 1, t)
def timerB(self, t):
#print 'timerB', t
t.teB = None
if t.teA != None:
t.teA.cancel()
t.teA = None
t.state = TERMINATED
#print '2: Timeout(self.timerC, 32.0, 1, t)', t
t.teC = Timeout(self.timerC, 32.0, 1, t)
if t.resp_cb == None:
return
t.r408.rtime = time()
if t.cb_ifver == 1:
t.resp_cb(t.r408)
else:
t.resp_cb(t.r408, t)
#try:
# t.resp_cb(SipRequest(t.data).genResponse(408, 'Request Timeout'))
#except:
# print 'SipTransactionManager: unhandled exception when processing response!'
def timerC(self, t):
#print 'timerC', t
#print self.tclient
t.teC = None
del self.tclient[t.tid]
t.cleanup()
# 2. Server transaction methods
def incomingRequest(self, msg, checksum, tids, server):
for tid in tids:
if self.tclient.has_key(tid):
t = self.tclient[tid]
resp = msg.genResponse(482, 'Loop Detected')
self.transmitMsg(server, resp, resp.getHFBody('via').getTAddr(), checksum, \
t.compact)
return
if msg.getMethod() != 'ACK':
tid = msg.getTId(wBRN = True)
else:
tid = msg.getTId(wTTG = True)
t = self.tserver.get(tid, None)
if t != None:
#print 'existing transaction'
if msg.getMethod() == t.method:
                # Duplicate received; check whether we have already sent a
                # response to this request
if t.data != None:
self.transmitData(t.userv, t.data, t.address, checksum)
return
elif msg.getMethod() == 'CANCEL':
# RFC3261 says that we have to reply 200 OK in all cases if
# there is such transaction
resp = msg.genResponse(200, 'OK')
self.transmitMsg(t.userv, resp, resp.getHFBody('via').getTAddr(), checksum, \
t.compact)
if t.state in (TRYING, RINGING):
self.doCancel(t, msg.rtime, msg)
elif msg.getMethod() == 'ACK' and t.state == COMPLETED:
t.state = CONFIRMED
if t.teA != None:
t.teA.cancel()
t.teA = None
t.teD.cancel()
# We have done with the transaction, no need to wait for timeout
del self.tserver[t.tid]
if t.ack_cb != None:
t.ack_cb(msg)
t.cleanup()
self.l1rcache[checksum] = (None, None, None)
elif msg.getMethod() == 'ACK':
# Some ACK that doesn't match any existing transaction.
# Drop and forget it - upper layer is unlikely to be interested
            # in seeing this anyway.
print datetime.now(), 'unmatched ACK transaction - ignoring'
sys.stdout.flush()
self.l1rcache[checksum] = (None, None, None)
elif msg.getMethod() == 'CANCEL':
resp = msg.genResponse(481, 'Call Leg/Transaction Does Not Exist')
self.transmitMsg(server, resp, resp.getHFBody('via').getTAddr(), checksum)
else:
#print 'new transaction', msg.getMethod()
t = SipTransaction()
t.tid = tid
t.state = TRYING
t.teA = None
t.teD = None
t.teE = None
t.teF = None
t.teG = None
t.method = msg.getMethod()
t.rtime = msg.rtime
t.data = None
t.address = None
t.noack_cb = None
t.ack_cb = None
t.cancel_cb = None
t.checksum = checksum
if server.uopts.laddress[0] not in ('0.0.0.0', '[::]'):
t.userv = server
else:
# For messages received on the wildcard interface find
# or create more specific server.
t.userv = self.l4r.getServer(msg.getSource())
if msg.getMethod() == 'INVITE':
t.r487 = msg.genResponse(487, 'Request Terminated')
t.needack = True
t.branch = msg.getHFBody('via').getBranch()
try:
e = msg.getHFBody('expires').getNum()
if e <= 0:
e = 300
except IndexError:
e = 300
t.teE = Timeout(self.timerE, e, 1, t)
else:
t.r487 = None
t.needack = False
t.branch = None
self.tserver[t.tid] = t
for consumer in self.req_consumers.get(t.tid[0], ()):
cobj = consumer.cobj.isYours(msg)
if cobj != None:
t.compact = consumer.compact
rval = cobj.recvRequest(msg, t)
break
else:
rval = self.req_cb(msg, t)
if rval == None:
if t.teA != None or t.teD != None or t.teE != None or t.teF != None:
return
if self.tserver.has_key(t.tid):
del self.tserver[t.tid]
t.cleanup()
return
resp, t.cancel_cb, t.noack_cb = rval
if resp != None:
self.sendResponse(resp, t)
def regConsumer(self, consumer, call_id, compact = False):
cons = SipTransactionConsumer(consumer, compact)
self.req_consumers.setdefault(call_id, []).append(cons)
def unregConsumer(self, consumer, call_id):
# Usually there will be only one consumer per call_id, so that
# optimize management for this case
consumers = self.req_consumers.pop(call_id)
for cons in consumers:
if cons.cobj != consumer:
continue
consumers.remove(cons)
cons.cleanup()
if len(consumers) > 0:
self.req_consumers[call_id] = consumers
break
else:
self.req_consumers[call_id] = consumers
raise IndexError('unregConsumer: consumer %s for call-id %s is not registered' % \
(str(consumer), call_id))
def sendResponse(self, resp, t = None, retrans = False, ack_cb = None):
#print self.tserver
if t == None:
tid = resp.getTId(wBRN = True)
t = self.tserver[tid]
if t.state not in (TRYING, RINGING) and not retrans:
raise ValueError('BUG: attempt to send reply on already finished transaction!!!')
scode = resp.getSCode()[0]
toHF = resp.getHFBody('to')
if scode > 100 and toHF.getTag() == None:
toHF.genTag()
t.data = resp.localStr(*t.userv.uopts.laddress, compact = t.compact)
t.address = resp.getHFBody('via').getTAddr()
self.transmitData(t.userv, t.data, t.address, t.checksum)
if scode < 200:
t.state = RINGING
if self.provisional_retr > 0 and scode > 100:
if t.teF != None:
t.teF.cancel()
t.teF = Timeout(self.timerF, self.provisional_retr, 1, t)
else:
t.state = COMPLETED
if t.teE != None:
t.teE.cancel()
t.teE = None
if t.teF != None:
t.teF.cancel()
t.teF = None
if t.needack:
# Schedule removal of the transaction
t.ack_cb = ack_cb
t.teD = Timeout(self.timerD, 32.0, 1, t)
if scode >= 200:
                    # Black magick to allow the proxy to send us another INVITE
                    # with the same branch and From tag. Use To tag to match
# ACK transaction after this point. Branch tag in ACK
# could differ as well.
del self.tserver[t.tid]
t.tid = list(t.tid[:-1])
t.tid.append(resp.getHFBody('to').getTag())
t.tid = tuple(t.tid)
self.tserver[t.tid] = t
# Install retransmit timer if necessary
t.tout = 0.5
t.teA = Timeout(self.timerA, t.tout, 1, t)
else:
# We have done with the transaction
del self.tserver[t.tid]
t.cleanup()
def doCancel(self, t, rtime = None, req = None):
if rtime == None:
rtime = time()
if t.r487 != None:
self.sendResponse(t.r487, t, True)
if t.cancel_cb != None:
t.cancel_cb(rtime, req)
def timerD(self, t):
#print 'timerD'
t.teD = None
if t.teA != None:
t.teA.cancel()
t.teA = None
if t.noack_cb != None and t.state != CONFIRMED:
t.noack_cb()
del self.tserver[t.tid]
t.cleanup()
def timerE(self, t):
#print 'timerE'
t.teE = None
if t.teF != None:
t.teF.cancel()
t.teF = None
if t.state in (TRYING, RINGING):
if t.r487 != None:
t.r487.reason = 'Request Expired'
self.doCancel(t)
# Timer to retransmit the last provisional reply every
# 2 seconds
def timerF(self, t):
#print 'timerF', t.state
t.teF = None
if t.state == RINGING and self.provisional_retr > 0:
self.transmitData(t.userv, t.data, t.address)
t.teF = Timeout(self.timerF, self.provisional_retr, 1, t)
def timerG(self, t):
#print 'timerG', t.state
t.teG = None
if t.state == UACK:
print datetime.now(), 'INVITE transaction stuck in the UACK state, possible UAC bug'
def rCachePurge(self):
self.l2rcache = self.l1rcache
self.l1rcache = {}
self.l4r.rotateCache()
def transmitMsg(self, userv, msg, address, cachesum, compact = False):
data = msg.localStr(*userv.uopts.laddress, compact = compact)
self.transmitData(userv, data, address, cachesum)
def transmitData(self, userv, data, address, cachesum = None):
userv.send_to(data, address)
self.global_config['_sip_logger'].write('SENDING message to %s:%d:\n' % address, data)
if cachesum != None:
self.l1rcache[cachesum] = (userv, data, address)
def sendACK(self, t):
#print 'sendACK', t.state
if t.teG != None:
t.teG.cancel()
t.teG = None
self.transmitMsg(t.userv, t.ack, t.ack_rAddr, t.ack_checksum, t.compact)
del self.tclient[t.tid]
t.cleanup()
| bsd-2-clause | 6,950,058,240,984,168,000 | 38.675462 | 127 | 0.519785 | false |
dferens/django-angularmagic | angularmagic/base.py | 1 | 1190 | class Serializer(object):
"""
Converts complex objects such as querysets or model instances to
native Python datatypes that can be converted to xml/json/anything later.
Is a light wrapper of ``rest_framework.serializers.Serializer``.
:param model: class if serializer is class-specific.
:type model: type
"""
model = None
def __init__(self, obj):
"""
Initializes serializer instance.
:param obj: object to serialize
"""
pass
def serialize(self):
"""
Actually serializes previously passed object.
:return:
"""
raise NotImplementedError
class Renderer(object):
"""
Renders passed data to string.
Is a light wrapper of ``rest_framework.renderers.Renderer``.
:param format: (like 'json' or 'xml')
"""
format = 'unknown'
def __init__(self, data):
"""
Initializes renderer instance.
:param data: data to render
"""
pass
def render(self):
"""
Actually renders previously passed data.
:return: rendered data as string
"""
raise NotImplementedError
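# Example sketch (illustrative only): a minimal concrete Serializer/Renderer
# pair built on the stdlib ``json`` module instead of rest_framework. The names
# below (DictSerializer, JSONRenderer) are hypothetical and not part of this
# package's public API.
import json
class DictSerializer(Serializer):
    """Serializer sketch assuming ``obj`` is already a plain dict of natives."""
    def __init__(self, obj):
        self.obj = obj
    def serialize(self):
        return dict(self.obj)
class JSONRenderer(Renderer):
    """Renderer sketch emitting JSON text."""
    format = 'json'
    def __init__(self, data):
        self.data = data
    def render(self):
        return json.dumps(self.data)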
| mit | 8,314,070,814,512,707,000 | 20.25 | 77 | 0.590756 | false |
jamesblunt/psutil | psutil/_pslinux.py | 1 | 45271 | # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux platform implementation."""
from __future__ import division
import base64
import errno
import functools
import os
import re
import socket
import struct
import sys
import warnings
from collections import namedtuple, defaultdict
from . import _common
from . import _psposix
from . import _psutil_linux as cext
from . import _psutil_posix as cext_posix
from ._common import isfile_strict, usage_percent
from ._common import NIC_DUPLEX_FULL, NIC_DUPLEX_HALF, NIC_DUPLEX_UNKNOWN
from ._compat import PY3, long
if sys.version_info >= (3, 4):
import enum
else:
enum = None
__extra__all__ = [
# io prio constants
"IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
"IOPRIO_CLASS_IDLE",
# connection status constants
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", ]
# --- constants
HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
# RLIMIT_* constants, not guaranteed to be present on all kernels
if HAS_PRLIMIT:
for name in dir(cext):
if name.startswith('RLIM'):
__extra__all__.append(name)
# Number of clock ticks per second
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
BOOT_TIME = None # set later
if PY3:
FS_ENCODING = sys.getfilesystemencoding()
if enum is None:
AF_LINK = socket.AF_PACKET
else:
AddressFamily = enum.IntEnum('AddressFamily',
{'AF_LINK': socket.AF_PACKET})
AF_LINK = AddressFamily.AF_LINK
# ioprio_* constants http://linux.die.net/man/2/ioprio_get
if enum is None:
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
else:
class IOPriority(enum.IntEnum):
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
globals().update(IOPriority.__members__)
# taken from /fs/proc/array.c
PROC_STATUSES = {
"R": _common.STATUS_RUNNING,
"S": _common.STATUS_SLEEPING,
"D": _common.STATUS_DISK_SLEEP,
"T": _common.STATUS_STOPPED,
"t": _common.STATUS_TRACING_STOP,
"Z": _common.STATUS_ZOMBIE,
"X": _common.STATUS_DEAD,
"x": _common.STATUS_DEAD,
"K": _common.STATUS_WAKE_KILL,
"W": _common.STATUS_WAKING
}
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
TCP_STATUSES = {
"01": _common.CONN_ESTABLISHED,
"02": _common.CONN_SYN_SENT,
"03": _common.CONN_SYN_RECV,
"04": _common.CONN_FIN_WAIT1,
"05": _common.CONN_FIN_WAIT2,
"06": _common.CONN_TIME_WAIT,
"07": _common.CONN_CLOSE,
"08": _common.CONN_CLOSE_WAIT,
"09": _common.CONN_LAST_ACK,
"0A": _common.CONN_LISTEN,
"0B": _common.CONN_CLOSING
}
# set later from __init__.py
NoSuchProcess = None
ZombieProcess = None
AccessDenied = None
TimeoutExpired = None
# --- utils
def open_text(fname):
"""On Python 3 opens a file in text mode by using fs encoding.
On Python 2 this is just an alias for open(name, 'rt').
"""
kw = dict(encoding=FS_ENCODING) if PY3 else dict()
return open(fname, "rt", **kw)
# --- named tuples
def _get_cputimes_fields():
"""Return a namedtuple of variable fields depending on the
CPU times available on this Linux kernel version which may be:
(user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
[guest_nice]]])
"""
with open('/proc/stat', 'rb') as f:
values = f.readline().split()[1:]
fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
vlen = len(values)
if vlen >= 8:
# Linux >= 2.6.11
fields.append('steal')
if vlen >= 9:
# Linux >= 2.6.24
fields.append('guest')
if vlen >= 10:
# Linux >= 3.2.0
fields.append('guest_nice')
return fields
scputimes = namedtuple('scputimes', _get_cputimes_fields())
svmem = namedtuple(
'svmem', ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached'])
pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
pmmap_grouped = namedtuple(
'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
'shared_dirty', 'private_clean', 'private_dirty',
'referenced', 'anonymous', 'swap'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# --- system memory
def virtual_memory():
total, free, buffers, shared, _, _ = cext.linux_sysinfo()
cached = active = inactive = None
with open('/proc/meminfo', 'rb') as f:
for line in f:
if line.startswith(b"Cached:"):
cached = int(line.split()[1]) * 1024
elif line.startswith(b"Active:"):
active = int(line.split()[1]) * 1024
elif line.startswith(b"Inactive:"):
inactive = int(line.split()[1]) * 1024
if (cached is not None and
active is not None and
inactive is not None):
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# https://github.com/giampaolo/psutil/issues/313
msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
cached = active = inactive = 0
avail = free + buffers + cached
used = total - free
percent = usage_percent((total - avail), total, _round=1)
return svmem(total, avail, percent, used, free,
active, inactive, buffers, cached)
def swap_memory():
_, _, _, _, total, free = cext.linux_sysinfo()
used = total - free
percent = usage_percent(used, total, _round=1)
# get pgin/pgouts
with open("/proc/vmstat", "rb") as f:
sin = sout = None
for line in f:
# values are expressed in 4 kilo bytes, we want bytes instead
if line.startswith(b'pswpin'):
sin = int(line.split(b' ')[1]) * 4 * 1024
elif line.startswith(b'pswpout'):
sout = int(line.split(b' ')[1]) * 4 * 1024
if sin is not None and sout is not None:
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# https://github.com/giampaolo/psutil/issues/313
msg = "'sin' and 'sout' swap memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
sin = sout = 0
return _common.sswap(total, used, free, percent, sin, sout)
# --- CPUs
def cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
(user, nice, system, idle, iowait, irq, softirq [steal, [guest,
[guest_nice]]])
Last 3 fields may not be available on all Linux kernel versions.
"""
with open('/proc/stat', 'rb') as f:
values = f.readline().split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
return scputimes(*fields)
def per_cpu_times():
"""Return a list of namedtuple representing the CPU times
for every CPU available on the system.
"""
cpus = []
with open('/proc/stat', 'rb') as f:
# get rid of the first line which refers to system wide CPU stats
f.readline()
for line in f:
if line.startswith(b'cpu'):
values = line.split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
entry = scputimes(*fields)
cpus.append(entry)
return cpus
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# as a second fallback we try to parse /proc/cpuinfo
num = 0
with open('/proc/cpuinfo', 'rb') as f:
for line in f:
if line.lower().startswith(b'processor'):
num += 1
        # unknown format (e.g. armel/sparc architectures), see:
# https://github.com/giampaolo/psutil/issues/200
# try to parse /proc/stat as a last resort
if num == 0:
search = re.compile('cpu\d')
with open_text('/proc/stat') as f:
for line in f:
line = line.split(' ')[0]
if search.match(line):
num += 1
if num == 0:
# mimic os.cpu_count()
return None
return num
def cpu_count_physical():
"""Return the number of physical cores in the system."""
mapping = {}
current_info = {}
with open('/proc/cpuinfo', 'rb') as f:
for line in f:
line = line.strip().lower()
if not line:
# new section
if (b'physical id' in current_info and
b'cpu cores' in current_info):
mapping[current_info[b'physical id']] = \
current_info[b'cpu cores']
current_info = {}
else:
# ongoing section
if (line.startswith(b'physical id') or
line.startswith(b'cpu cores')):
key, value = line.split(b'\t:', 1)
current_info[key] = int(value)
# mimic os.cpu_count()
return sum(mapping.values()) or None
# --- other system functions
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname == ':0.0' or hostname == ':0':
hostname = 'localhost'
nt = _common.suser(user, tty or None, hostname, tstamp)
retlist.append(nt)
return retlist
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
global BOOT_TIME
with open('/proc/stat', 'rb') as f:
for line in f:
if line.startswith(b'btime'):
ret = float(line.strip().split()[1])
BOOT_TIME = ret
return ret
raise RuntimeError("line 'btime' not found in /proc/stat")
# --- processes
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir(b'/proc') if x.isdigit()]
def pid_exists(pid):
"""Check For the existence of a unix pid."""
return _psposix.pid_exists(pid)
# --- network
class Connections:
"""A wrapper on top of /proc/net/* files, retrieving per-process
and system-wide open connections (TCP, UDP, UNIX) similarly to
"netstat -an".
Note: in case of UNIX sockets we're only able to determine the
local endpoint/path, not the one it's connected to.
According to [1] it would be possible but not easily.
[1] http://serverfault.com/a/417946
"""
def __init__(self):
tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
unix = ("unix", socket.AF_UNIX, None)
self.tmap = {
"all": (tcp4, tcp6, udp4, udp6, unix),
"tcp": (tcp4, tcp6),
"tcp4": (tcp4,),
"tcp6": (tcp6,),
"udp": (udp4, udp6),
"udp4": (udp4,),
"udp6": (udp6,),
"unix": (unix,),
"inet": (tcp4, tcp6, udp4, udp6),
"inet4": (tcp4, udp4),
"inet6": (tcp6, udp6),
}
def get_proc_inodes(self, pid):
inodes = defaultdict(list)
for fd in os.listdir("/proc/%s/fd" % pid):
try:
inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
except OSError as err:
# ENOENT == file which is gone in the meantime;
# os.stat('/proc/%s' % self.pid) will be done later
# to force NSP (if it's the case)
if err.errno in (errno.ENOENT, errno.ESRCH):
continue
elif err.errno == errno.EINVAL:
# not a link
continue
else:
raise
else:
if inode.startswith('socket:['):
# the process is using a socket
inode = inode[8:][:-1]
inodes[inode].append((pid, int(fd)))
return inodes
def get_all_inodes(self):
inodes = {}
for pid in pids():
try:
inodes.update(self.get_proc_inodes(pid))
except OSError as err:
# os.listdir() is gonna raise a lot of access denied
# exceptions in case of unprivileged user; that's fine
# as we'll just end up returning a connection with PID
# and fd set to None anyway.
                # Both netstat -an and lsof do the same so it's
# unlikely we can do any better.
# ENOENT just means a PID disappeared on us.
if err.errno not in (
errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES):
raise
return inodes
def decode_address(self, addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IP address portion is a little or big endian four-byte
hexadecimal number; that is, the least significant byte is listed
first, so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if PY3:
ip = ip.encode('ascii')
if family == socket.AF_INET:
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(family, base64.b16decode(ip))
else: # IPv6
# old version - let's keep it, just in case...
# ip = ip.decode('hex')
# return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
else:
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('<4I', *struct.unpack('<4I', ip)))
return (ip, port)
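    # Worked example of the decoding above on a little-endian host (values
    # taken from the docstring): "0500000A" reverses byte-wise to 0A 00 00 05,
    # i.e. "10.0.0.5", and port "0016" is hex for 22.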
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith('6') and not os.path.exists(file):
# IPv6 not supported
return
with open_text(file) as f:
f.readline() # skip the first line
for line in f:
try:
_, laddr, raddr, status, _, _, _, _, _, inode = \
line.split()[:10]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (
file, line))
if inode in inodes:
# # We assume inet sockets are unique, so we error
# # out if there are multiple references to the
# # same inode. We won't do this for UNIX sockets.
# if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
# raise ValueError("ambiguos inode with multiple "
# "PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
yield (fd, family, type_, laddr, raddr, status, pid)
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
# see: https://github.com/giampaolo/psutil/issues/675
kw = dict(encoding=FS_ENCODING, errors='replace') if PY3 else dict()
with open(file, 'rt', **kw) as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (
file, line))
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
def retrieve(self, kind, pid=None):
if kind not in self.tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in self.tmap])))
if pid is not None:
inodes = self.get_proc_inodes(pid)
if not inodes:
# no connections for this process
return []
else:
inodes = self.get_all_inodes()
ret = set()
for f, family, type_ in self.tmap[kind]:
if family in (socket.AF_INET, socket.AF_INET6):
ls = self.process_inet(
"/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
else:
ls = self.process_unix(
"/proc/net/%s" % f, family, inodes, filter_pid=pid)
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
if pid:
conn = _common.pconn(fd, family, type_, laddr, raddr,
status)
else:
conn = _common.sconn(fd, family, type_, laddr, raddr,
status, bound_pid)
ret.add(conn)
return list(ret)
_connections = Connections()
def net_connections(kind='inet'):
"""Return system-wide open connections."""
return _connections.retrieve(kind)
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
with open_text("/proc/net/dev") as f:
lines = f.readlines()
retdict = {}
for line in lines[2:]:
colon = line.rfind(':')
assert colon > 0, repr(line)
name = line[:colon].strip()
fields = line[colon + 1:].strip().split()
bytes_recv = int(fields[0])
packets_recv = int(fields[1])
errin = int(fields[2])
dropin = int(fields[3])
bytes_sent = int(fields[8])
packets_sent = int(fields[9])
errout = int(fields[10])
dropout = int(fields[11])
retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
errin, errout, dropin, dropout)
return retdict
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
duplex_map = {cext.DUPLEX_FULL: NIC_DUPLEX_FULL,
cext.DUPLEX_HALF: NIC_DUPLEX_HALF,
cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN}
names = net_io_counters().keys()
ret = {}
for name in names:
isup, duplex, speed, mtu = cext.net_if_stats(name)
duplex = duplex_map[duplex]
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
net_if_addrs = cext_posix.net_if_addrs
# --- disks
def disk_io_counters():
"""Return disk I/O statistics for every disk installed on the
system as a dict of raw tuples.
"""
# man iostat states that sectors are equivalent with blocks and
# have a size of 512 bytes since 2.4 kernels. This value is
# needed to calculate the amount of disk I/O in bytes.
SECTOR_SIZE = 512
# determine partitions we want to look for
partitions = []
with open_text("/proc/partitions") as f:
lines = f.readlines()[2:]
for line in reversed(lines):
_, _, _, name = line.split()
if name[-1].isdigit():
# we're dealing with a partition (e.g. 'sda1'); 'sda' will
# also be around but we want to omit it
partitions.append(name)
else:
if not partitions or not partitions[-1].startswith(name):
# we're dealing with a disk entity for which no
# partitions have been defined (e.g. 'sda' but
# 'sda1' was not around), see:
# https://github.com/giampaolo/psutil/issues/338
partitions.append(name)
#
retdict = {}
with open_text("/proc/diskstats") as f:
lines = f.readlines()
for line in lines:
# http://www.mjmwired.net/kernel/Documentation/iostats.txt
fields = line.split()
if len(fields) > 7:
_, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
fields[:11]
else:
# from kernel 2.6.0 to 2.6.25
_, _, name, reads, rbytes, writes, wbytes = fields
rtime, wtime = 0, 0
if name in partitions:
rbytes = int(rbytes) * SECTOR_SIZE
wbytes = int(wbytes) * SECTOR_SIZE
reads = int(reads)
writes = int(writes)
rtime = int(rtime)
wtime = int(wtime)
retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
return retdict
def disk_partitions(all=False):
"""Return mounted disk partitions as a list of namedtuples"""
fstypes = set()
with open_text("/proc/filesystems") as f:
for line in f:
line = line.strip()
if not line.startswith("nodev"):
fstypes.add(line.strip())
else:
# ignore all lines starting with "nodev" except "nodev zfs"
fstype = line.split("\t")[1]
if fstype == "zfs":
fstypes.add("zfs")
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
if device == '' or fstype not in fstypes:
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
disk_usage = _psposix.disk_usage
# --- decorators
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and IOError exceptions
into NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
def wrap_exceptions_w_zombie(fun):
"""Same as above but also handles zombies."""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return wrap_exceptions(fun)(self)
except NoSuchProcess:
if not pid_exists(self.pid):
raise
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
return wrapper
class Process(object):
"""Linux process implementation."""
__slots__ = ["pid", "_name", "_ppid"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
@wrap_exceptions
def name(self):
with open_text("/proc/%s/stat" % self.pid) as f:
data = f.read()
# XXX - gets changed later and probably needs refactoring
return data[data.find('(') + 1:data.rfind(')')]
def exe(self):
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except OSError as err:
if err.errno in (errno.ENOENT, errno.ESRCH):
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s" % self.pid):
return ""
else:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
# readlink() might return paths containing null bytes ('\x00').
# Certain names have ' (deleted)' appended. Usually this is
# bogus as the file actually exists. Either way that's not
# important as we don't want to discriminate executables which
# have been deleted.
exe = exe.split('\x00')[0]
if exe.endswith(' (deleted)') and not os.path.exists(exe):
exe = exe[:-10]
return exe
@wrap_exceptions
def cmdline(self):
with open_text("/proc/%s/cmdline" % self.pid) as f:
data = f.read()
if data.endswith('\x00'):
data = data[:-1]
return [x for x in data.split('\x00')]
@wrap_exceptions
def terminal(self):
tmap = _psposix._get_terminal_map()
with open("/proc/%s/stat" % self.pid, 'rb') as f:
tty_nr = int(f.read().split(b' ')[6])
try:
return tmap[tty_nr]
except KeyError:
return None
if os.path.exists('/proc/%s/io' % os.getpid()):
@wrap_exceptions
def io_counters(self):
fname = "/proc/%s/io" % self.pid
with open(fname, 'rb') as f:
rcount = wcount = rbytes = wbytes = None
for line in f:
if rcount is None and line.startswith(b"syscr"):
rcount = int(line.split()[1])
elif wcount is None and line.startswith(b"syscw"):
wcount = int(line.split()[1])
elif rbytes is None and line.startswith(b"read_bytes"):
rbytes = int(line.split()[1])
elif wbytes is None and line.startswith(b"write_bytes"):
wbytes = int(line.split()[1])
for x in (rcount, wcount, rbytes, wbytes):
if x is None:
raise NotImplementedError(
"couldn't read all necessary info from %r" % fname)
return _common.pio(rcount, wcount, rbytes, wbytes)
else:
def io_counters(self):
raise NotImplementedError("couldn't find /proc/%s/io (kernel "
"too old?)" % self.pid)
@wrap_exceptions
def cpu_times(self):
with open("/proc/%s/stat" % self.pid, 'rb') as f:
st = f.read().strip()
# ignore the first two values ("pid (exe)")
st = st[st.find(b')') + 2:]
values = st.split(b' ')
utime = float(values[11]) / CLOCK_TICKS
stime = float(values[12]) / CLOCK_TICKS
return _common.pcputimes(utime, stime)
@wrap_exceptions
def wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except _psposix.TimeoutExpired:
# support for private module import
if TimeoutExpired is None:
raise
raise TimeoutExpired(timeout, self.pid, self._name)
@wrap_exceptions
def create_time(self):
with open("/proc/%s/stat" % self.pid, 'rb') as f:
st = f.read().strip()
# ignore the first two values ("pid (exe)")
st = st[st.rfind(b')') + 2:]
values = st.split(b' ')
# According to documentation, starttime is in field 21 and the
# unit is jiffies (clock ticks).
# We first divide it for clock ticks and then add uptime returning
# seconds since the epoch, in UTC.
# Also use cached value if available.
bt = BOOT_TIME or boot_time()
return (float(values[19]) / CLOCK_TICKS) + bt
@wrap_exceptions
def memory_info(self):
with open("/proc/%s/statm" % self.pid, 'rb') as f:
vms, rss = f.readline().split()[:2]
return _common.pmem(int(rss) * PAGESIZE,
int(vms) * PAGESIZE)
@wrap_exceptions
def memory_info_ex(self):
# ============================================================
# | FIELD | DESCRIPTION | AKA | TOP |
# ============================================================
# | rss | resident set size | | RES |
# | vms | total program size | size | VIRT |
# | shared | shared pages (from shared mappings) | | SHR |
# | text | text ('code') | trs | CODE |
# | lib | library (unused in Linux 2.6) | lrs | |
# | data | data + stack | drs | DATA |
# | dirty | dirty pages (unused in Linux 2.6) | dt | |
# ============================================================
with open("/proc/%s/statm" % self.pid, "rb") as f:
vms, rss, shared, text, lib, data, dirty = \
[int(x) * PAGESIZE for x in f.readline().split()[:7]]
return pextmem(rss, vms, shared, text, lib, data, dirty)
if os.path.exists('/proc/%s/smaps' % os.getpid()):
@wrap_exceptions
def memory_maps(self):
"""Return process's mapped memory regions as a list of named tuples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
with open_text("/proc/%s/smaps" % self.pid) as f:
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if not fields[0].endswith(':'):
# new block section
yield (current_block.pop(), data)
current_block.append(line)
else:
try:
data[fields[0]] = int(fields[1]) * 1024
except ValueError:
if fields[0].startswith('VmFlags:'):
# see issue #369
continue
else:
raise ValueError("don't know how to inte"
"rpret line %r" % line)
yield (current_block.pop(), data)
ls = []
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = \
hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
ls.append((
addr, perms, path,
data['Rss:'],
data.get('Size:', 0),
data.get('Pss:', 0),
data.get('Shared_Clean:', 0),
data.get('Shared_Dirty:', 0),
data.get('Private_Clean:', 0),
data.get('Private_Dirty:', 0),
data.get('Referenced:', 0),
data.get('Anonymous:', 0),
data.get('Swap:', 0)
))
return ls
else:
def memory_maps(self):
msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or " \
"CONFIG_MMU kernel configuration option is not enabled" \
% self.pid
raise NotImplementedError(msg)
@wrap_exceptions_w_zombie
def cwd(self):
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
path = os.readlink("/proc/%s/cwd" % self.pid)
return path.replace('\x00', '')
@wrap_exceptions
def num_ctx_switches(self):
vol = unvol = None
with open("/proc/%s/status" % self.pid, "rb") as f:
for line in f:
if line.startswith(b"voluntary_ctxt_switches"):
vol = int(line.split()[1])
elif line.startswith(b"nonvoluntary_ctxt_switches"):
unvol = int(line.split()[1])
if vol is not None and unvol is not None:
return _common.pctxsw(vol, unvol)
        raise NotImplementedError(
            "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches' "
"fields were not found in /proc/%s/status; the kernel is "
"probably older than 2.6.23" % self.pid)
@wrap_exceptions
def num_threads(self):
with open("/proc/%s/status" % self.pid, "rb") as f:
for line in f:
if line.startswith(b"Threads:"):
return int(line.split()[1])
raise NotImplementedError("line not found")
@wrap_exceptions
def threads(self):
thread_ids = os.listdir("/proc/%s/task" % self.pid)
thread_ids.sort()
retlist = []
hit_enoent = False
for thread_id in thread_ids:
fname = "/proc/%s/task/%s/stat" % (self.pid, thread_id)
try:
with open(fname, 'rb') as f:
st = f.read().strip()
except IOError as err:
if err.errno == errno.ENOENT:
# no such file or directory; it means thread
# disappeared on us
hit_enoent = True
continue
raise
# ignore the first two values ("pid (exe)")
st = st[st.find(b')') + 2:]
values = st.split(b' ')
utime = float(values[11]) / CLOCK_TICKS
stime = float(values[12]) / CLOCK_TICKS
ntuple = _common.pthread(int(thread_id), utime, stime)
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def nice_get(self):
# with open_text('/proc/%s/stat' % self.pid) as f:
# data = f.read()
# return int(data.split()[18])
# Use C implementation
return cext_posix.getpriority(self.pid)
@wrap_exceptions
def nice_set(self, value):
return cext_posix.setpriority(self.pid, value)
@wrap_exceptions
def cpu_affinity_get(self):
return cext.proc_cpu_affinity_get(self.pid)
@wrap_exceptions
def cpu_affinity_set(self, cpus):
try:
cext.proc_cpu_affinity_set(self.pid, cpus)
except OSError as err:
if err.errno == errno.EINVAL:
allcpus = tuple(range(len(per_cpu_times())))
for cpu in cpus:
if cpu not in allcpus:
raise ValueError("invalid CPU #%i (choose between %s)"
% (cpu, allcpus))
raise
# only starting from kernel 2.6.13
if hasattr(cext, "proc_ioprio_get"):
@wrap_exceptions
def ionice_get(self):
ioclass, value = cext.proc_ioprio_get(self.pid)
if enum is not None:
ioclass = IOPriority(ioclass)
return _common.pionice(ioclass, value)
@wrap_exceptions
def ionice_set(self, ioclass, value):
if value is not None:
if not PY3 and not isinstance(value, (int, long)):
                    msg = "value argument is not an integer (got %r)" % value
raise TypeError(msg)
if not 0 <= value <= 7:
raise ValueError(
"value argument range expected is between 0 and 7")
if ioclass in (IOPRIO_CLASS_NONE, None):
if value:
msg = "can't specify value with IOPRIO_CLASS_NONE " \
"(got %r)" % value
raise ValueError(msg)
ioclass = IOPRIO_CLASS_NONE
value = 0
elif ioclass == IOPRIO_CLASS_IDLE:
if value:
msg = "can't specify value with IOPRIO_CLASS_IDLE " \
"(got %r)" % value
raise ValueError(msg)
value = 0
elif ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
if value is None:
# TODO: add comment explaining why this is 4 (?)
value = 4
else:
                # otherwise we would get OSError(EINVAL)
raise ValueError("invalid ioclass argument %r" % ioclass)
return cext.proc_ioprio_set(self.pid, ioclass, value)
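        # Illustrative (hypothetical) calls consistent with the checks above:
        # ionice_set(IOPRIO_CLASS_IDLE, None) or ionice_set(IOPRIO_CLASS_BE, 4).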
if HAS_PRLIMIT:
@wrap_exceptions
def rlimit(self, resource, limits=None):
# If pid is 0 prlimit() applies to the calling process and
# we don't want that. We should never get here though as
# PID 0 is not supported on Linux.
if self.pid == 0:
raise ValueError("can't use prlimit() against PID 0 process")
try:
if limits is None:
# get
return cext.linux_prlimit(self.pid, resource)
else:
# set
if len(limits) != 2:
raise ValueError(
"second argument must be a (soft, hard) tuple, "
"got %s" % repr(limits))
soft, hard = limits
cext.linux_prlimit(self.pid, resource, soft, hard)
except OSError as err:
if err.errno == errno.ENOSYS and pid_exists(self.pid):
# I saw this happening on Travis:
# https://travis-ci.org/giampaolo/psutil/jobs/51368273
raise ZombieProcess(self.pid, self._name, self._ppid)
else:
raise
@wrap_exceptions
def status(self):
with open("/proc/%s/status" % self.pid, 'rb') as f:
for line in f:
if line.startswith(b"State:"):
letter = line.split()[1]
if PY3:
letter = letter.decode()
# XXX is '?' legit? (we're not supposed to return
# it anyway)
return PROC_STATUSES.get(letter, '?')
@wrap_exceptions
def open_files(self):
retlist = []
files = os.listdir("/proc/%s/fd" % self.pid)
hit_enoent = False
for fd in files:
file = "/proc/%s/fd/%s" % (self.pid, fd)
try:
file = os.readlink(file)
except OSError as err:
# ENOENT == file which is gone in the meantime
if err.errno in (errno.ENOENT, errno.ESRCH):
hit_enoent = True
continue
elif err.errno == errno.EINVAL:
# not a link
continue
else:
raise
else:
# If file is not an absolute path there's no way
# to tell whether it's a regular file or not,
# so we skip it. A regular file is always supposed
# to be absolutized though.
if file.startswith('/') and isfile_strict(file):
ntuple = _common.popenfile(file, int(fd))
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def connections(self, kind='inet'):
ret = _connections.retrieve(kind, self.pid)
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return ret
@wrap_exceptions
def num_fds(self):
return len(os.listdir("/proc/%s/fd" % self.pid))
@wrap_exceptions
def ppid(self):
fpath = "/proc/%s/status" % self.pid
with open(fpath, 'rb') as f:
for line in f:
if line.startswith(b"PPid:"):
# PPid: nnnn
return int(line.split()[1])
raise NotImplementedError("line 'PPid' not found in %s" % fpath)
@wrap_exceptions
def uids(self):
fpath = "/proc/%s/status" % self.pid
with open(fpath, 'rb') as f:
for line in f:
if line.startswith(b'Uid:'):
_, real, effective, saved, fs = line.split()
return _common.puids(int(real), int(effective), int(saved))
raise NotImplementedError("line 'Uid' not found in %s" % fpath)
@wrap_exceptions
def gids(self):
fpath = "/proc/%s/status" % self.pid
with open(fpath, 'rb') as f:
for line in f:
if line.startswith(b'Gid:'):
_, real, effective, saved, fs = line.split()
return _common.pgids(int(real), int(effective), int(saved))
raise NotImplementedError("line 'Gid' not found in %s" % fpath)
| bsd-3-clause | 3,847,961,493,654,271,000 | 36.321517 | 80 | 0.510216 | false |
adamgilman/tourbillon | tourbillon/streams.py | 1 | 1427 | from datetime import datetime
from threading import Timer
class TourbillonStream(object):
def __init__(self, tb, stream_name):
self.tb = tb
self.r = self.tb.r
self.stream_name = stream_name
self.channel = None
# self.message_queue
self.halt_next = False
self.seconds_delay = 1
def add(self, tick_tuple):
if type(tick_tuple) is not tuple:
raise Exception("Tick data must be a tuple (datetime, data)")
if type(tick_tuple[0]) is not datetime:
raise Exception("Tick data must be a tuple (datetime, data)")
self.r.rpush(self.stream_name, tick_tuple)
def format_message(self, message):
return "%s: %s" % (self.stream_name, message)
def length(self):
if self.channel is not None:
return self.r.llen(self.stream_name)
else:
return None
def output_channel(self, output_channel):
self.channel = output_channel
def announce(self, message):
self.r.publish(self.channel, message)
def set_delay(self, seconds_delay):
self.seconds_delay = seconds_delay
def start(self):
if self.channel is None:
raise Exception("Channel must be set before starting")
self.queueNextEmit()
def stop(self):
self.halt_next = True
def queueNextEmit(self):
self.timer = Timer(self.seconds_delay, self.emitter)
self.timer.start()
def emitter(self):
#self.announce("test emitter")
self.announce( self.r.lpop(self.stream_name) )
if not self.halt_next:
self.queueNextEmit()
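# Standalone sketch of the self-rescheduling Timer pattern used by
# queueNextEmit()/emitter() above; illustrative only, runs without redis,
# and the helper name below is hypothetical. Set state["halt"] = True to stop.
def _demo_timer_pattern(delay=1.0, state=None):
    state = state if state is not None else {"halt": False}
    print("tick")
    if not state["halt"]:
        Timer(delay, _demo_timer_pattern, (delay, state)).start()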
| mit | 9,180,900,712,580,681,000 | 22.8 | 64 | 0.704275 | false |
cmos3511/cmos_linux | python/op/nop/core/report/report.py | 1 | 5302 | """
Generate report module of OnePiece Platform
Usage:
a) dic :
1. instantiation
2. call method: read_dic
b) json :
1. instantiation
2. call method: read_json
API:
Input: gen_rpt_path
gen_rpt_path: generate report path
    Output:
1) stdout: show the report on the stdout-screen
2) log: save the report to the specified file(.rpt)
1.json file and dic format:
{"R1": {"C1": "V11",
"C2": "V12"},
"R2": {"C1": "V21",
"C2": "V22"},
"R3": {"C1": "V31",
"C2": "V32"},
}
2. generated ASCII table example:
--------------------------
| | C1 | C2 |
--------------------------
| R1 | V11 | V12 |
--------------------------
| R2 | V21 | V22 |
--------------------------
| R3 | V31 | V33 |
--------------------------
"""
import os
import json
import math
import texttable
from utils import pcom
LOG = pcom.gen_logger(__name__)
class Report:
"""generate specified ASCII table"""
def __init__(self, rpt_path):
"""rpt_path: generated report file path
self._data_dic: generated table need data"""
self._rpt_path = rpt_path
self._data_dic = {}
def _set_dic(self, data_dic):
"""set the report need dic data"""
self._data_dic = data_dic
@classmethod
def _check_json(cls, json_path):
"""check if the given json file exists?"""
if os.path.exists(json_path):
return True
LOG.warning("The json file %s used to generate report does not exist", json_path)
return False
def _gen_rpt_dir(self):
"""if not exists: mkdir generated report dir
else: pass
"""
base_dir = os.path.dirname(os.path.abspath(self._rpt_path))
pcom.mkdir(LOG, base_dir)
@classmethod
    def _gen_proper_width(cls, lst):
"""Generate the appropriate width based on the list of column
['lavall', 'kevinf', 'wyatt_wang', 'guanyu']
--> [6, 6, 10, 6]
title = 6(first)
average = 28 / 4 = 7(sum/num)
1) if average >= title: return average
2) else: return title
--> return 7
"""
width_lst = [len(item) for item in lst]
title_width = width_lst[0]
average_width = sum(width_lst) / len(width_lst)
average_width = math.ceil(average_width)
if average_width >= title_width:
return average_width
return title_width
def _auto_adjust_col_width(self, nested_lst):
"""optimize texttable's output
Get the generated ASCII table's column width list besed on
the nested list.
The nested list is the texttable needed data
"""
col_width_lst = []
for index, _ in enumerate(nested_lst[0]):
tmp_lst = []
for lst in nested_lst:
tmp_lst.append(lst[index])
            col_width_lst.append(self._gen_proper_width(tmp_lst))
return col_width_lst
def _work_json_data(self):
"""convert json dic data to list type
        json format:
        {"R1": {"C1": "V11",
                "C2": "V12"},
         "R2": {"C1": "V21",
                "C2": "V22"},
         "R3": {"C1": "V31",
                "C2": "V32"}}
nested list format:
[['', 'C1', 'C2'],
['R1', 'V11', 'V12'],
['R2', 'V21', 'V22'],
['R3', 'V31', 'V32']]
"""
data_lst = []
row_lst = list(self._data_dic.keys())
col_lst = []
col_lst.insert(0, '')
col_lst.extend(self._data_dic[row_lst[0]])
data_lst.append(col_lst)
for row_title in row_lst:
tmp_lst = [row_title,]
for col_title in self._data_dic[row_title].keys():
tmp_lst.append(self._data_dic[row_title][col_title])
data_lst.append(tmp_lst)
return data_lst
def _gen_table(self):
"""generate the proper ASCII table by texttable
in: the specified dic format
out: the ASCII table
"""
data_lst = self._work_json_data()
width_lst = self._auto_adjust_col_width(data_lst)
table = texttable.Texttable()
table.set_cols_width(width_lst)
table.add_rows(data_lst)
report_table = table.draw()
return report_table
def _show_rpt(self, rpt_table):
"""show the report to stdout and save it to the specified file"""
LOG.info("The report for this sub_stage: %s%s", os.linesep, rpt_table)
with open(self._rpt_path, 'w') as w_rpt:
w_rpt.write(rpt_table)
def read_dic(self, data_dic):
"""generate report from a dic"""
self._set_dic(data_dic)
self._gen_rpt_dir()
rpt_table = self._gen_table()
self._show_rpt(rpt_table)
    def read_json(self, json_path):
        """generate report from the specified format json file"""
if self._check_json(json_path):
with open(json_path) as rd_json:
data_dic = json.load(rd_json)
self.read_dic(data_dic)
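# Example usage sketch (illustrative; the report path below is hypothetical):
def _example_report_usage():
    rpt = Report("/tmp/example.rpt")
    rpt.read_dic({"R1": {"C1": "V11", "C2": "V12"},
                  "R2": {"C1": "V21", "C2": "V22"}})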
| gpl-3.0 | 7,415,528,286,573,856,000 | 30.372781 | 89 | 0.50132 | false |
smart-cities/reading_aginova_sensors | scripts/parse_log.py | 1 | 2425 | #!/usr/bin/env python
"""Parse a log file and submit last values
Parse a specified log file looking the most recent data being inserted to the
'NOVA_LASTDATA_T' table and submit this over http. MAX_BYTES is used to
limit the volume of data that is read from the log file into memory.
"""
# beware this is horrible hacked together python, continue at your own risk...
import os
import re
import sys
import json
import urllib2
MIN_EPOCH = 0
MAX_BYTES = 1*1024*1024 #1MB
SUBMIT_URL = 'http://smartcities.switchsystems.co.uk/api/reading/send/%s'
def f_to_c(value):
""" Convert Fahrenheit to Celsius"""
return (value - 32) / 1.8
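# Illustrative spot-check of the conversion above: f_to_c(32.0) == 0.0 and
# f_to_c(71.5) is approximately 21.94.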
def send_data(device_id, epoch, value, sensor_name='TEMP'):
"""Send sensor data over http"""
data = {
'deviceId': device_id,
'sensorName': sensor_name,
'dataFloat': f_to_c(float(value)), # convert to Celsius
'timestamp': int(epoch)/1000, #timestamp in seconds
}
url = SUBMIT_URL % urllib2.quote(json.dumps(data))
#print url
return urllib2.urlopen(url).read()
def tail(handle, max_bytes=None):
    """Return the lines contained in the last n bytes"""
try:
if max_bytes:
handle.seek((-1 * max_bytes), os.SEEK_END)
else:
handle.seek(0)
except OSError:
handle.seek(0)
return ''.join(handle.read().decode('utf-8', 'ignore')).splitlines()[1:]
def scan_file(filename):
"""Scan through lines looking for INSERTS into NOVA_LASTDATA_T"""
data = {}
log_file = open(filename,'r')
for line in reversed(tail(log_file, MAX_BYTES)):
result = re.search(r"^INSERT INTO NOVA_LASTDATA_T VALUES\(\d,(\d*),(\d*),'temp',(\d*\.\d*).*$", line)
if result and result.group(1) not in data:
data[result.group(1)] = (result.group(2), result.group(3))
log_file.close()
return data
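# Illustrative example (made-up values) of a log line matched by the regex in
# scan_file() above:
#   INSERT INTO NOVA_LASTDATA_T VALUES(1,12345,1400000000000,'temp',71.5,...)
# which yields sensor_id '12345', epoch '1400000000000' (milliseconds) and
# temperature value '71.5'.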
if __name__ == '__main__':
if len(sys.argv) > 1:
if len(sys.argv) > 2:
MIN_EPOCH = int(sys.argv[2])
DATA = scan_file(sys.argv[1])
#print DATA
for sensor_id in DATA:
if DATA[sensor_id][0] > MIN_EPOCH:
send_data(sensor_id, DATA[sensor_id][0], DATA[sensor_id][1])
else:
print "Skipping data too old: %s, %s, %s" % (sensor_id,
DATA[sensor_id][0], DATA[sensor_id][1])
else:
print "USAGE: parse_log.py FILENAME [MIN_EPOCH]"
| gpl-2.0 | 3,628,607,066,047,131,000 | 29.696203 | 109 | 0.602474 | false |
mablae/weblate | weblate/trans/south_migrations/0018_auto__add_field_change_translation.py | 1 | 14751 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Change.translation'
db.add_column('trans_change', 'translation',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trans.Translation'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Change.translation'
db.delete_column('trans_change', 'translation_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lang.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'trans.change': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Change'},
'action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Translation']", 'null': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'trans.check': {
'Meta': {'object_name': 'Check'},
'check': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignore': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"})
},
'trans.comment': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Comment'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.dictionary': {
'Meta': {'ordering': "['source']", 'object_name': 'Dictionary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'trans.indexupdate': {
'Meta': {'object_name': 'IndexUpdate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']"})
},
'trans.project': {
'Meta': {'ordering': "['name']", 'object_name': 'Project'},
'commit_message': ('django.db.models.fields.CharField', [], {'default': "'Translated using Weblate.'", 'max_length': '200'}),
'committer_email': ('django.db.models.fields.EmailField', [], {'default': "'[email protected]'", 'max_length': '75'}),
'committer_name': ('django.db.models.fields.CharField', [], {'default': "'Weblate'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'merge_style': ('django.db.models.fields.CharField', [], {'default': "'merge'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'new_lang': ('django.db.models.fields.CharField', [], {'default': "'contact'", 'max_length': '10'}),
'push_on_commit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'set_translation_team': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'web': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'trans.subproject': {
'Meta': {'ordering': "['project__name', 'name']", 'object_name': 'SubProject'},
'branch': ('django.db.models.fields.CharField', [], {'default': "'master'", 'max_length': '50'}),
'file_format': ('django.db.models.fields.CharField', [], {'default': "'auto'", 'max_length': '50'}),
'filemask': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'push': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'report_source_bugs': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'repoweb': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'trans.suggestion': {
'Meta': {'object_name': 'Suggestion'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'target': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.translation': {
'Meta': {'ordering': "['language__name']", 'object_name': 'Translation'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'fuzzy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'}),
'lock_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'lock_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'subproject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.SubProject']"}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'translated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
},
'trans.unit': {
'Meta': {'ordering': "['position']", 'object_name': 'Unit'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'flags': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'fuzzy': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'source': ('django.db.models.fields.TextField', [], {}),
'target': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'translated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Translation']"})
}
}
complete_apps = ['trans']
| gpl-3.0 | 320,020,449,889,908,400 | 73.484848 | 182 | 0.550041 | false |
googleapis/api-client-staging | generated/python/googleapis-common-protos/google/api/source_info_pb2.py | 1 | 2577 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/source_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/source_info.proto',
package='google.api',
syntax='proto3',
serialized_options=_b('\n\016com.google.apiB\017SourceInfoProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI'),
serialized_pb=_b('\n\x1cgoogle/api/source_info.proto\x12\ngoogle.api\x1a\x19google/protobuf/any.proto\"8\n\nSourceInfo\x12*\n\x0csource_files\x18\x01 \x03(\x0b\x32\x14.google.protobuf.AnyBq\n\x0e\x63om.google.apiB\x0fSourceInfoProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
_SOURCEINFO = _descriptor.Descriptor(
name='SourceInfo',
full_name='google.api.SourceInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source_files', full_name='google.api.SourceInfo.source_files', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=71,
serialized_end=127,
)
_SOURCEINFO.fields_by_name['source_files'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['SourceInfo'] = _SOURCEINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SourceInfo = _reflection.GeneratedProtocolMessageType('SourceInfo', (_message.Message,), {
'DESCRIPTOR' : _SOURCEINFO,
'__module__' : 'google.api.source_info_pb2'
# @@protoc_insertion_point(class_scope:google.api.SourceInfo)
})
_sym_db.RegisterMessage(SourceInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
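# Usage sketch (illustrative only; not part of the generated module, and the
# packed message type below is an arbitrary example):
#
#   from google.protobuf import struct_pb2
#   info = SourceInfo()
#   info.source_files.add().Pack(struct_pb2.Struct())
#   data = info.SerializeToString()
#   restored = SourceInfo.FromString(data)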
| bsd-3-clause | 3,272,847,479,109,776,400 | 33.824324 | 339 | 0.742336 | false |
google-research/language | language/capwap/utils/text_utils.py | 1 | 11097 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for dealing with text."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import string
from bert import tokenization
from language.capwap.utils import nltk_utils
from language.capwap.utils import tensor_utils
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import lookup as contrib_lookup
TextInputs = collections.namedtuple(
"TextInputs", ["token_ids", "mask", "segment_ids", "positions"])
TextOutputs = collections.namedtuple("TextOutputs", ["token_ids", "mask"])
# ------------------------------------------------------------------------------
#
# Wrapper around the BERT vocabulary and word-piece tokenizer.
#
# ------------------------------------------------------------------------------
class Vocab(object):
"""Wrapper around the BERT tokenizer and vocabulary."""
PAD = "[PAD]"
UNK = "[UNK]"
SEP = "[SEP]"
CLS = "[CLS]"
IMG = "[IMG]"
ANS = "[A]"
QUE = "[Q]"
def __init__(self, vocab_file, do_lower_case):
# Load BERT tokenizer.
self._tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
if self.IMG not in self:
# Override [unused0] to point to IMG.
idx = self._tokenizer.vocab.pop("[unused0]")
self._tokenizer.vocab[self.IMG] = idx
self._tokenizer.inv_vocab[idx] = self.IMG
if self.ANS not in self:
# Override [unused1] to point to ANS.
idx = self._tokenizer.vocab.pop("[unused1]")
self._tokenizer.vocab[self.ANS] = idx
self._tokenizer.inv_vocab[idx] = self.ANS
if self.QUE not in self:
# Override [unused2] to point to QUE.
idx = self._tokenizer.vocab.pop("[unused2]")
self._tokenizer.vocab[self.QUE] = idx
self._tokenizer.inv_vocab[idx] = self.QUE
# Validate
for i in range(len(self)):
assert i in self._tokenizer.inv_vocab
for special_token in [self.PAD, self.UNK, self.SEP, self.CLS]:
assert special_token in self
def __len__(self):
return len(self._tokenizer.vocab)
def __contains__(self, token):
return token in self._tokenizer.vocab
def t2i(self, token):
return self._tokenizer.vocab[token]
def i2t(self, index):
return self._tokenizer.inv_vocab[index]
def tokenize(self, text):
"""Convert text to word pieces."""
return self._tokenizer.tokenize(text)
@staticmethod
def clean(wordpieces):
"""Clean word pieces."""
if Vocab.CLS in wordpieces:
idx = wordpieces.index(Vocab.CLS)
wordpieces = wordpieces[idx + 1:]
if Vocab.SEP in wordpieces:
idx = wordpieces.index(Vocab.SEP)
wordpieces = wordpieces[:idx]
if Vocab.PAD in wordpieces:
wordpieces = [w for w in wordpieces if w != Vocab.PAD]
# Various adhoc hacks.
adjusted = []
for w in wordpieces:
# Remove non-ascii.
try:
w.encode(encoding="utf-8").decode("ascii")
except UnicodeDecodeError:
continue
# Remove [unused*]
if w.startswith("[unused"):
continue
# Remove repeated word.
if not w.startswith("##") and adjusted and adjusted[-1] == w:
continue
adjusted.append(w)
return adjusted
@staticmethod
def detokenize(wordpieces):
"""Convert word pieces to text."""
wordpieces = Vocab.clean(wordpieces)
tokens = []
for w in wordpieces:
if w.startswith("##") and len(tokens):
tokens[-1] = tokens[-1] + w.lstrip("##")
else:
tokens.append(w)
return " ".join(tokens)
def get_string_lookup_table(self):
unk_idx = self._tokenizer.vocab[self.UNK]
ordered = [self.i2t(i) for i in range(len(self))]
return contrib_lookup.index_table_from_tensor(
np.array(ordered), default_value=unk_idx)
@classmethod
def load(cls, path):
do_lower_case = "uncased" in path or "cased" not in path
return cls(path, do_lower_case)
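# Usage sketch (illustrative; the vocab path and example text are hypothetical):
#
#   vocab = Vocab.load("/path/to/uncased_bert/vocab.txt")
#   pieces = vocab.tokenize("riding horses")      # word pieces, e.g. ["riding", "horses"]
#   ids = [vocab.t2i(w) for w in pieces]          # integer ids for the model
#   text = Vocab.detokenize(pieces)               # back to "riding horses"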
# ------------------------------------------------------------------------------
#
# General purpose text functions for masking and span sampling.
#
# ------------------------------------------------------------------------------
def get_token_mask(token_ids, stop_id):
"""Create mask for all ids past stop_id (inclusive)."""
batch_size = tensor_utils.shape(token_ids, 0)
num_tokens = tensor_utils.shape(token_ids, 1)
# Create position matrix.
idx_range = tf.expand_dims(tf.range(num_tokens), 0)
idx_range = tf.tile(idx_range, [batch_size, 1])
# Find positions of stop_id.
stop_positions = tf.where(
condition=tf.equal(token_ids, stop_id),
x=idx_range,
y=tf.fill([batch_size, num_tokens], num_tokens))
# Find earliest stop position (length).
stop_positions = tf.reduce_min(stop_positions, -1)
# Mask out all tokens at positions > stop_id.
mask = tf.less_equal(idx_range, tf.expand_dims(stop_positions, -1))
return tf.cast(mask, tf.int32)
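# Illustrative example (hypothetical values): with stop_id = 2,
#   token_ids = [[5, 9, 2, 7, 7]]  ->  mask = [[1, 1, 1, 0, 0]]
# i.e. every position up to and including the first stop_id is kept.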
def get_random_span(text, p, max_span_len, max_iter=10):
"""Get random subspan from text token sequence, following heuristics.
Heuristics:
1) Should not start or end mid-wordpiece.
2) Must contain at least one non-stopword token.
3) Length should be drawn from Geo(p) and less than max_span_len.
Args:
text: <string> [], space-separated token string.
p: <float32> Geometric distribution parameter.
max_span_len: Length to pad or truncate to.
max_iter: Maximum rejection sampling iterations.
Returns:
    span: <string>, the sampled space-separated token span.
"""
# Split text into tokens.
tokens = tf.string_split([text]).values
seq_len = tf.size(tokens)
def reject(start, end):
"""Reject span sample."""
span = tokens[start:end + 1]
wordpiece_boundary = tf.logical_or(
tf.strings.regex_full_match(span[0], r"^##.*"),
tf.strings.regex_full_match(span[-1], r"^##.*"))
span = tokens[start:end]
stopwords = list(nltk_utils.get_stopwords() | set(string.punctuation))
non_stopword = tf.setdiff1d(span, stopwords)
all_stopword = tf.equal(tf.size(non_stopword.out), 0)
length = tf.equal(tf.size(span), 0)
return tf.reduce_any([wordpiece_boundary, all_stopword, length])
def sample(start, end):
"""Sample length from truncated Geo(p)."""
# Sample from truncated geometric distribution.
geometric = lambda k: (1 - p)**(k - 1) * p
probs = np.array([geometric(k) for k in range(1, max_span_len + 1)])
probs /= probs.sum()
length = tf.distributions.Categorical(probs=probs).sample() + 1
# Sample start uniformly.
max_offset = tf.maximum(1, seq_len - length + 1)
start = tf.random.uniform([], 0, max_offset, dtype=tf.int32)
end = start + length
# Return span.
return [start, end]
# Rejection sample. Start with dummy span variable.
start = tf.constant(0)
end = tf.constant(0)
start, end = tf.while_loop(
reject, sample, [start, end], maximum_iterations=max_iter)
span = tf.strings.reduce_join(tokens[start:end], separator=" ")
return span
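# Usage sketch (illustrative): draw a short span from a word-piece token string.
#
#   span = get_random_span("the quick brown fox jumped", p=0.3, max_span_len=4)
#   # -> a tf.string scalar such as "brown fox"; spans that start or end on a
#   #    "##" piece, or that contain only stopwords/punctuation, are rejected.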
# ------------------------------------------------------------------------------
#
# Helpers for building padded TextInputs from token strings.
#
# ------------------------------------------------------------------------------
def build_text_inputs(
text,
length,
lookup_table,
segment_id=0,
start_token=None,
end_token=None,
):
"""Convert text to TextInputs.
Args:
text: <string>, space-separated token string.
length: Length to pad or truncate to.
lookup_table: Instance of contrib.lookup.index_table_from_tensor.
segment_id: Integer denoting segment type.
start_token: Optional start token.
end_token: Optional end token.
Returns:
Instance of TextInputs.
"""
# Tokenize and truncate.
tokens = tf.string_split([text]).values
length_offset = sum([0 if i is None else 1 for i in [start_token, end_token]])
tokens = tokens[:length - length_offset]
if start_token is not None:
tokens = tf.concat([[start_token], tokens], axis=0)
if end_token is not None:
tokens = tf.concat([tokens, [end_token]], axis=0)
token_ids = tf.cast(lookup_table.lookup(tokens), tf.int32)
mask = tf.ones_like(token_ids)
segment_ids = tf.fill(tf.shape(token_ids), segment_id)
pad = [[0, length - tf.size(token_ids)]]
token_ids = tf.pad(token_ids, pad)
mask = tf.pad(mask, pad)
segment_ids = tf.pad(segment_ids, pad)
positions = tf.range(length)
text_input = TextInputs(
token_ids=tf.ensure_shape(token_ids, [length]),
mask=tf.ensure_shape(mask, [length]),
segment_ids=tf.ensure_shape(segment_ids, [length]),
positions=tf.ensure_shape(positions, [length]))
return text_input
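# Illustrative sketch (hypothetical values): with length=6, segment_id=0,
# start_token="[CLS]" and end_token="[SEP]", the text "the cat" yields
#   token_ids   = ids of ["[CLS]", "the", "cat", "[SEP]"] padded to length 6
#   mask        = [1, 1, 1, 1, 0, 0]
#   segment_ids = [0, 0, 0, 0, 0, 0]
#   positions   = [0, 1, 2, 3, 4, 5]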
def build_planner_inputs(question, answer, length, lookup_table):
"""Convert text to TextInputs for conditional text planner.
Args:
question: <string>, space-separated token string.
answer: <string>, space-separated token string.
length: Length to pad or truncate to.
lookup_table: Instance of contrib.lookup.index_table_from_tensor.
Returns:
Instance of TextInputs.
"""
# Build question.
q_tokens = tf.string_split([question]).values
q_tokens = tf.concat([["[Q]"], q_tokens], axis=0)
q_token_ids = tf.cast(lookup_table.lookup(q_tokens), tf.int32)
q_len = tensor_utils.shape(q_token_ids, 0)
q_positions = tf.range(q_len)
# Build answer.
a_tokens = tf.string_split([answer]).values
a_tokens = tf.concat([["[A]"], a_tokens], axis=0)
a_token_ids = tf.cast(lookup_table.lookup(a_tokens), tf.int32)
a_len = tensor_utils.shape(a_token_ids, 0)
a_positions = tf.range(a_len)
# Combine.
token_ids = tf.concat([q_token_ids, a_token_ids], axis=0)
segment_ids = tf.concat([tf.fill([q_len], 2), tf.fill([a_len], 1)], axis=0)
positions = tf.concat([q_positions, a_positions], axis=0)
q_mask = tf.ones_like(q_token_ids)
mask = tf.concat([q_mask, tf.ones_like(a_token_ids)], axis=0)
# Truncate.
token_ids = token_ids[:length]
segment_ids = segment_ids[:length]
mask = mask[:length]
positions = positions[:length]
# Pad.
pad = [[0, length - tf.size(token_ids)]]
token_ids = tf.pad(token_ids, pad)
mask = tf.pad(mask, pad)
segment_ids = tf.pad(segment_ids, pad)
positions = tf.pad(positions, pad)
text_input = TextInputs(
token_ids=tf.ensure_shape(token_ids, [length]),
mask=tf.ensure_shape(mask, [length]),
segment_ids=tf.ensure_shape(segment_ids, [length]),
positions=tf.ensure_shape(positions, [length]))
return text_input
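# Illustrative sketch (hypothetical word-piece splits): question "who is this",
# answer "a dog", length=10 gives
#   tokens      = [Q] who is this [A] a dog   (+ padding)
#   segment_ids = [2, 2, 2, 2, 1, 1, 1, 0, 0, 0]
#   mask        = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
#   positions   = [0, 1, 2, 3, 0, 1, 2, 0, 0, 0]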
| apache-2.0 | -2,312,943,053,695,171,000 | 30.796562 | 80 | 0.635217 | false |
haisland0909/Denoising-Dirty-Documents | script/prediction.py | 1 | 5057 | # coding: UTF8
from sklearn.pipeline import FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
import sklearn.linear_model
import img_to_pickle as i_p
import features as f
import classify
import preprocessing as pre
import pickle
import numpy as np
import pandas as pd
import datetime
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
SUBMISSION_DIR = ROOT.replace("script", "tmp/submission")
clf_dict = {
'LR': {
"name": 'L2 Logistic Regression',
"clf": sklearn.linear_model.LogisticRegression(penalty='l2', dual=False, C=0.01),
},
'GB2': {
"name": 'Gradient Boosting New',
"clf": GradientBoostingRegressor(random_state=1, learning_rate=0.05,
n_estimators=3000, subsample=0.8,
max_features=0.3, min_samples_split=2,
min_samples_leaf=1, max_depth=7)
},
"RF": {
"name": "RandomForest",
"clf": RandomForestRegressor(max_depth=7, max_features=0.4,
min_samples_leaf=10, min_samples_split=2,
n_jobs=-1, n_estimators=1000)
},
'SGDR': {
"name": 'SGD Regression',
"clf": sklearn.linear_model.SGDRegressor(penalty='l2'),
}
}
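# Usage sketch (illustrative): entries are looked up by their short name, e.g.
#   clf = clf_dict["RF"]["clf"]      # RandomForestRegressor instance
#   print clf_dict["RF"]["name"]     # "RandomForest"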
def zero_one(x):
return min(max(x, 0.), 1.)
def convert_testdata(test_gray_data):
data_df = f.make_test_df(test_gray_data)
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
Std = preprocessing.StandardScaler()
X_test = fu.fit_transform(data_df)
#X_test = Std.fit_transform(X_test)
return X_test
def convert_traindata(train_gray_data, labels):
data_df = f.make_data_df(train_gray_data, labels)
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
Std = preprocessing.StandardScaler()
X_train = fu.fit_transform(data_df)
y_train = np.concatenate(data_df["label"].apply(lambda x: x.flatten()))
X_train = Std.fit_transform(X_train)
return X_train, y_train
def prediction(clf_name):
print "****************classifier****************"
print clf_dict[clf_name]["clf"]
clf = clf_dict[clf_name]["clf"]
_, _, _, train_gray_data, test_gray_data, _, labels = i_p.load_data()
train_keys = train_gray_data.keys()
test_keys = test_gray_data.keys()
train_df = f.make_data_df(train_gray_data, labels)
test_df = f.make_test_df(test_gray_data)
train_df = train_df.reset_index()
test_df = test_df.reset_index()
train_df.columns = ["pngname", "input", "label"]
test_df.columns = ["pngname", "input"]
# operation check
    if clf_name == "SGDR":
# train_df, train_keys, test_df, test_keys = pre.make_checkdata(mode="df")
# train_df, train_keys, _, _ = pre.make_checkdata(mode="df")
for i in xrange(len(train_keys)):
train_X, train_y = classify.set_traindata(train_df, train_keys[i])
clf.partial_fit(train_X, train_y)
else:
# operation check
# train_df, train_keys, _, _ = pre.make_checkdata(mode="df")
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
train_X = fu.fit_transform(train_df)
train_y = np.concatenate(train_df["label"].apply(lambda x: x.flatten()))
train_X, train_y = classify.downsampling_data(train_X, train_y, 0.2)
clf.fit(train_X, train_y)
clf_dir = os.path.abspath(os.path.dirname(__file__)) +\
"/../tmp/fit_instance/"
now = datetime.datetime.now()
savefile = clf_dir + clf_name + now.strftime("%Y_%m_%d_%H_%M_%S") + ".pickle"
    fi = open(savefile, "wb")
pickle.dump(clf, fi)
fi.close()
for i in xrange(len(test_keys)):
test_img = test_df[(test_df["pngname"] == test_keys[i])]["input"].as_matrix()[0]
imgname = test_keys[i]
shape = test_img.shape
test_img = {test_keys[i]: test_img}
X_test = convert_testdata(test_img)
output = clf.predict(X_test)
output = np.asarray(output)
zo = np.vectorize(zero_one)
output = zo(output).reshape(shape)
tmp = []
for row in xrange(len(output)):
for column in xrange(len(output[row])):
id_ = imgname + "_" + str(row + 1) + "_" + str(column + 1)
value = output[row][column]
pix = [id_, value]
tmp.append(pix)
if i == 0:
predict_df = pd.DataFrame(tmp)
else:
tmp_df = pd.DataFrame(tmp)
predict_df = pd.concat([predict_df, tmp_df])
predict_df.columns = ["id", "value"]
now = datetime.datetime.now()
submission_path = SUBMISSION_DIR + "/submission_" + now.strftime("%Y_%m_%d_%H_%M_%S") + ".csv"
predict_df.to_csv(submission_path, header=True, index=False)
if __name__ == '__main__':
clf_name = "RF"
prediction(clf_name)
| apache-2.0 | -8,497,978,010,576,337,000 | 29.281437 | 98 | 0.58513 | false |
tgbugs/hypush | hyputils/memex/models/user.py | 1 | 8621 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import re
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from sqlalchemy.ext.declarative import declared_attr
from hyputils.memex._compat import string_types
from hyputils.memex.db import Base
from hyputils.memex.util.user import split_user
from hyputils.memex.security import security
USERNAME_MIN_LENGTH = 3
USERNAME_MAX_LENGTH = 30
USERNAME_PATTERN = "(?i)^[A-Z0-9._]+$"
EMAIL_MAX_LENGTH = 100
DISPLAY_NAME_MAX_LENGTH = 30
def _normalise_username(username):
    # We normalise usernames by removing dots and lowercasing, in order to
    # discourage impersonation attempts.
return sa.func.lower(sa.func.replace(username, sa.text("'.'"), sa.text("''")))
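# Illustrative example: "Juan.Wood" and "juanwood" normalise to the same key,
# since the generated SQL is lower(replace(username, '.', '')).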
class UsernameComparator(Comparator):
"""
Custom comparator for :py:attr:`~h.models.user.User.username`.
This ensures that all lookups against the username property, such as
session.query(User).filter_by(username='juanwood')
use the normalised username for the lookup and appropriately normalise the
RHS of the query. This means that a query like the one above will
correctly find a user with a username of "Juan.Wood", for example.
"""
def operate(self, op, other, **kwargs):
return op(
_normalise_username(self.__clause_element__()),
_normalise_username(other),
**kwargs
)
class UserIDComparator(Comparator):
"""
Custom comparator for :py:attr:`~h.models.user.User.userid`.
A user's userid is a compound property which depends on their username
and their authority. A naive comparator for this property would generate
SQL like the following:
... WHERE 'acct:' || username || '@' || authority = ...
This would be slow, due to the lack of an index on the LHS expression.
While we could add a functional index on this expression, we can also take
advantage of the existing index on (normalised_username, authority), which
is what this comparator does.
A query such as
session.query(User).filter_by(userid='acct:[email protected]')
will instead generate
WHERE
(lower(replace(username, '.', '')), authority ) =
(lower(replace('luis.silva', '.', '')), 'example.com')
"""
def __init__(self, username, authority):
self.username = username
self.authority = authority
def __clause_element__(self):
return sa.tuple_(_normalise_username(self.username), self.authority)
def __eq__(self, other):
"""
Compare the userid for equality with `other`.
`other` can be anything plausibly on the RHS of a comparison, which
can include other SQL clause elements or expressions, as in
User.userid == sa.tuple_(User.username, Group.authority)
or literals, as in
User.userid == 'acct:[email protected]'
We treat the literal case specially, and split the string into
username and authority ourselves. If the string is not a well-formed
userid, the comparison will always return False.
"""
if isinstance(other, string_types):
try:
val = split_user(other)
except ValueError:
# The value being compared isn't a valid userid
return False
else:
other = sa.tuple_(_normalise_username(val["username"]), val["domain"])
return self.__clause_element__() == other
def in_(self, userids):
others = []
for userid in userids:
try:
val = split_user(userid)
except ValueError:
continue
other = sa.tuple_(_normalise_username(val["username"]), val["domain"])
others.append(other)
if not others:
return False
return self.__clause_element__().in_(others)
class User(Base):
__tablename__ = "user"
@declared_attr
def __table_args__(cls): # noqa: N805
return (
# (email, authority) must be unique
sa.UniqueConstraint("email", "authority"),
# (normalised username, authority) must be unique. This index is
# also critical for making user lookups fast.
sa.Index(
"ix__user__userid",
_normalise_username(cls.username),
cls.authority,
unique=True,
),
)
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
#: Username as chosen by the user on registration
_username = sa.Column("username", sa.UnicodeText(), nullable=False)
#: The "authority" for this user. This represents the "namespace" in which
#: this user lives. By default, all users are created in the namespace
#: corresponding to `request.domain`, but this can be overridden with the
#: `h.authority` setting.
authority = sa.Column("authority", sa.UnicodeText(), nullable=False)
#: The display name which will be used when rendering an annotation.
display_name = sa.Column(sa.UnicodeText())
#: A short user description/bio
description = sa.Column(sa.UnicodeText())
#: A free-form column to allow the user to say where they are
location = sa.Column(sa.UnicodeText())
#: The user's URI/link on the web
uri = sa.Column(sa.UnicodeText())
#: The user's ORCID ID
orcid = sa.Column(sa.UnicodeText())
identities = sa.orm.relationship(
"UserIdentity", backref="user", cascade="all, delete-orphan"
)
@hybrid_property
def username(self):
return self._username
@username.setter
def username(self, value):
self._username = value
@username.comparator
def username(cls): # noqa: N805
return UsernameComparator(cls._username)
@hybrid_property
def userid(self):
return "acct:{username}@{authority}".format(
username=self.username, authority=self.authority
)
@userid.comparator
def userid(cls): # noqa: N805
return UserIDComparator(cls.username, cls.authority)
email = sa.Column(sa.UnicodeText())
last_login_date = sa.Column(
sa.TIMESTAMP(timezone=False),
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
registered_date = sa.Column(
sa.TIMESTAMP(timezone=False),
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
@sa.orm.validates("email")
def validate_email(self, key, email):
if email is None:
return email
if len(email) > EMAIL_MAX_LENGTH:
raise ValueError(
"email must be less than {max} characters "
"long".format(max=EMAIL_MAX_LENGTH)
)
return email
@sa.orm.validates("_username")
def validate_username(self, key, username):
if not USERNAME_MIN_LENGTH <= len(username) <= USERNAME_MAX_LENGTH:
raise ValueError(
"username must be between {min} and {max} "
"characters long".format(
min=USERNAME_MIN_LENGTH, max=USERNAME_MAX_LENGTH
)
)
if not re.match(USERNAME_PATTERN, username):
raise ValueError(
"username must have only letters, numbers, " "periods, and underscores."
)
return username
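    # Illustrative examples (hypothetical values): "jane_doe.1" is accepted,
    # while "ab" (too short) and "jane!doe" (illegal character) raise ValueError.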
@classmethod
def get_by_email(cls, session, email, authority):
"""Fetch a user by email address."""
if email is None:
return None
return (
session.query(cls)
.filter(
sa.func.lower(cls.email) == email.lower(), cls.authority == authority
)
.first()
)
@classmethod
def get_by_username(cls, session, username, authority):
"""Fetch a user by username."""
return (
session.query(cls)
.filter(cls.username == username, cls.authority == authority)
.first()
)
def __acl__(self):
terms = []
# auth_clients that have the same authority as the user
# may update the user
user_update_principal = "client_authority:{}".format(self.authority)
terms.append((security.Allow, user_update_principal, "update"))
terms.append(security.DENY_ALL)
return terms
def __repr__(self):
return "<User: %s>" % self.username
| mit | -137,838,271,922,611,360 | 30.122744 | 88 | 0.609094 | false |