repo_name (stringlengths 6-61) | path (stringlengths 4-230) | copies (stringlengths 1-3) | size (stringlengths 4-6) | text (stringlengths 1.01k-850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
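The schema above can be used to pre-filter rows before any further processing. A minimal illustrative sketch follows (assuming each row has already been loaded as a plain Python dict keyed by the column names above; the license list and thresholds are arbitrary examples, not part of the dataset):

def filter_rows(rows):
    # Keep hand-written, permissively licensed files with moderate line lengths.
    kept = []
    for row in rows:
        if (not row["autogenerated"]
                and row["license"] in ("mit", "bsd-3-clause", "apache-2.0")
                and row["line_max"] <= 120):
            kept.append(row["text"])
    return kept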
dladd/pyFormex | pyformex/plugins/tools.py | 1 | 9029 | # $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""tools.py
Graphic Tools for pyFormex.
"""
from __future__ import print_function
import pyformex as pf
from coords import *
from collection import Collection
from gui.actors import GeomActor
from mesh import Mesh
from formex import Formex
from plugins.trisurface import TriSurface
from plugins.nurbs import NurbsCurve,NurbsSurface
class Plane(object):
def __init__(self,points,normal=None,size=((1.0,1.0),(1.0,1.0))):
pts = Coords(points)
if pts.shape == (3,) and normal is not None:
P = pts
n = Coords(normal)
if n.shape != (3,):
raise ValueError,"normal does not have correct shape"
elif pts.shape == (3,3,):
P = pts.centroid()
n = cross(pts[1]-pts[0],pts[2]-pts[0])
else:
raise ValueError,"points has incorrect shape (%s)" % str(pts.shape)
size = asarray(size)
s = Coords([insert(size[0],0,0.,-1),insert(size[1],0,0.,-1)])
self.P = P
self.n = n
self.s = s
def point(self):
return self.P
def normal(self):
return self.n
def size(self):
return self.s
def bbox(self):
return self.P.bbox()
def __str__(self):
return 'P:%s n:%s s:%s' % (list(self.P),list(self.n), (list(self.s[0]),list(self.s[1])))
def actor(self,**kargs):
from gui import actors
actor = actors.PlaneActor(size=self.s,**kargs)
actor = actors.RotatedActor(actor,self.n,**kargs)
actor = actors.TranslatedActor(actor,self.P,**kargs)
return actor
################# Report information about picked objects ################
def report(K):
if K is not None and hasattr(K,'obj_type'):
print(K.obj_type)
if K.obj_type == 'actor':
return reportActors(K)
elif K.obj_type == 'element':
return reportElements(K)
elif K.obj_type == 'point':
return reportPoints(K)
elif K.obj_type == 'edge':
return reportEdges(K)
elif K.obj_type == 'partition':
return reportPartitions(K)
return ''
def reportActors(K):
s = "Actor report\n"
v = K.get(-1,[])
s += "Actors %s\n" % v
for k in v:
A = pf.canvas.actors[k]
t = A.getType()
s += " Actor %s (type %s)\n" % (k,t)
return s
def reportElements(K):
s = "Element report\n"
for k in K.keys():
v = K[k]
A = pf.canvas.actors[k]
t = A.getType()
s += "Actor %s (type %s); Elements %s\n" % (k,t,v)
if t == Formex:
e = A.coords
elif t == TriSurface or t == Mesh :
e = A.elems
for p in v:
s += " Element %s: %s\n" % (p,e[p])
return s
def reportPoints(K):
s = "Point report\n"
for k in K.keys():
v = K[k]
A = pf.canvas.actors[k]
s += "Actor %s (type %s); Points %s\n" % (k,A.getType(),v)
x = A.points()
for p in v:
s += " Point %s: %s\n" % (p,x[p])
return s
def reportEdges(K):
s = "Edge report\n"
for k in K.keys():
v = K[k]
A = pf.canvas.actors[k]
s += "Actor %s (type %s); Edges %s\n" % (k,A.getType(),v)
e = A.edges()
for p in v:
s += " Edge %s: %s\n" % (p,e[p])
def reportPartitions(K):
s = "Partition report\n"
for k in K.keys():
P = K[k][0]
A = pf.canvas.actors[k]
t = A.getType()
for l in P.keys():
v = P[l]
s += "Actor %s (type %s); Partition %s; Elements %s\n" % (k,t,l,v)
if t == 'Formex':
e = A
elif t == 'TriSurface':
e = A.getElems()
for p in v:
s += " Element %s: %s\n" % (p,e[p])
return s
def reportDistances(K):
if K is None or not hasattr(K,'obj_type') or K.obj_type != 'point':
return ''
s = "Distance report\n"
x = Coords.concatenate(getCollection(K))
s += "First point: %s %s\n" % (0,x[0])
d = x.distanceFromPoint(x[0])
for i,p in enumerate(zip(x,d)):
s += "Distance from point: %s %s: %s\n" % (i,p[0],p[1])
return s
def reportAngles(K):
if K is None or not hasattr(K,'obj_type') or K.obj_type != 'element':
return ''
s = "Angle report:\n"
for F in getCollection(K):
if isinstance(F,Mesh):
F=F.toFormex()
if isinstance(F,Formex):
x = F.coords
if len(x)!=2:
raise ValueError,"You didn't select 2 elements"
v = x[:,1,:] - x[:,0,:]
v = normalize(v)
cosa = dotpr(v[0],v[1])
#print(cosa)
a = arccosd(cosa)
s += " a = %s" % a
else:
raise TypeError,"Angle measurement only possible with Formex or Mesh"
return s
def getObjectItems(obj,items,mode):
"""Get the specified items from object."""
if mode == 'actor':
return [ obj[i].object for i in items if hasattr(obj[i],'object') ]
elif mode in ['element','partition']:
if hasattr(obj,'object') and hasattr(obj.object,'select'):
return obj.object.select(items)
elif mode == 'point':
if hasattr(obj,'points'):
return obj.points()[items]
return None
def getCollection(K):
"""Returns a collection."""
if K.obj_type == 'actor':
return [ pf.canvas.actors[int(i)].object for i in K.get(-1,[]) if hasattr(pf.canvas.actors[int(i)],'object') ]
elif K.obj_type in ['element','point']:
return [ getObjectItems(pf.canvas.actors[k],K[k],K.obj_type) for k in K.keys() ]
elif K.obj_type == 'partition':
return [getObjectItems(pf.canvas.actors[k],K[k][0][prop],K.obj_type) for k in K.keys() for prop in K[k][0].keys()]
else:
return None
def growCollection(K,**kargs):
"""Grow the collection with n frontal rings.
K should be a collection of elements.
This should work on any objects that have a growSelection method.
"""
if K.obj_type == 'element':
for k in K.keys():
o = pf.canvas.actors[k]
if hasattr(o,'growSelection'):
K[k] = o.growSelection(K[k],**kargs)
def partitionCollection(K):
"""Partition the collection according to node adjacency.
The actor numbers will be connected to a collection of property numbers,
e.g. 0 [1 [4,12] 2 [6,20]], where 0 is the actor number, 1 and 2 are the
property numbers and 4, 12, 6 and 20 are the element numbers.
"""
sel = getCollection(K)
if len(sel) == 0:
print("Nothing to partition!")
return
if K.obj_type == 'actor':
actor_numbers = K.get(-1,[])
K.clear()
for i in actor_numbers:
K.add(range(sel[int(i)].nelems()),i)
prop = 1
j = 0
for i in K.keys():
p = sel[j].partitionByConnection() + prop
print("Actor %s partitioned in %s parts" % (i,p.max()-p.min()+1))
C = Collection()
C.set(transpose(asarray([p,K[i]])))
K[i] = C
prop += p.max()-p.min()+1
j += 1
K.setType('partition')
def getPartition(K,prop):
""" Remove all partitions with property not in prop."""
for k in K.keys():
for p in K[k][0].keys():
if not p in prop:
K[k][0].remove(K[k][0][p],p)
def exportObjects(obj,name,single=False):
"""Export a list of objects under the given name.
If obj is a list, and single=True, each element of the list is exported
as a single item. The items will be given the names name-0, name-1, etc.
Else, the obj is exported as is under the name.
"""
if single and type(obj) == list:
export(dict([ ("name-%s"%i,v) for i,v in enumerate(obj)]))
else:
export({name:obj})
# End
| gpl-3.0 | 4,544,304,737,583,841,000 | 29.710884 | 122 | 0.554103 | false | 3.288055 | false | false | false |
LuciferJack/python-mysql-pool | PyMysqlPool/util/logger_err.py | 1 | 1113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
import logging
import sys
from PyMysqlPool.constant.constant import loggingerr, loggErrorFile
logging.basicConfig(level=logging.NOTSET,
format='[%(asctime)s][%(levelname)7s][%(threadName)s][%(filename)s:%(funcName)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stdout)
logFormatter = logging.Formatter(
'[%(asctime)s][%(levelname)7s][%(threadName)s][%(filename)s:%(funcName)s:%(lineno)d] %(message)s')
rootLogger = logging.getLogger(__name__)
rootLogger.setLevel(logging.ERROR)
# create console handler and set level to debug
ch = logging.StreamHandler(stream=sys.stderr)
ch.setLevel(logging.ERROR)
# add ch to logger
rootLogger.addHandler(ch)
if loggErrorFile:
fileHandler = logging.FileHandler("{0}".format(loggingerr))
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
| mit | -7,561,047,000,098,568,000 | 32.78125 | 125 | 0.68823 | false | 3.5 | false | true | false |
dycodedev/taiga-back | taiga/projects/apps.py | 8 | 4463 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from django.apps import apps
from django.db.models import signals
from . import signals as handlers
def connect_memberships_signals():
# On membership object is deleted, update role-points relation.
signals.pre_delete.connect(handlers.membership_post_delete,
sender=apps.get_model("projects", "Membership"),
dispatch_uid='membership_pre_delete')
# On membership object is deleted, update notify policies of all objects relation.
signals.post_save.connect(handlers.create_notify_policy,
sender=apps.get_model("projects", "Membership"),
dispatch_uid='create-notify-policy')
def connect_projects_signals():
# On project object is created apply template.
signals.post_save.connect(handlers.project_post_save,
sender=apps.get_model("projects", "Project"),
dispatch_uid='project_post_save')
# Tags
signals.pre_save.connect(handlers.tags_normalization,
sender=apps.get_model("projects", "Project"),
dispatch_uid="tags_normalization_projects")
signals.pre_save.connect(handlers.update_project_tags_when_create_or_edit_taggable_item,
sender=apps.get_model("projects", "Project"),
dispatch_uid="update_project_tags_when_create_or_edit_taggable_item_projects")
def connect_us_status_signals():
signals.post_save.connect(handlers.try_to_close_or_open_user_stories_when_edit_us_status,
sender=apps.get_model("projects", "UserStoryStatus"),
dispatch_uid="try_to_close_or_open_user_stories_when_edit_us_status")
def connect_task_status_signals():
signals.post_save.connect(handlers.try_to_close_or_open_user_stories_when_edit_task_status,
sender=apps.get_model("projects", "TaskStatus"),
dispatch_uid="try_to_close_or_open_user_stories_when_edit_task_status")
def disconnect_memberships_signals():
signals.pre_delete.disconnect(sender=apps.get_model("projects", "Membership"), dispatch_uid='membership_pre_delete')
signals.post_save.disconnect(sender=apps.get_model("projects", "Membership"), dispatch_uid='create-notify-policy')
def disconnect_projects_signals():
signals.post_save.disconnect(sender=apps.get_model("projects", "Project"), dispatch_uid='project_post_save')
signals.pre_save.disconnect(sender=apps.get_model("projects", "Project"), dispatch_uid="tags_normalization_projects")
signals.pre_save.disconnect(sender=apps.get_model("projects", "Project"), dispatch_uid="update_project_tags_when_create_or_edit_taggable_item_projects")
def disconnect_us_status_signals():
signals.post_save.disconnect(sender=apps.get_model("projects", "UserStoryStatus"), dispatch_uid="try_to_close_or_open_user_stories_when_edit_us_status")
def disconnect_task_status_signals():
signals.post_save.disconnect(sender=apps.get_model("projects", "TaskStatus"), dispatch_uid="try_to_close_or_open_user_stories_when_edit_task_status")
class ProjectsAppConfig(AppConfig):
name = "taiga.projects"
verbose_name = "Projects"
def ready(self):
connect_memberships_signals()
connect_projects_signals()
connect_us_status_signals()
connect_task_status_signals()
| agpl-3.0 | -2,557,207,236,575,334,000 | 49.693182 | 160 | 0.663977 | false | 4.004488 | false | false | false |
mozilla/airmozilla | airmozilla/manage/views/authmigrate.py | 2 | 5816 | from django import forms
from django.shortcuts import render
from django.contrib.auth import get_user_model
from .decorators import superuser_required
from airmozilla.manage.forms import BaseForm
from airmozilla.main.models import (
Event,
SuggestedEvent,
EventEmail,
EventRevision,
EventAssignment,
SuggestedEventComment,
EventTweet,
Approval,
Picture,
Chapter,
UserEmailAlias,
)
from airmozilla.closedcaptions.models import ClosedCaptions, RevOrder
from airmozilla.comments.models import (
Comment,
Unsubscription,
Discussion,
SuggestedDiscussion,
)
from airmozilla.search.models import (
LoggedSearch,
SavedSearch,
)
from airmozilla.starred.models import StarredEvent
from airmozilla.surveys.models import Answer
from airmozilla.uploads.models import Upload
User = get_user_model()
class AuthMigrateForm(BaseForm):
file = forms.FileField()
dry_run = forms.BooleanField(required=False)
@superuser_required
def upload(request): # pragma: no cover
results = None
dry_run = False
if request.method == 'POST':
form = AuthMigrateForm(request.POST, request.FILES)
if form.is_valid():
dry_run = form.cleaned_data['dry_run']
lines = []
first = True
for line in form.cleaned_data['file']:
if first:
first = False
else:
alias, real = line.strip().split(',')
lines.append((alias, real))
if lines:
results = migrate(lines, dry_run)
else:
form = AuthMigrateForm()
context = {
'form': form,
'results': results,
'dry_run': dry_run,
}
return render(request, 'manage/authmigrate_upload.html', context)
def migrate(lines, dry_run=False):
results = []
for alias, real in lines:
try:
old = User.objects.get(email__iexact=alias)
except User.DoesNotExist:
old = None
try:
new = User.objects.get(email__iexact=real)
except User.DoesNotExist:
new = None
notes = ''
if old and not new:
# Easy, just change this user's email address
old.email = real
if not dry_run:
old.save()
UserEmailAlias.objects.get_or_create(
user=old,
email=alias,
)
notes = 'Moved over'
elif not old and new:
if not dry_run:
UserEmailAlias.objects.get_or_create(
email=alias,
user=new,
)
notes = 'Nothing to do'
elif not old and not new:
notes = 'Neither found'
else:
assert old and new
notes = 'Merged'
notes += '\n({})'.format(
'\n'.join(merge_user(old, new, dry_run=dry_run))
)
if not dry_run:
old.is_active = False
old.save()
UserEmailAlias.objects.get_or_create(
user=new,
email=old.email,
)
results.append({
'alias': alias,
'old': old,
'real': real,
'new': new,
'notes': notes,
})
return results
def merge_user(old, new, dry_run=False):
things = []
def migrate(model, key='user', name=None, only_if_in=False):
if only_if_in:
if model.objects.filter(**{key: new}).exists():
model.objects.filter(**{key: old}).delete()
count = 0
for instance in model.objects.filter(**{key: old}):
setattr(instance, key, new)
if not dry_run:
instance.save()
count += 1
if count > 0:
things.append('{}{} {}'.format(
name or model._meta.verbose_name,
count != 1 and 's' or '',
count,
))
if old.is_staff:
new.is_staff = True
if not dry_run:
new.save()
things.append('transferred is_staff')
if old.is_superuser:
new.is_superuser = True
if not dry_run:
new.save()
things.append('transferred is_superuser')
# Groups
for group in old.groups.all():
if group not in new.groups.all():
if not dry_run:
new.groups.add(group)
things.append('{} group membership transferred'.format(group.name))
# Events
migrate(Event, 'creator')
migrate(Event, 'modified_user', name='modified event')
# EventEmail
migrate(EventEmail)
# EventRevision
migrate(EventRevision)
# SuggestedEventComment
migrate(SuggestedEventComment)
# Comments
migrate(Comment)
# Discussions
migrate(Discussion.moderators.through, only_if_in=True)
# Suggested discussions
migrate(SuggestedDiscussion.moderators.through, only_if_in=True)
# Event assignments
migrate(EventAssignment.users.through, only_if_in=True)
# Unsubscriptions
migrate(Unsubscription)
# SuggestedEvent
migrate(SuggestedEvent)
# Closed captions
migrate(ClosedCaptions, 'created_user')
# Rev orders
migrate(RevOrder, 'created_user')
# EventTweet
migrate(EventTweet, 'creator')
# Approval
migrate(Approval)
# Picture
migrate(Picture, 'modified_user')
# Chapters
migrate(Chapter)
# Logged search
migrate(LoggedSearch)
# Saved search
migrate(SavedSearch)
# Starred events
migrate(StarredEvent)
# (survey) Answers
migrate(Answer)
# Upload
migrate(Upload)
return things
| bsd-3-clause | -4,287,681,526,563,116,500 | 23.961373 | 79 | 0.560179 | false | 4.14245 | false | false | false |
divisible-by-hero/dbh-blog | blog/views.py | 1 | 1074 | from django.views.generic import ListView, DetailView, MonthArchiveView
from django import forms
from .models import Post
class ListMixin(object):
paginate_by = 5
context_object_name = 'posts'
template_name = 'blog/post_list_view.html'
def get_queryset(self):
tag = self.request.GET.get('tag', None)
if tag:
return Post.objects.published().filter(tags__name__in=[tag])
return Post.objects.published()
class MonthArchive(ListMixin, MonthArchiveView):
date_field = 'published_date'
class PostListView(ListMixin, ListView):
model = Post
class PostDetailView(DetailView):
model = Post
context_object_name = 'post'
template_name = 'blog/post_detail_item.html'
class SearchListView(ListView):
model = Post
template_name = 'blog/post_list_view.html'
context_object_name = 'posts'
paginate_by = 5
def get_queryset(self):
search = self.kwargs.get('q', None)
if search:
return Post.objects.published().search(search)
return Post.objects.all()
| bsd-3-clause | -1,449,683,677,619,535,400 | 23.976744 | 72 | 0.668529 | false | 3.703448 | false | false | false |
maxikov/locationpythonserver | locationpythonserver/naive_gauss/set_evaluator.py | 1 | 2550 | #!/usr/bin/env python
# Copyright (c) 2013 Maxim Kovalev, Carnegie Mellon University
# This file is part of Locationing Server.
#
# Locationing Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Locationing Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Locationing Server. If not, see <http://www.gnu.org/licenses/>.
import dataloader
import dataprocessor
import traceback
def create_data_processor():
dataloader.db_name = "wifi_location_training"
wifi_stats = dataloader.get_all_wifi_stats()
gps_stats = dataloader.get_all_gps_stats()
dp = dataprocessor.DataProcessor(wifi_stats, gps_stats)
return dp
def load_wifi_gps(timestamp):
w = dataloader.get_one_wifi_reading(timestamp)
g = dataloader.get_one_gps_reading(timestamp)
return w, g
def lookup_location(location_id):
dataloader.db_name = "wifi_location_training"
return dataloader.lookup_location(location_id)
def main():
dp = create_data_processor()
# ts = dataloader.get_few_timestamps(10)
dataloader.db_name = "wifi_location_test"
ts = dataloader.get_all_timestamps()
l = len(ts)
rights = 0
for i in xrange(l):
try:
t = ts[i]
dataloader.db_name = "wifi_location_test"
w, g = load_wifi_gps(t)
ev_loc = dp.estimate_location(w, g)
tr_loc = dataloader.get_true_location(t)
ev_loc_name = lookup_location(ev_loc[0])
tr_loc_name = lookup_location(tr_loc)
if ev_loc_name == tr_loc_name:
rights += 1
print i, "of", l, "(", 100*i/l, "%), rights:", float(rights) / (i+1), "Timestamp:", t, "estimate:", ev_loc, "(", ev_loc_name, ") true:", tr_loc, "(", tr_loc_name, ")"
except Exception as e:
tr = traceback.format_exc().splitlines()
for line in tr:
print line
print e
print "Total accuracy:", 100*float(rights) / l
print "Or, considering i's", 100*float(rights) / (i+1)
if __name__ == "__main__":
main()
| gpl-3.0 | 7,928,715,331,833,816,000 | 37.059701 | 178 | 0.632549 | false | 3.459973 | false | false | false |
phoebe-project/phoebe2-docs | 2.2/tutorials/reflection_heating.py | 1 | 4312 | #!/usr/bin/env python
# coding: utf-8
# Reflection and Heating
# ============================
#
# For a comparison between "Horvat" and "Wilson" methods in the "irrad_method" parameter, see the tutorial on [Lambert Scattering](./irrad_method_horvat.ipynb).
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.2,<2.3"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.ipynb) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
# Relevant Parameters
# ---------------------------------
# The parameters that define reflection and heating are all prefaced by "irrad_frac" (fraction of incident flux) and suffixed by "bol" to indicate that they all refer to a bolometric (rather than passband-dependent) process. For this reason, they are *not* stored in the dataset, but rather directly in the component.
#
# Each of these parameters dictates how much incident flux will be handled by each of the available processes. For now these only include reflection (heating with immediate re-emission, without heat distribution) and lost flux. In the future, heating with distribution and scattering will also be supported.
#
# For each component, these parameters *must* add up to exactly 1.0 - and this is handled by a constraint which by default constrains the "lost" parameter.
# In[3]:
print(b['irrad_frac_refl_bol'])
# In[4]:
print(b['irrad_frac_lost_bol'])
# In[5]:
print(b['irrad_frac_refl_bol@primary'])
# In[6]:
print(b['irrad_frac_lost_bol@primary@component'])
# In order to see the effect of reflection, let's set "irrad_frac_refl_bol" of both of our stars to 0.9 - that is 90% of the incident flux will go towards reflection and 10% will be ignored.
# In[7]:
b.set_value_all('irrad_frac_refl_bol', 0.9)
# Since reflection can be a computationally expensive process and in most cases is a low-order effect, there is a switch in the compute options that needs to be enabled in order for reflection to be taken into account. If this switch is False (which it is by default), the albedos are completely ignored and will be treated as if all incident light is lost/ignored.
# In[8]:
print(b['irrad_method@compute'])
# Reflection has the most noticeable effect when the two stars are close to each other and have a large temperature ratio.
# In[9]:
b['sma@orbit'] = 4.0
# In[10]:
b['teff@primary'] = 10000
# In[11]:
b['teff@secondary'] = 5000
# Influence on Light Curves (fluxes)
# ---------------------------------
# In[12]:
b.add_dataset('lc', times=np.linspace(0,1,101))
# Let's run models with the reflection switch both turned on and off so that we can compare the two results. We'll also override delta to be a larger number since the computation time required by delta depends largely on the number of surface elements.
# In[13]:
b.run_compute(irrad_method='none', ntriangles=700, model='refl_false')
# In[14]:
b.run_compute(irrad_method='wilson', ntriangles=700, model='refl_true')
# In[15]:
afig, mplfig = b.plot(show=True, legend=True)
# In[16]:
artists = plt.plot(b['value@times@refl_false'], b['value@fluxes@refl_true']-b['value@fluxes@refl_false'], 'r-')
# Influence on Meshes (Intensities)
# ------------------------------------------
# In[17]:
b.add_dataset('mesh', times=[0.2], columns=['intensities@lc01'])
# In[18]:
b.run_compute(irrad_method='none', ntriangles=700, model='refl_false', overwrite=True)
# In[19]:
b.run_compute(irrad_method='wilson', ntriangles=700, model='refl_true', overwrite=True)
# In[20]:
afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_false', fc='intensities', ec='face', show=True)
# In[21]:
afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_true', fc='intensities', ec='face', show=True)
# In[ ]:
| gpl-3.0 | -861,078,462,931,556,000 | 22.692308 | 366 | 0.683905 | false | 3.154353 | false | false | false |
kosgroup/odoo | addons/auth_signup/controllers/main.py | 7 | 5292 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import werkzeug
from odoo import http, _
from odoo.addons.auth_signup.models.res_users import SignupError
from odoo.addons.web.controllers.main import ensure_db, Home
from odoo.http import request
_logger = logging.getLogger(__name__)
class AuthSignupHome(Home):
@http.route()
def web_login(self, *args, **kw):
ensure_db()
response = super(AuthSignupHome, self).web_login(*args, **kw)
response.qcontext.update(self.get_auth_signup_config())
if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
# Redirect if already logged in and redirect param is present
return http.redirect_with_hash(request.params.get('redirect'))
return response
@http.route('/web/signup', type='http', auth='public', website=True)
def web_auth_signup(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('signup_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
self.do_signup(qcontext)
return super(AuthSignupHome, self).web_login(*args, **kw)
except (SignupError, AssertionError), e:
if request.env["res.users"].sudo().search([("login", "=", qcontext.get("login"))]):
qcontext["error"] = _("Another user is already registered using this email address.")
else:
_logger.error(e.message)
qcontext['error'] = _("Could not create a new account.")
return request.render('auth_signup.signup', qcontext)
@http.route('/web/reset_password', type='http', auth='public', website=True)
def web_auth_reset_password(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
if qcontext.get('token'):
self.do_signup(qcontext)
return super(AuthSignupHome, self).web_login(*args, **kw)
else:
login = qcontext.get('login')
assert login, "No login provided."
request.env['res.users'].sudo().reset_password(login)
qcontext['message'] = _("An email has been sent with credentials to reset your password")
except SignupError:
qcontext['error'] = _("Could not reset your password")
_logger.exception('error when resetting password')
except Exception, e:
qcontext['error'] = e.message
return request.render('auth_signup.reset_password', qcontext)
def get_auth_signup_config(self):
"""retrieve the module config (which features are enabled) for the login page"""
IrConfigParam = request.env['ir.config_parameter']
return {
'signup_enabled': IrConfigParam.sudo().get_param('auth_signup.allow_uninvited') == 'True',
'reset_password_enabled': IrConfigParam.sudo().get_param('auth_signup.reset_password') == 'True',
}
def get_auth_signup_qcontext(self):
""" Shared helper returning the rendering context for signup and reset password """
qcontext = request.params.copy()
qcontext.update(self.get_auth_signup_config())
if qcontext.get('token'):
try:
# retrieve the user info (name, login or email) corresponding to a signup token
token_infos = request.env['res.partner'].sudo().signup_retrieve_info(qcontext.get('token'))
for k, v in token_infos.items():
qcontext.setdefault(k, v)
except:
qcontext['error'] = _("Invalid signup token")
qcontext['invalid_token'] = True
return qcontext
def do_signup(self, qcontext):
""" Shared helper that creates a res.partner out of a token """
values = { key: qcontext.get(key) for key in ('login', 'name', 'password') }
assert values.values(), "The form was not properly filled in."
assert values.get('password') == qcontext.get('confirm_password'), "Passwords do not match; please retype them."
supported_langs = [lang['code'] for lang in request.env['res.lang'].sudo().search_read([], ['code'])]
if request.lang in supported_langs:
values['lang'] = request.lang
self._signup_with_values(qcontext.get('token'), values)
request.env.cr.commit()
def _signup_with_values(self, token, values):
db, login, password = request.env['res.users'].sudo().signup(values, token)
request.env.cr.commit() # as authenticate will use its own cursor we need to commit the current transaction
uid = request.session.authenticate(db, login, password)
if not uid:
raise SignupError(_('Authentication Failed.'))
| gpl-3.0 | -7,384,755,517,312,322,000 | 46.675676 | 120 | 0.610544 | false | 4.144088 | true | false | false |
elianerpereira/gtg | GTG/tests/tree_testing.py | 1 | 9344 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
# If True, the TreeTester will automatically reorder nodes on the same level
# as a deleted node. If False, it means that Liblarch has the responsibility
# to handle that itself.
REORDER_ON_DELETE = False
class TreeTester:
""" A class that will check if a tree implementation is consistent
by connecting to emitted signals and crashing on any problem """
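# Illustrative (hypothetical) usage: attach a tester to a liblarch view tree,
# mutate the tree through its normal API, then check consistency:
#   tester = TreeTester(viewtree)
#   ... add / delete / move nodes ...
#   tester.test_validity()
#   tester.quit()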
def __init__(self, viewtree):
self.tree = viewtree
# both dict should always be synchronized
# They are the internal representation of the tree,
# based only on received signals
self.nodes = {}
self.paths = {}
self.tree.register_cllbck('node-added-inview', self.add)
self.tree.register_cllbck('node-deleted-inview', self.delete)
self.tree.register_cllbck('node-modified-inview', self.update)
self.tree.register_cllbck('node-children-reordered', self.reordered)
self.trace = "* * * * * * * *\n"
def add(self, nid, path):
self.trace += "adding %s to path %s\n" % (nid, str(path))
currentnode = self.paths.get(path, None)
if currentnode and currentnode != nid:
raise Exception('path %s is already occupied by %s' % (
str(path), nid))
if nid in self.nodes:
node = self.nodes[nid]
else:
node = []
self.nodes[nid] = node
if path not in node:
node.append(path)
self.paths[path] = nid
def delete(self, nid, path):
self.trace += "removing %s from path %s\n" % (nid, str(path))
if nid != self.paths.get(path, None):
error = '%s is not assigned to path %s\n' % (nid, str(path))
error += self.print_tree()
raise Exception(error)
if path not in self.nodes.get(nid, []):
raise Exception('%s is not a path of node %s' % (str(path), nid))
if REORDER_ON_DELETE:
index = path[-1:]
print "reorder on delete not yet implemented"
self.nodes[nid].remove(path)
if len(self.nodes[nid]) == 0:
self.nodes.pop(nid)
self.paths.pop(path)
# Move other paths lower like in real TreeModel
path_prefix = path[:-1]
index = path[-1]
assert path_prefix + (index, ) == path, "%s vs %s" % (
path_prefix + (index, ), path)
def check_prefix(path):
""" Is this path affected by the change?
Conditions:
* the same prefix
(3, 1, 2, 3) vs (3,1,2,4) OK
(3, 1, 2, 3) vs (3,1,2,4,0) OK
(3, 1, 2, 3) vs (3,2,2,4) FALSE
* higher index
(3, 1, 2, 3) vs (3,1,2,2) FALSE
"""
if len(path) <= len(path_prefix):
return False
for i, pos in enumerate(path_prefix):
if path[i] != pos:
return False
return path[len(path_prefix)] > index
paths = list(self.paths.keys())
paths.sort()
for path in paths:
old_path = path
if check_prefix(path) and len(path_prefix) > 1:
new_path = list(path)
print "new_path: %s" % str(new_path)
index = len(path_prefix)
new_path[index] = str(int(new_path[index]) - 1)
new_path = tuple(new_path)
print "new_path: %s" % str(new_path)
print "self.paths: %s" % str(self.paths)
assert new_path not in self.paths
nid = self.paths[old_path]
self.nodes[nid].remove(old_path)
del self.paths[old_path]
self.nodes[nid].append(new_path)
self.paths[new_path] = nid
def update(self, nid, path):
## self.tree.flush()
# self.trace += "updating %s in path %s\n" %(nid, str(path))
# error = "updating node %s for path %s\n" %(nid, str(path))
# if not self.nodes.has_key(nid):
# error += "%s is not in nodes !\n" %nid
# error += self.print_tree()
# raise Exception(error)
# #Nothing to do, we just update.
# for p in self.nodes[nid]:
# if self.paths[p] != nid:
# raise Exception('Mismatching path for %s'%nid)
# if not self.paths.has_key(path):
# error += '%s is not in stored paths (node %s)\n'%(str(path),nid)
# error += self.print_tree()
# raise Exception(error)
# n = self.paths[path]
# if path not in self.nodes[n] or n != nid:
# raise Exception('Mismatching node for path %s'%str(p))
# Because of the asynchronousness of update, this test
# doesn't work anymore
pass
def reordered(self, nid, path, neworder):
print "reordering"
self.trace += "reordering children of %s (%s) : %s\n" % (nid,
str(path),
neworder)
self.trace += "VR is %s\n" % self.tree.node_all_children()
if not path:
path = ()
i = 0
newpaths = {}
toremove = []
# we first update self.nodes with the new paths
while i < len(neworder):
if i != neworder[i]:
old = neworder[i]
oldp = path + (old, )
newp = path + (i, )
le = len(newp)
for pp in self.paths.keys():
if pp[0:le] == oldp:
n = self.paths[pp]
self.nodes[n].remove(pp)
newpp = newp + pp[le:]
self.nodes[n].append(newpp)
self.trace += " change %s path from %s to %s\n" % (
n, pp, newpp)
newpaths[newpp] = n
toremove.append(pp)
i += 1
# now we can update self.paths
for p in toremove:
self.paths.pop(p)
for p in newpaths:
self.trace += " adding %s to paths %s\n" % (newpaths[p], str(p))
self.paths[p] = newpaths[p]
def test_validity(self):
for n in self.nodes.keys():
paths = self.tree.get_paths_for_node(n)
if len(self.nodes[n]) == 0:
raise Exception('Node %s is stored without any path' % n)
for p in self.nodes[n]:
if self.paths[p] != n:
raise Exception('Mismatching path for %s' % n)
if p not in paths:
error = 'we have a unknown stored path for %s\n' % n
nn = self.tree.get_node_for_path(p)
parent = self.tree.get_node_for_path(p[:-1])
error += ' path %s is the path of %s\n' % (
str(p), str(nn))
error += ' parent is %s' % parent
# error += self.trace
raise Exception(error)
paths.remove(p)
if len(paths) > 0:
raise Exception('why is this path existing for %s' % n)
for p in self.paths.keys():
node = self.tree.get_node_for_path(p)
n = self.paths[p]
if n != node:
error = 'Node for path is %s but should be %s' % (node, n)
raise Exception(error)
if p not in self.nodes[n]:
error = 'Mismatching node for path %s\n' % str(p)
error += self.print_tree()
raise Exception(error)
if len(p) == 1 and len(self.nodes[n]) > 1:
error = 'Node %s has multiple paths and is in the VR\n' % n
error += self.print_tree()
raise Exception(error)
return True
def print_tree(self):
st = self.trace
st += "nodes are %s\n" % self.nodes
st += "paths are %s\n" % self.paths
return st
def quit(self):
self.tree.deregister_cllbck('node-added-inview', self.add)
self.tree.deregister_cllbck('node-deleted-inview', self.delete)
self.tree.deregister_cllbck('node-modified-inview', self.update)
self.tree.deregister_cllbck('node-children-reordered', self.reordered)
| gpl-3.0 | -659,458,706,772,728,400 | 39.626087 | 79 | 0.506528 | false | 3.790669 | false | false | false |
moodpulse/l2 | health/views.py | 1 | 3069 | from datetime import datetime, timedelta
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
import clients.models as clients
import directory.models as directory
from appconf.manager import SettingManager
from rmis_integration.client import Client
from slog.models import Log as slog
CLEANUP_TYPES_LOG = (
1,
2,
3,
4,
5,
6,
10,
16,
17,
18,
19,
20,
25,
27,
22,
23,
100,
998,
999,
1001,
2000,
2001,
2002,
2003,
2004,
2005,
2006,
3000,
3001,
5000,
6000,
10000,
20000,
60001,
60003,
)
@login_required
@staff_member_required
def log(request):
response = {"cnt": slog.objects.all().count(), "store_days": SettingManager.get("max_log_store_days", "120", "i")}
response["to_delete"] = slog.objects.filter(time__lt=datetime.today() - timedelta(days=response["store_days"]), type__in=CLEANUP_TYPES_LOG).count()
return JsonResponse(response)
@login_required
@staff_member_required
def log_cleanup(request):
_, cnt = slog.objects.filter(time__lt=datetime.today() - timedelta(days=SettingManager.get("max_log_store_days", "120", "i")), type__in=CLEANUP_TYPES_LOG).delete()
return HttpResponse(str(cnt.get("slog.Log", 0)), content_type="text/plain")
@login_required
@staff_member_required
def db(request):
response = []
return JsonResponse(response, safe=False)
@login_required
@staff_member_required
def rmis_check(request):
c = Client()
return HttpResponse(c.search_organization_id(check=True) + " " + c.search_dep_id(check=True), content_type="text/plain")
@login_required
@staff_member_required
def archive_without_directions(request):
objs = clients.Card.objects.filter(napravleniya__isnull=True, is_archive=True)
cnt = objs.count()
if request.GET.get("remove", "0") == "1":
_, cnt = objs.delete()
cnt = cnt.get("clients.Card", 0)
return HttpResponse(str(cnt), content_type="text/plain")
@login_required
@staff_member_required
def patients_without_cards(request):
objs = clients.Individual.objects.filter(card__isnull=True)
cnt = objs.count()
if request.GET.get("remove", "0") == "1":
_, cnt = objs.delete()
cnt = cnt.get("clients.Individual", 0)
return HttpResponse(str(cnt), content_type="text/plain")
@login_required
@staff_member_required
def sync_departments(request):
c = Client()
return HttpResponse("Добавлено: %s. Обновлено: %s." % c.department.sync_departments(), content_type="text/plain")
@login_required
@staff_member_required
def sync_researches(request):
r = directory.Researches.objects.filter(podrazdeleniye__isnull=True, subgroup__isnull=False)
cnt = r.count()
for research in r:
research.podrazdeleniye = research.subgroup.podrazdeleniye
research.save()
return HttpResponse(str(cnt), content_type="text/plain")
| mit | -9,199,544,822,434,005,000 | 24.638655 | 167 | 0.676827 | false | 3.266595 | false | false | false |
joac/singing-girl | singing_girl/dicts.py | 1 | 2808 | #! -*- coding: utf8 -*-
especiales_masculino = [
'cero',
'uno',
'dos',
'tres',
'cuatro',
'cinco',
'seis',
'siete',
'ocho',
'nueve',
'diez',
'once',
'doce',
'trece',
'catorce',
'quince',
'dieciséis',
'diecisiete',
'dieciocho',
'diecinueve',
'veinte',
'veintiuno',
'veintidós',
'veintitrés',
'veinticuatro',
'veinticinco',
'veintiséis',
'veintisiete',
'veintiocho',
'veintinueve'
]
especiales_femenino = [
'cero',
'una',
'dos',
'tres',
'cuatro',
'cinco',
'seis',
'siete',
'ocho',
'nueve',
'diez',
'once',
'doce',
'trece',
'catorce',
'quince',
'dieciséis',
'diecisiete',
'dieciocho',
'diecinueve',
'veinte',
'veintiuna',
'veintidós',
'veintitrés',
'veinticuatro',
'veinticinco',
'veintiséis',
'veintisiete',
'veintiocho',
'veintinueve'
]
especiales_apocopado = [
'cero',
'un',
'dos',
'tres',
'cuatro',
'cinco',
'seis',
'siete',
'ocho',
'nueve',
'diez',
'once',
'doce',
'trece',
'catorce',
'quince',
'dieciséis',
'diecisiete',
'dieciocho',
'diecinueve',
'veinte',
'veintiún',
'veintidós',
'veintitrés',
'veinticuatro',
'veinticinco',
'veintiséis',
'veintisiete',
'veintiocho',
'veintinueve',
]
decenas = [
'',
'diez',
'veinte',
'treinta',
'cuarenta',
'cincuenta',
'sesenta',
'setenta',
'ochenta',
'noventa',
]
centena_masculino = [
'',
'ciento',
'doscientos',
'trescientos',
'cuatrocientos',
'quinientos',
'seiscientos',
'setecientos',
'ochocientos',
'novecientos'
]
centena_apocopado = [
'',
'cien',
'doscientos',
'trescientos',
'cuatrocientos',
'quinientos',
'seiscientos',
'setecientos',
'ochocientos',
'novecientos'
]
centena_femenino = [
'',
'ciento',
'doscientas',
'trescientas',
'cuatrocientas',
'quinientas',
'seiscientas',
'setecientas',
'ochocientas',
'novecientas',
]
exponentes_plural = {
# 2:'cien',
3: 'mil',
6: 'millones',
12: 'billones',
18: 'trillones',
24: 'cuatrillones', # add more exponents here
}
exponentes_singular = {
# 2:'cien',
3: 'mil',
6: 'un millón',
12: 'un billón',
18: 'un trillón',
24: 'un cuatrillón', # add more exponents here
}
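# Illustrative lookups into the tables above (not part of the original module):
#   especiales_masculino[16]  -> 'dieciséis'
#   decenas[3]                -> 'treinta'
#   centena_femenino[5]       -> 'quinientas'
#   exponentes_plural[6]      -> 'millones'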
| mit | -7,793,795,708,195,571,000 | 15.309942 | 56 | 0.457512 | false | 2.60898 | false | true | false |
Shnatta/Register-Grades | Gradebook.py | 1 | 3202 | #Gradebook Project *Display a persons grades/where can he/she get a job!*
#Made by JonathanD
#Thanks to LoveFeelings(https://www.twitch.tv/lovefeelings), Subzidion(https://www.twitch.tv/subzidion), MichaelC212(https://www.twitch.tv/michaelc212)
import time
from prettytable import PrettyTable
x = PrettyTable()
c = PrettyTable()
def Wait(x): # Wait x amount of time before next line is executed!
time.sleep(x)
class Student(): # Class Student
def __init__(self, Fullname, age, born, classes, grades): #Arguments
self.Fullname = Fullname # Define
self.age = age
self.born = born
self.classes = classes
self.grades = grades
def stats(self):
return 'Full Name : {}, Age : {}, Born : {}, Grades : {}'.format(self.Fullname, self.age, self.born, self.grades)
def name(self):
return '{}'.format(self.Fullname)
def age(self):
return '{}'.format(self.age)
def born(self):
return '{}'.format(self.born)
def grades(self):
return '{}'.format(self.grades)
def name():
global name_
name_ = input('Full name: ')
if (len(name_) > 1 and not name_.isdigit()):
Wait(1)
else:
print("Error, only letters! (Try again)") #\n creates a new line!
Wait(1.5)
name()
def age():
global age_
age_ = input("Age: ")
if (age_.isdigit() ):
Wait(1.5)
else:
print("Error, please enter a valid number!")
age()
def born():
global born_
born_ = input("Birthday(dd.mm.yy): ")
if (born_):
Wait(1.5)
else:
Wait(1.5)
print("Error, please enter a valid birthday!")
born()
def classes():
global s_classes
s_classes = [] # Empty index! Add item/word to the index (index name goes here).append(obj)
add = input("Enter classes: ")
if (add):
s_classes.extend(add.split() ) #x.split() splits up a sentence(for index() ) / x.append(what you want to add)
print(s_classes)
Wait(1.5)
return (', '.join(s_classes) )
else:
Wait(1.5)
print("Error, something went wrong!")
classes()
def Getgrades():
global grades
grades = {}
for cl in s_classes:
while True:
try:
a = int(input("What did you get in {} ".format(cl)))
print("Registered for {}.".format(cl))
grades[cl] = a
break
except ValueError:
print("Please give a valid integer")
#mylist = ['spam', 'ham', 'eggs']
#print(mylist)
#print (', '.join(mylist) )
print("Made by JonathanD 2017\n")
name()
age()
born()
classes()
Getgrades()
Student_1 = Student(name_, age_, born_, s_classes, grades)
test = (', \n'.join(grades) )
value = sum(grades.values())
c = len(grades)
x.field_names = ["Name", "Born", "Age", "Classes", "Snitt(NO)"]
x.add_row([name_, born_ , age_, test, value / c])
#file = open("details.txt", "w")
#file.write(x)
#file.close()
print(x) #+ "\n(Data saved @ 'details.txt')")
Wait(200)
| apache-2.0 | 6,555,662,637,556,242,000 | 28.495238 | 151 | 0.534666 | false | 3.34238 | false | false | false |
kramerfelix/accpy | accpy/simulate/lsd.py | 2 | 6302 | # -*- coding: utf-8 -*-
''' accpy.simulate.lsd
author: felix.kramer(at)physik.hu-berlin.de
'''
from __future__ import division
from numpy import (eye, dot, trapz, pi, nanmean, array, newaxis,
hstack, concatenate, empty, dstack, sqrt, zeros,
vstack)
from numpy.random import standard_normal
from numpy.linalg import inv
from .slicing import cellslice
from .rmatrices import rmatrix, UCS2R
from .tracking import (initialtwiss, tracktwiss4, trackparts)
from .radiate import dipolering, synchroints
from .particles import part2mqey
from ..lattices.reader import latt2py
from ..visualize.plot import (plotopticpars_closed, plottrajs, plotbeamsigma,
plotopticpars_open, plotoptic, plotphasespace,
plotdisptraj)
def oneturn(UC, P_UC, N_UC, gamma):
M = eye(6)
rho = []
LD = []
D_UC = 0
for i in range(P_UC):
UC_tmp = UC[:, i]
# R matrices of unsliced unit cell
M = dot(rmatrix(UC_tmp, gamma), M)
if UC_tmp[0] == 1:
LD.append(UC_tmp[1])
rho.append(UC_tmp[2])
D_UC += 1
UC_tmp = None
LD = nanmean(LD)
rho = nanmean(rho)
UD = N_UC*D_UC*LD
xtwiss0, ytwiss0, xdisp0 = initialtwiss(M)
# one turn R matrix of ring
M1T = eye(6)
for i in range(N_UC):
M1T = dot(M, M1T)
return xtwiss0, ytwiss0, xdisp0, rho, D_UC, UD, LD
def gettunes(s, xtwiss, ytwiss, N_UC):
Qx = N_UC*trapz(1./xtwiss[0, 0, :], s)/2/pi
Qy = N_UC*trapz(1./ytwiss[0, 0, :], s)/2/pi
return Qx, Qy
def getchromaticity(s, xtwiss, ytwiss, N_UC, UCS):
kx, ky = [], [] # negative k == focus
for k, t in zip(UCS[4, :], UCS[0, :]):
if t == 3:
kx.append(-k)
ky.append(k)
elif t == 4:
kx.append(k)
ky.append(-k)
else:
kx.append(0)
ky.append(0)
Xx = N_UC*trapz(kx*xtwiss[0, 0, 1:], s[1:])/4/pi
Xy = N_UC*trapz(ky*ytwiss[0, 0, 1:], s[1:])/4/pi
return Xx, Xy
def lsd(closed, latt, slices, mode, particles, rounds):
if closed:
(particle, E, I, UC, diagnostics, N_UC,
HF_f, HF_V) = latt2py(latt, closed)
else:
(particle, E, I, UC, diagnostics, N_UC,
xtwiss0, ytwiss0, xdisp0,
emit_x, emit_y, emit_s) = latt2py(latt, closed)
m, q, E0, gamma, P_UC = part2mqey(E, UC, particle)
if closed:
xtwiss0, ytwiss0, xdisp0, rho, D_UC, UD, LD = oneturn(UC, P_UC, N_UC, gamma)
# get sliced unit cell for finer tracking
s, UCS, P_UCS = cellslice(UC, P_UC, slices)
# calculate according sliced R matrix
R = UCS2R(P_UCS, UCS, gamma)
# track twiss and dispersion
xtwiss, ytwiss, xdisp, xytwiss = tracktwiss4(R, P_UCS, closed, xtwiss0,
ytwiss0, xdisp0)
if closed:
# tune Q_u:=1/2pi*int(ds/beta_u(s))
Qx, Qy = gettunes(s, xtwiss, ytwiss, N_UC)
# nat chromaticity xi_u:=1/4pi*int(k_u(s)*beta_u(s)) with k_y = - k_x
Xx, Xy = getchromaticity(s, xtwiss, ytwiss, N_UC, UCS)
# calculate according ring of dipoles
sdip, disperdip, xtwissdip, ytwissdip = \
dipolering(s, N_UC, UD, P_UCS, UCS, xdisp, xtwiss, ytwiss, slices,
D_UC)
# synchrotron integrals
(Cq, Jx, emiteqx, tau_x, Jy, E, emiteqy, tau_y, alpha_mc, eta_mc,
gamma_tr, Q_s, Js, sigma_E, sigma_tau, sigma_s, tau_s, U_rad, P_ges,
E_c, lambda_c) = \
synchroints(N_UC, s, gamma, xtwissdip, disperdip, sdip, rho, E, E0,
I, q, m, ytwiss)
if mode == 'trackbeta':
figs = plotoptic(UC, diagnostics, s, xtwiss, ytwiss, xdisp)
if closed:
figs.append(plotopticpars_closed(xtwiss, xdisp, ytwiss, gamma, Qx,
Xx, Jx, emiteqx, tau_x, Qy, Xy,
Jy, E, emiteqy, tau_y, alpha_mc,
eta_mc, gamma_tr, Q_s, Js,
sigma_E, sigma_tau, sigma_s,
tau_s, U_rad, P_ges, E_c,
lambda_c))
sigx = sqrt(xtwiss[0, 0, :]*emiteqx+(xdisp[0, :]*sigma_E)**2)
sigy = sqrt(ytwiss[0, 0, :]*emiteqy)
else:
figs.append(plotopticpars_open(xtwiss, xdisp, ytwiss, gamma, E))
sigx = sqrt(xtwiss[0, 0, :]*emit_x+(xdisp[0, :]*emit_s)**2)
sigy = sqrt(ytwiss[0, 0, :]*emit_y)
figs.append(plotbeamsigma(UC, diagnostics, s, sigx, sigy))
elif mode == 'trackpart':
# [x, x', y, y', l, delta_p/p_0]
# [mm, mrad, mm, mrad, mm, promille]
ideal = array([0, 0, 0, 0, 0, 0]) # Ideal particle
start = array([1, 1, 1, 1, 1, 0]) # 1 sigma particle
distmean = 1e-3*ideal[newaxis, :].T
distsigma = 1e-3*start[newaxis, :].T
# emittance of the given 1-sigma particle (Wille 3.142)
emittx = dot(start[:2], dot(inv(xtwiss0), start[:2]))
emitty = dot(start[2:4], dot(inv(ytwiss0), start[2:4]))
# Envelope E(s)=sqrt(epsilon_i*beta_i(s))
ydisp = zeros([1, P_UCS+1])
emit_x_beta = array([emittx*xtwiss[0, 0, :], emitty*ytwiss[0, 0, :]])
dispdelta = (vstack([xdisp[0, :], ydisp[0, :]])*1E-3*distsigma[5])**2
envelope = sqrt(dispdelta + emit_x_beta)
# start vectors of normally distributed ensemble
points = P_UCS*N_UC*rounds
X0 = (distsigma - distmean)*standard_normal([6, particles])
X0 = dstack([X0, empty([6, particles, points])])
X0[:, :2, 0] = hstack([distmean, distsigma])
X_S = [X0[:, i, :] for i in range(particles)]
X = trackparts(R, N_UC, X_S, rounds)
s0 = s
envelope0 = envelope
for i in range(1, N_UC):
s = concatenate([s, s0[1:]+s0[-1]*i])[:]
envelope = hstack([envelope, envelope0[:, 1:]])
figs = plottrajs(s, X, rounds, envelope)
figs.append(plotphasespace(s, X, rounds, xtwiss, emittx, ytwiss, emitty))
#figs.append(plotdisptraj(s, P_UCS, E, E0, UCS, UC, diagnostics))
return figs
| gpl-3.0 | 8,628,564,006,182,684,000 | 38.886076 | 84 | 0.530149 | false | 2.766462 | false | false | false |
Extremus-io/djwebsockets | djwebsockets/websocket.py | 1 | 1528 | from djwebsockets.mixins import BaseWSMixin, MixinFail
import asyncio
class WebSocket:
loop = None
def __init__(self, socket, close, send):
self.socket = socket
self.close_handler = close
self.send_handler = send
self.id = id(socket)
self.closed = False
def send(self, Message):
self.loop.call_soon_threadsafe(self._send, Message)
def _send(self, Message):
self.send_handler.put_nowait(Message)
def close(self):
self.closed = True
self.loop.call_soon_threadsafe(self._close)
def _close(self):
self.close_handler.set_result(-1)
class BaseWSClass:
@staticmethod
def super_classes(cls):
return reversed(cls.__mro__)
@classmethod
def call_methods(cls, method, *args):
for clus in cls.super_classes(cls):
try:
if hasattr(clus, method):
getattr(clus, method)(*args)
except MixinFail:
args[0].close()
return
@classmethod
def _on_connect(cls, socket, path):
cls.call_methods("on_connect", socket, path)
@classmethod
def _on_message(cls, socket, message):
cls.call_methods("on_message", socket, message)
@classmethod
def _on_close(cls, socket):
cls.call_methods("on_close", socket) | mit | -5,131,066,702,609,105,000 | 27.849057 | 63 | 0.528141 | false | 4.304225 | false | false | false |
linuxscout/arramooz | scripts/verbs/dict2spell/verbs/spellverbconst.py | 2 | 5873 | #!/usr/bin/python2
# -*- coding=utf-8 -*-
#************************************************************************
# $Id: spellverbconst.py,v 0.7 2010/12/26 01:10:00 Taha Zerrouki $
#
# ------------
# Description:
# ------------
# Copyright (c) 2009, Arabtechies, Arabeyes Taha Zerrouki
#
#
# -----------------
# Revision Details: (Updated by Revision Control System)
# -----------------
# $Date: 2009/06/02 01:10:00 $
# $Author: Taha Zerrouki $
# $Revision: 0.7 $
# $Source: arabtechies.sourceforge.net
#
#***********************************************************************/
from libqutrub.verb_const import *
# table of suffixes of double transitive verbs
# table of suffixes of "qalbi" (cognition) transitive verbs whose object is a rational (human) being
TabSuffixesPronominale={
PronounAna :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'1'},
PronounNahnu :{'full': u" HcHdHeHfHgHhHi".replace(' ','') ,'alias':'2'},
PronounAnta :{'full': u" HbHcHd HfHg Hi".replace(' ','') ,'alias':'3'},
PronounAnti :{'full': u" HbHc HeHfHgHhHi".replace(' ','') ,'alias':'4'},
PronounAntuma :{'full': u" HbHc HeHfHgHhHi".replace(' ','') ,'alias':'5'},
PronounAntuma_f:{'full': u" HbHc HfHgHhHi".replace(' ','') ,'alias':'6'},
PronounAntum :{'full': u" HjHk Ho Hq".replace(' ','') ,'alias':'7'},
PronounAntunna :{'full': u" HbHc HgHhHi".replace(' ','') ,'alias':'8'},
PronounHuwa :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'9'},
PronounHya :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'10'},
PronounHuma :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'11'},
PronounHuma_f :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'12'},
PronounHum :{'full': u" HjHkHlHmHnHoHpHq".replace(' ','') ,'alias':'13'},
PronounHunna :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'14'},
}
# table of suffixes of non-"qalbi" transitive verbs
TabSuffixes={
PronounAna :{'full': u" HdHeHfHgHhHi".replace(' ','') ,'alias':'15'},
PronounNahnu :{'full': u" HdHeHfHgHhHi".replace(' ','') ,'alias':'16'},
PronounAnta :{'full': u" HbHc Hi".replace(' ','') ,'alias':'17'},
PronounAnti :{'full': u" HbHc Hi".replace(' ','') ,'alias':'18'},
PronounAntuma :{'full': u" HbHc Hi".replace(' ','') ,'alias':'19'},
PronounAntuma_f:{'full': u" HbHc Hi".replace(' ','') ,'alias':'20'},
PronounAntum :{'full': u" HjHk Hq".replace(' ','') ,'alias':'21'},
PronounAntunna :{'full': u" HbHc Hi".replace(' ','') ,'alias':'22'},
PronounHuwa :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'23'},
PronounHya :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'24'},
PronounHuma :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'25'},
PronounHuma_f :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'26'},
PronounHum :{'full': u" HjHkHlHmHnHoHpHq".replace(' ','') ,'alias':'27'},
PronounHunna :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'28'},
}
TabPrefixes={
# const for Tense Name
TensePast :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'29'},
TenseFuture :{'full': u"PaPbPc PePfPg Pj".replace(' ','') ,'alias':'30'},
TenseImperative :{'full': u" Pb Pe ".replace(' ','') ,'alias':'31'},
TenseConfirmedImperative :{'full': u" Pb Pe ".replace(' ','') ,'alias':'32'},
TenseJussiveFuture :{'full': u" Pb Pe Pi ".replace(' ','') ,'alias':'33'},
TenseSubjunctiveFuture :{'full': u" Pb PdPe Ph ".replace(' ','') ,'alias':'34'},
TenseConfirmedFuture :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'35'},
TensePassivePast :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'36'},
TensePassiveFuture :{'full': u"PaPbPc PePfPg Pj".replace(' ','') ,'alias':'37'},
TensePassiveJussiveFuture :{'full': u" Pb Pe Pi ".replace(' ','') ,'alias':'38'},
TensePassiveSubjunctiveFuture:{'full': u" Pb PdPe Ph ".replace(' ','') ,'alias':'39'},
TensePassiveConfirmedFuture :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'40'},
}
# table of suffixes of double transitive verbs
# table of suffixes of verbs transitive to two objects
TabDisplayTagDouble={
PronounAna :{'full': u"HbHc",'alias':'41'},
PronounNahnu :{'full': u"HbHc",'alias':'42'},
PronounAnta :{'full': u"HbHd",'alias':'43'},
PronounAnti :{'full': u"HbHd",'alias':'44'},
PronounAntuma :{'full': u"HbHd",'alias':'45'},
PronounAntuma_f:{'full': u"HbHd",'alias':'46'},
PronounAntum :{'full': u"HbHd",'alias':'47'},
PronounAntunna :{'full': u"HbHd",'alias':'48'},
PronounHuwa :{'full': u"HbHcHd",'alias':'49'},
PronounHya :{'full': u"HbHcHd",'alias':'50'},
PronounHuma :{'full': u"HbHcHd",'alias':'51'},
PronounHuma_f :{'full': u"HbHcHd",'alias':'52'},
PronounHum :{'full': u"HbHcHd",'alias':'53'},
PronounHunna :{'full': u"HbHcHd",'alias':'54'},
}
CodePronoun={
PronounAna :'1',
PronounNahnu :'2',
PronounAnta :'3',
PronounAnti :'4',
PronounAntuma :'5',
PronounAntuma_f:'6',
PronounAntum :'7',
PronounAntunna :'8',
PronounHuwa :'9',
PronounHya :'10',
PronounHuma :'11',
PronounHuma_f :'12',
PronounHum :'13',
PronounHunna :'14',
}
CodeTense={
# const for Tense Name
TensePast :'1',
TenseFuture :'2',
TenseImperative :'3',
TenseConfirmedImperative :'4',
TenseJussiveFuture :'5',
TenseSubjunctiveFuture :'6',
TenseConfirmedFuture :'7',
TensePassivePast :'8',
TensePassiveFuture :'9',
TensePassiveJussiveFuture :'10',
TensePassiveSubjunctiveFuture:'11',
TensePassiveConfirmedFuture :'12',
}
| gpl-2.0 | 3,238,434,920,257,254,400 | 43.091603 | 94 | 0.548823 | false | 2.331853 | false | false | false |
waveform80/picraft | picraft/events.py | 1 | 35374 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# An alternate Python Minecraft library for the Raspberry Pi
# Copyright (c) 2013-2016 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The events module defines the :class:`Events` class, which provides methods for
querying events in the Minecraft world, and the :class:`BlockHitEvent`,
:class:`PlayerPosEvent`, :class:`ChatPostEvent`, and :class:`IdleEvent` classes
which represent the various event types.
.. note::
All items in this module are available from the :mod:`picraft` namespace
without having to import :mod:`picraft.events` directly.
The following items are defined in the module:
Events
======
.. autoclass:: Events
:members:
BlockHitEvent
=============
.. autoclass:: BlockHitEvent(pos, face, player)
:members:
PlayerPosEvent
==============
.. autoclass:: PlayerPosEvent(old_pos, new_pos, player)
:members:
ChatPostEvent
=============
.. autoclass:: ChatPostEvent(message, player)
:members:
IdleEvent
=========
.. autoclass:: IdleEvent()
:members:
"""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import logging
import threading
import time
import warnings
from collections import namedtuple, Container
from weakref import WeakSet
from functools import update_wrapper
from types import FunctionType
from .exc import ConnectionClosed, NoHandlersWarning
from .vector import Vector
from .player import Player
logger = logging.getLogger('picraft')
class BlockHitEvent(namedtuple('BlockHitEvent', ('pos', 'face', 'player'))):
"""
Event representing a block being hit by a player.
This tuple derivative represents the event resulting from a player striking
a block with their sword in the Minecraft world. Users will not normally
need to construct instances of this class, rather they are constructed and
returned by calls to :meth:`~Events.poll`.
.. note::
Please note that the block hit event only registers when the player
*right clicks* with the sword. For some reason, left clicks do not
count.
.. attribute:: pos
A :class:`~picraft.vector.Vector` indicating the position of the block
which was struck.
.. attribute:: face
A string indicating which side of the block was struck. This can be one
of six values: 'x+', 'x-', 'y+', 'y-', 'z+', or 'z-'. The value
indicates the axis, and direction along that axis, that the side faces:
.. image:: images/block_faces.*
.. attribute:: player
A :class:`~picraft.player.Player` instance representing the player that
hit the block.
"""
@classmethod
def from_string(cls, connection, s):
v, f, p = s.rsplit(',', 2)
return cls(Vector.from_string(v), {
0: 'y-',
1: 'y+',
2: 'z-',
3: 'z+',
4: 'x-',
5: 'x+',
}[int(f)], Player(connection, int(p)))
@property
def __dict__(self):
# Ensure __dict__ property works in Python 3.3 and above.
return super(BlockHitEvent, self).__dict__
def __repr__(self):
return '<BlockHitEvent pos=%s face=%r player=%d>' % (
self.pos, self.face, self.player.player_id)
class PlayerPosEvent(namedtuple('PlayerPosEvent', ('old_pos', 'new_pos', 'player'))):
"""
Event representing a player moving.
This tuple derivative represents the event resulting from a player moving
within the Minecraft world. Users will not normally need to construct
instances of this class, rather they are constructed and returned by calls
to :meth:`~Events.poll`.
.. attribute:: old_pos
A :class:`~picraft.vector.Vector` indicating the location of the player
prior to this event. The location includes decimal places (it is not
the tile-position, but the actual position).
.. attribute:: new_pos
A :class:`~picraft.vector.Vector` indicating the location of the player
as of this event. The location includes decimal places (it is not
the tile-position, but the actual position).
.. attribute:: player
A :class:`~picraft.player.Player` instance representing the player that
moved.
"""
@property
def __dict__(self):
# Ensure __dict__ property works in Python 3.3 and above.
return super(PlayerPosEvent, self).__dict__
def __repr__(self):
return '<PlayerPosEvent old_pos=%s new_pos=%s player=%d>' % (
self.old_pos, self.new_pos, self.player.player_id)
class ChatPostEvent(namedtuple('ChatPostEvent', ('message', 'player'))):
"""
Event representing a chat post.
This tuple derivative represents the event resulting from a chat message
being posted in the Minecraft world. Users will not normally need to
construct instances of this class, rather they are constructed and returned
by calls to :meth:`~Events.poll`.
.. note::
Chat events are only generated by the Raspberry Juice server, not by
Minecraft Pi edition.
.. attribute:: message
The message that was posted to the world.
.. attribute:: player
A :class:`~picraft.player.Player` instance representing the player that
moved.
"""
@classmethod
def from_string(cls, connection, s):
p, m = s.split(',', 1)
return cls(m, Player(connection, int(p)))
@property
def __dict__(self):
# Ensure __dict__ property works in Python 3.3 and above.
return super(ChatPostEvent, self).__dict__
def __repr__(self):
return '<ChatPostEvent message=%s player=%d>' % (
self.message, self.player.player_id)
class IdleEvent(namedtuple('IdleEvent', ())):
"""
Event that fires in the event that no other events have occurred since the
last poll. This is only used if :attr:`Events.include_idle` is ``True``.
"""
@property
def __dict__(self):
# Ensure __dict__ property works in Python 3.3 and above.
return super(IdleEvent, self).__dict__
def __repr__(self):
return '<IdleEvent>'
class Events(object):
"""
This class implements the :attr:`~picraft.world.World.events` attribute.
There are two ways of responding to picraft's events: the first is to
:meth:`poll` for them manually, and process each event in the resulting
list::
>>> for event in world.events.poll():
... print(repr(event))
...
<BlockHitEvent pos=1,1,1 face="y+" player=1>,
<PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>
The second is to "tag" functions as event handlers with the decorators
provided and then call the :meth:`main_loop` function which will handle
polling the server for you, and call all the relevant functions as needed::
@world.events.on_block_hit(pos=Vector(1,1,1))
def hit_block(event):
print('You hit the block at %s' % event.pos)
world.events.main_loop()
By default, only block hit events will be tracked. This is because it is
the only type of event that the Minecraft server provides information about
itself, and thus the only type of event that can be processed relatively
efficiently. If you wish to track player positions, assign a set of player
ids to the :attr:`track_players` attribute. If you wish to include idle
events (which fire when nothing else is produced in response to
:meth:`poll`) then set :attr:`include_idle` to ``True``.
.. note::
If you are using a Raspberry Juice server, chat post events are also
tracked by default. Chat post events are only supported with Raspberry
Juice servers; Minecraft Pi edition doesn't support chat post events.
Finally, the :attr:`poll_gap` attribute specifies how long to pause during
each iteration of :meth:`main_loop` to permit event handlers some time to
interact with the server. Setting this to 0 will provide the fastest
response to events, but will result in event handlers having to fight with
event polling for access to the server.
"""
def __init__(self, connection, poll_gap=0.1, include_idle=False):
self._connection = connection
self._handlers = []
self._handler_instances = WeakSet()
self._poll_gap = poll_gap
self._include_idle = include_idle
self._track_players = {}
def _get_poll_gap(self):
return self._poll_gap
def _set_poll_gap(self, value):
self._poll_gap = float(value)
poll_gap = property(_get_poll_gap, _set_poll_gap, doc="""\
The length of time (in seconds) to pause during :meth:`main_loop`.
This property specifies the length of time to wait at the end of each
iteration of :meth:`main_loop`. By default this is 0.1 seconds.
The purpose of the pause is to give event handlers executing in the
background time to communicate with the Minecraft server. Setting this
to 0.0 will result in faster response to events, but also starves
threaded event handlers of time to communicate with the server,
resulting in "choppy" performance.
""")
def _get_track_players(self):
return self._track_players.keys()
def _set_track_players(self, value):
try:
self._track_players = {
pid: Player(self._connection, pid).pos.round(1)
for pid in value
}
except TypeError:
if not isinstance(value, int):
raise ValueError(
'track_players value must be a player id '
'or a sequence of player ids')
self._track_players = {
value: Player(self._connection, value).pos.round(1)
}
if self._connection.server_version != 'raspberry-juice':
# Filter out calculated directions for untracked players
self._connection._directions = {
pid: delta
for (pid, delta) in self._connection._directions.items()
if pid in self._track_players
}
track_players = property(_get_track_players, _set_track_players, doc="""\
The set of player ids for which movement should be tracked.
By default the :meth:`poll` method will not produce player position
events (:class:`PlayerPosEvent`). Producing these events requires extra
interactions with the Minecraft server (one for each player tracked)
which slow down response to block hit events.
If you wish to track player positions, set this attribute to the set of
player ids you wish to track and their positions will be stored. The
next time :meth:`poll` is called it will query the positions for all
specified players and fire player position events if they have changed.
Given that the :attr:`~picraft.world.World.players` attribute
represents a dictionary mapping player ids to players, if you wish to
track all players you can simply do::
>>> world.events.track_players = world.players
""")
def _get_include_idle(self):
return self._include_idle
def _set_include_idle(self, value):
self._include_idle = bool(value)
include_idle = property(_get_include_idle, _set_include_idle, doc="""\
If ``True``, generate an idle event when no other events would be
generated by :meth:`poll`. This attribute defaults to ``False``.
""")
def clear(self):
"""
Forget all pending events that have not yet been retrieved with
:meth:`poll`.
This method is used to clear the list of events that have occurred
since the last call to :meth:`poll` without retrieving them. This is
useful for ensuring that events subsequently retrieved definitely
occurred *after* the call to :meth:`clear`.
"""
self._set_track_players(self._get_track_players())
self._connection.send('events.clear()')
def poll(self):
"""
Return a list of all events that have occurred since the last call to
:meth:`poll`.
For example::
>>> w = World()
>>> w.events.track_players = w.players
>>> w.events.include_idle = True
>>> w.events.poll()
[<PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>,
<BlockHitEvent pos=1,1,1 face="x+" player=1>,
<BlockHitEvent pos=1,1,1 face="x+" player=1>]
>>> w.events.poll()
[<IdleEvent>]
"""
def player_pos_events(positions):
for pid, old_pos in positions.items():
player = Player(self._connection, pid)
new_pos = player.pos.round(1)
if old_pos != new_pos:
if self._connection.server_version != 'raspberry-juice':
# Calculate directions for tracked players on platforms
# which don't provide it natively
self._connection._directions[pid] = new_pos - old_pos
yield PlayerPosEvent(old_pos, new_pos, player)
positions[pid] = new_pos
def block_hit_events():
s = self._connection.transact('events.block.hits()')
if s:
for e in s.split('|'):
yield BlockHitEvent.from_string(self._connection, e)
def chat_post_events():
if self._connection.server_version == 'raspberry-juice':
s = self._connection.transact('events.chat.posts()')
if s:
for e in s.split('|'):
yield ChatPostEvent.from_string(self._connection, e)
events = list(player_pos_events(self._track_players)) + list(block_hit_events()) + list(chat_post_events())
if events:
return events
elif self._include_idle:
return [IdleEvent()]
else:
return []
def main_loop(self):
"""
Starts the event polling loop when using the decorator style of event
handling (see :meth:`on_block_hit`).
This method will not return, so be sure that you have specified all
your event handlers before calling it. The event loop can only be
broken by an unhandled exception, or by closing the world's connection
(in the latter case the resulting :exc:`~picraft.exc.ConnectionClosed`
exception will be suppressed as it is assumed that you want to end the
script cleanly).
"""
logger.info('Entering event loop')
try:
while True:
self.process()
time.sleep(self.poll_gap)
except ConnectionClosed:
logger.info('Connection closed; exiting event loop')
def process(self):
"""
Poll the server for events and call any relevant event handlers
registered with :meth:`on_block_hit`.
        This method is called repeatedly by the event handler loop implemented by
:meth:`main_loop`; developers should only call this method when
implementing their own event loop manually, or when their (presumably
non-threaded) event handler is engaged in a long operation and they
wish to permit events to be processed in the meantime.
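
        For example, a minimal hand-rolled event loop (a sketch only; it
        assumes a connected :class:`~picraft.world.World` called ``world``)
        might look like::

            import time
            from picraft import World

            world = World()
            while True:
                world.events.process()
                time.sleep(world.events.poll_gap)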
"""
for event in self.poll():
for handler in self._handlers:
if handler.matches(event):
handler.execute(event)
def has_handlers(self, cls):
"""
Decorator for registering a class as containing picraft event handlers.
If you are writing a class which contains methods that you wish to
use as event handlers for picraft events, you must decorate the class
with ``@has_handlers``. This will ensure that picraft tracks instances
of the class and dispatches events to each instance that exists when
the event occurs.
For example::
from picraft import World, Block, Vector, X, Y, Z
world = World()
@world.events.has_handlers
class HitMe(object):
def __init__(self, pos):
self.pos = pos
self.been_hit = False
world.blocks[self.pos] = Block('diamond_block')
@world.events.on_block_hit()
def was_i_hit(self, event):
if event.pos == self.pos:
self.been_hit = True
print('Block at %s was hit' % str(self.pos))
p = world.player.tile_pos
block1 = HitMe(p + 2*X)
block2 = HitMe(p + 2*Z)
world.events.main_loop()
Class-based handlers are an advanced feature and have some notable
limitations. For instance, in the example above the ``on_block_hit``
handler couldn't be declared with the block's position because this was
only known at instance creation time, not at class creation time (which
was when the handler was registered).
Furthermore, class-based handlers must be regular instance methods
(those which accept the instance, self, as the first argument); they
cannot be class methods or static methods.
.. note::
The ``@has_handlers`` decorator takes no arguments and shouldn't
be called, unlike event handler decorators.
"""
# Search the class for handler methods, appending the class to the
# handler's list of associated classes (if you're thinking why is this
# a collection, consider that a method can be associated with multiple
# classes either by inheritance or direct assignment)
handlers_found = 0
for item in dir(cls):
item = getattr(cls, item, None)
if item: # PY2
item = getattr(item, 'im_func', item)
if item and isinstance(item, FunctionType):
try:
item._picraft_classes.add(cls)
handlers_found += 1
except AttributeError:
pass
if not handlers_found:
warnings.warn(NoHandlersWarning('no handlers found in %s' % cls))
return cls
# Replace __init__ on the class with a closure that adds every instance
# constructed to self._handler_instances. As this is a WeakSet,
# instances that die will be implicitly removed
old_init = getattr(cls, '__init__', None)
def __init__(this, *args, **kwargs):
if old_init:
old_init(this, *args, **kwargs)
self._handler_instances.add(this)
if old_init:
update_wrapper(__init__, old_init)
cls.__init__ = __init__
return cls
def _handler_closure(self, f):
def handler(event):
if not f._picraft_classes:
# The handler is a straight-forward function; just call it
f(event)
else:
# The handler is an unbound method (yes, I know these don't
# really exist in Python 3; it's a function which is expecting
# to be called from an object instance if you like). Here we
# search the set of instances of classes which were registered
# as having handlers (by @has_handlers)
for cls in f._picraft_classes:
for inst in self._handler_instances:
# Check whether the instance has the right class; note
# that we *don't* use isinstance() here as we want an
# exact match
if inst.__class__ == cls:
# Bind the function to the instance via its
# descriptor
f.__get__(inst, cls)(event)
update_wrapper(handler, f)
return handler
def on_idle(self, thread=False, multi=True):
"""
Decorator for registering a function/method as an idle handler.
This decorator is used to mark a function as an event handler which
will be called when no other event handlers have been called in an
iteration of :meth:`main_loop`. The function will be called with the
corresponding :class:`IdleEvent` as the only argument.
Note that idle events will only be generated if :attr:`include_idle`
is set to ``True``.
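
        For example (a minimal sketch, assuming a connected
        :class:`~picraft.world.World` called ``world``)::

            from picraft import World

            world = World()
            world.events.include_idle = True

            @world.events.on_idle()
            def handle_idle(event):
                world.say('Nothing happened recently')

            world.events.main_loop()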
"""
def decorator(f):
self._handlers.append(
IdleHandler(self._handler_closure(f), thread, multi))
f._picraft_classes = set()
return f
return decorator
def on_player_pos(self, thread=False, multi=True, old_pos=None, new_pos=None):
"""
Decorator for registering a function/method as a position change
handler.
This decorator is used to mark a function as an event handler which
will be called for any events indicating that a player's position has
changed while :meth:`main_loop` is executing. The function will be
called with the corresponding :class:`PlayerPosEvent` as the only
argument.
The *old_pos* and *new_pos* parameters can be used to specify vectors
or sequences of vectors (including a
:class:`~picraft.vector.vector_range`) that the player position events
must match in order to activate the associated handler. For example, to
fire a handler every time any player enters or walks over blocks within
(-10, 0, -10) to (10, 0, 10)::
from picraft import World, Vector, vector_range
world = World()
world.events.track_players = world.players
from_pos = Vector(-10, 0, -10)
to_pos = Vector(10, 0, 10)
@world.events.on_player_pos(new_pos=vector_range(from_pos, to_pos + 1))
def in_box(event):
world.say('Player %d stepped in the box' % event.player.player_id)
world.events.main_loop()
Various effects can be achieved by combining *old_pos* and *new_pos*
filters. For example, one could detect when a player crosses a boundary
in a particular direction, or decide when a player enters or leaves a
particular area.
Note that only players specified in :attr:`track_players` will generate
player position events.
"""
def decorator(f):
self._handlers.append(
PlayerPosHandler(self._handler_closure(f),
thread, multi, old_pos, new_pos))
f._picraft_classes = set()
return f
return decorator
def on_block_hit(self, thread=False, multi=True, pos=None, face=None):
"""
Decorator for registering a function/method as a block hit handler.
This decorator is used to mark a function as an event handler which
will be called for any events indicating a block has been hit while
:meth:`main_loop` is executing. The function will be called with the
corresponding :class:`BlockHitEvent` as the only argument.
The *pos* parameter can be used to specify a vector or sequence of
vectors (including a :class:`~picraft.vector.vector_range`); in this
case the event handler will only be called for block hits on matching
vectors.
The *face* parameter can be used to specify a face or sequence of
faces for which the handler will be called.
For example, to specify that one handler should be called for hits
on the top of any blocks, and another should be called only for hits
on any face of block at the origin one could use the following code::
from picraft import World, Vector
world = World()
@world.events.on_block_hit(pos=Vector(0, 0, 0))
def origin_hit(event):
world.say('You hit the block at the origin')
@world.events.on_block_hit(face="y+")
def top_hit(event):
world.say('You hit the top of a block at %d,%d,%d' % event.pos)
world.events.main_loop()
The *thread* parameter (which defaults to ``False``) can be used to
specify that the handler should be executed in its own background
thread, in parallel with other handlers.
Finally, the *multi* parameter (which only applies when *thread* is
``True``) specifies whether multi-threaded handlers should be allowed
to execute in parallel. When ``True`` (the default), threaded handlers
execute as many times as activated in parallel. When ``False``, a
single instance of a threaded handler is allowed to execute at any
given time; simultaneous activations are ignored (but not queued, as
with unthreaded handlers).
"""
def decorator(f):
self._handlers.append(
BlockHitHandler(self._handler_closure(f),
thread, multi, pos, face))
f._picraft_classes = set()
return f
return decorator
def on_chat_post(self, thread=False, multi=True, message=None):
"""
Decorator for registering a function/method as a chat event handler.
This decorator is used to mark a function as an event handler which
will be called for events indicating a chat message was posted to
the world while :meth:`main_loop` is executing. The function will be
called with the corresponding :class:`ChatPostEvent` as the only
argument.
.. note::
Only the Raspberry Juice server generates chat events; Minecraft
Pi Edition does not support this event type.
The *message* parameter can be used to specify a string or regular
expression; in this case the event handler will only be called for chat
messages which match this value. For example::
import re
from picraft import World, Vector
world = World()
@world.events.on_chat_post(message="hello world")
def echo(event):
world.say("Hello player %d!" % event.player.player_id)
@world.events.on_chat_post(message=re.compile(r"teleport_me \d+,\d+,\d+"))
def teleport(event):
x, y, z = event.message[len("teleport_me "):].split(",")
event.player.pos = Vector(int(x), int(y), int(z))
world.events.main_loop()
The *thread* parameter (which defaults to ``False``) can be used to
specify that the handler should be executed in its own background
thread, in parallel with other handlers.
Finally, the *multi* parameter (which only applies when *thread* is
``True``) specifies whether multi-threaded handlers should be allowed
to execute in parallel. When ``True`` (the default), threaded handlers
execute as many times as activated in parallel. When ``False``, a
single instance of a threaded handler is allowed to execute at any
given time; simultaneous activations are ignored (but not queued, as
with unthreaded handlers).
"""
def decorator(f):
self._handlers.append(
ChatPostHandler(self._handler_closure(f),
thread, multi, message))
f._picraft_classes = set()
return f
return decorator
class EventHandler(object):
"""
This is an internal object used to associate event handlers with their
activation restrictions.
The *action* parameter specifies the function to be run when a matching
event is received from the server.
The *thread* parameter specifies whether the *action* will be launched in
its own background thread. If *multi* is ``False``, then the
:meth:`execute` method will ensure that any prior execution has finished
before launching another one.
"""
def __init__(self, action, thread, multi):
self.action = action
self.thread = thread
self.multi = multi
self._thread = None
def execute(self, event):
"""
Launches the *action* in a background thread if necessary. If required,
this method also ensures threaded actions don't overlap.
"""
if self.thread:
if self.multi:
threading.Thread(target=self._execute_handler, args=(event,)).start()
elif not self._thread:
self._thread = threading.Thread(target=self._execute_single, args=(event,))
self._thread.start()
else:
self._execute_handler(event)
def _execute_single(self, event):
try:
self._execute_handler(event)
finally:
self._thread = None
def _execute_handler(self, event):
self.action(event)
def matches(self, event):
"""
Tests whether or not *event* match all the filters for the handler that
this object represents.
"""
raise NotImplementedError
class PlayerPosHandler(EventHandler):
"""
This class associates a handler with a player-position event.
Constructor parameters are similar to the parent class,
:class:`EventHandler` but additionally include *old_pos* and *new_pos* to
specify the vectors (or sequences of vectors) that an event must transition
across in order to activate this action. These filters must both match in
order for the action to fire.
"""
def __init__(self, action, thread, multi, old_pos, new_pos):
super(PlayerPosHandler, self).__init__(action, thread, multi)
self.old_pos = old_pos
self.new_pos = new_pos
def matches(self, event):
return (
isinstance(event, PlayerPosEvent) and
self.matches_pos(self.old_pos, event.old_pos) and
self.matches_pos(self.new_pos, event.new_pos))
def matches_pos(self, test, pos):
if test is None:
return True
if isinstance(test, Vector):
return test == pos.floor()
if isinstance(test, Container):
return pos.floor() in test
raise TypeError(
"%r is not a valid position test; expected Vector or "
"sequence of Vector" % test)
class BlockHitHandler(EventHandler):
"""
This class associates a handler with a block-hit event.
Constructor parameters are similar to the parent class,
:class:`EventHandler` but additionally include *pos* to specify the vector
(or sequence of vectors) which an event must match in order to activate
this action, and *face* to specify the block face (or set of faces) which
an event must match. These filters must both match in order for the action
to fire.
"""
def __init__(self, action, thread, multi, pos, face):
super(BlockHitHandler, self).__init__(action, thread, multi)
self.pos = pos
if isinstance(face, bytes):
face = face.decode('ascii')
self.face = face
def matches(self, event):
return (
isinstance(event, BlockHitEvent) and
self.matches_pos(event.pos) and
self.matches_face(event.face))
def matches_pos(self, pos):
if self.pos is None:
return True
if isinstance(self.pos, Vector):
return self.pos == pos
if isinstance(self.pos, Container):
return pos in self.pos
raise TypeError(
"%r is not a valid position test; expected Vector or "
"sequence of Vector" % pos)
def matches_face(self, face):
if self.face is None:
return True
if isinstance(self.face, str):
return self.face == face
if isinstance(self.face, Container):
return face in self.face
raise TypeError(
"%r is not a valid face test; expected string or sequence "
"of strings" % face)
class ChatPostHandler(EventHandler):
"""
This class associates a handler with a chat-post event.
Constructor parameters are similar to the parent class,
:class:`EventHandler` but additionally include *message* to specify the
message that an event must contain in order to activate this action.
"""
def __init__(self, action, thread, multi, message):
super(ChatPostHandler, self).__init__(action, thread, multi)
if isinstance(message, bytes):
message = message.decode('ascii')
self.message = message
def matches(self, event):
return (
isinstance(event, ChatPostEvent) and
self.matches_message(event.message))
def matches_message(self, message):
if self.message is None:
return True
if isinstance(self.message, str):
return self.message == message
try:
return self.message.match(message)
except AttributeError:
raise TypeError(
"%r is not a valid message test; expected string"
"or regular expression" % message)
class IdleHandler(EventHandler):
"""
This class associates a handler with an idle event.
"""
def matches(self, event):
return isinstance(event, IdleEvent)
| bsd-3-clause | 7,676,719,058,441,888,000 | 37.242162 | 115 | 0.618675 | false | 4.46585 | false | false | false |
AllMyChanges/allmychanges.com | allmychanges/tests/preview.py | 1 | 5407 | # coding: utf-8
import anyjson
from urllib import urlencode
from nose.tools import eq_
from allmychanges.models import Changelog, Preview
from django.test import Client
from django.core.urlresolvers import reverse
from allmychanges.tasks import _task_log
from .utils import (refresh, check_status_code,
create_user, put_json, json)
from hamcrest import (
assert_that,
has_entries)
def test_preview():
_task_log[:] = []
cl = Client()
eq_(0, Preview.objects.count())
eq_(0, Changelog.objects.count())
# when user opens add-new page, a new changelog and preview
# are created
source = 'test+samples/very-simple.md'
cl.get(reverse('add-new') + '?' + urlencode(dict(url=source)))
eq_(1, Changelog.objects.count())
eq_(1, Preview.objects.count())
preview = Preview.objects.all()[0]
eq_(None, preview.user)
assert preview.light_user != None
eq_([('update_preview_task', (1,), {})], _task_log)
preview_url = reverse('preview', kwargs=dict(pk=preview.pk))
response = cl.get(preview_url)
eq_(200, response.status_code)
assert 'Some <span class="changelog-highlight-fix">bugfix</span>.' in response.content
assert 'Initial release.' in response.content
    # at this point the preview object should have versions, but the changelog should not
changelog = Changelog.objects.all()[0]
eq_(0, changelog.versions.count())
eq_(2, preview.versions.count())
    # check that the preview has a log field and that it is a list
preview = refresh(preview)
eq_(6, len(preview.log))
    # now repoint the preview at a non-existent source
response = cl.post(preview_url,
data=anyjson.serialize(dict(source='test+another source',
ignore_list='NEWS',
search_list='docs')),
content_type='application/json')
eq_(200, response.status_code)
preview = refresh(preview)
eq_('test+another source', preview.source)
eq_('NEWS', preview.ignore_list)
eq_('docs', preview.search_list)
# and another preview task was scheduled
eq_([('update_preview_task', (1,), {}),
('update_preview_task', (1,), {})], _task_log)
    # the versions should have been removed
eq_(0, changelog.versions.count())
eq_(0, preview.versions.count())
    # and the preview itself should have moved to the error state
eq_('error', preview.status)
def test_update_package_preview_versions():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test+samples/markdown-release-notes')
preview = changelog.previews.create(light_user='anonymous',
source=changelog.source)
preview.schedule_update()
eq_(0, changelog.versions.filter(preview=None).count())
def first_line(version):
return version.processed_text.split('\n', 1)[0]
def first_lines(versions):
return map(first_line, versions)
versions = preview.versions.all()
eq_([
'<ul>',
u'<h1>0.1.1</h1>', u'<h1>0.1.0</h1>'],
first_lines(versions))
# now we'll check if ignore list works
preview.set_ignore_list(['docs/unrelated-crap.md'])
preview.save()
preview.schedule_update()
versions = preview.versions.all()
eq_([u'<h1>0.1.1</h1>', u'<h1>0.1.0</h1>'],
first_lines(versions))
def test_when_preview_saved_versions_are_copied_to_changelog():
# this only should happen when changelog is empty
user = create_user('art')
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test+samples/markdown-release-notes')
changelog.add_to_moderators(user)
cl = Client()
cl.login(username='art', password='art')
response = cl.get(reverse('changelog-list'))
preview = changelog.previews.create(light_user=response.cookies.get('light_user_id').value,
source=changelog.source)
preview.schedule_update()
eq_(0, changelog.versions.count())
eq_(3, preview.versions.count())
response = put_json(cl,
reverse('changelog-detail', kwargs=dict(pk=changelog.pk)),
expected_code=200,
namespace=changelog.namespace,
name=changelog.name,
source='http://github.com/svetlyak40wt/django-fields',
downloader='git.vcs')
# versions now moved to the changelog
eq_(3, changelog.versions.count())
eq_(0, preview.versions.count())
def test_get_preview_details_via_api():
changelog = Changelog.objects.create(
namespace='python', name='pip', source='test+samples/markdown-release-notes')
preview = changelog.previews.create(light_user='anonymous',
source=changelog.source)
cl = Client()
response = cl.get(reverse('preview-detail', kwargs=dict(pk=preview.pk)))
check_status_code(200, response)
data = json(response)
assert_that(data,
has_entries(
status='created',
processing_status='',
log=[]))
| bsd-2-clause | 1,169,439,980,779,571,700 | 32.484076 | 95 | 0.607761 | false | 3.556834 | true | false | false |
pgaref/memcached_bench | Python_plots/plots/logscale/microexp/logscale_latency.py | 1 | 4626 | __author__ = "Panagiotis Garefalakis"
__copyright__ = "Imperial College London"
# The MIT License (MIT)
#
# Copyright (c) 2016 Panagiotis Garefalakis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import pandas as pd
import plots.utils as utils
import brewer2mpl
# brewer2mpl.get_map args: set name set type number of colors
# bmap = brewer2mpl.get_map('RdBu', 'Diverging', 5)
bmap = brewer2mpl.get_map('Set1', 'Qualitative', 5)
colors = bmap.mpl_colors
files = ["CPLEX-off_stats.csv", "CPLEX-on_stats.csv", "GR-NODE_CAND_stats.csv", "GR-SERIAL_stats.csv", "GR-RANDOM_stats.csv"]
labels = ["ILP-offline", "ILP-online", "Node Candidates", "Random"]
labels_map={"CPLEX-on": "ILP-online", "CPLEX-off": "ILP-offline",
"GR-NODE_CAND": "Node Candidates", "GR-RANDOM": "Greedy", "GR-SERIAL": "Aurora-Prelim"}
# colors = ['r', 'g', 'b', 'black', 'c', 'm']
markers = ['o', '^', 'v', 'h', 'x']
linestyle_list = ['--', '-', ':', '-', '-.']
# Global style configuration
utils.set_rcs()
def latency_logscale(data):
fig = utils.plt.figure()
ax = fig.add_subplot(111)
space = 0.25
conditions = np.unique(data[:, 0])
categories = np.unique(data[:, 1])
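    # 'conditions' holds the distinct placement techniques, 'categories' the distinct cluster sizes (see file_parser below)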
# n = len(conditions)
n = len(labels_map)
width = (1 - space) / n
print "width:", width
i = 0
for cond in conditions:
print "cond:", cond
y_vals = data[data[:, 0] == cond][:, 2].astype(np.float)
x_vals = data[data[:, 0] == cond][:, 1].astype(np.int)
pos = [j - (1 - space) / 2. + i * width for j in range(1, len(categories) + 1)]
if labels_map.has_key(str(cond).strip()):
ax.plot(x_vals, y_vals, label=labels_map[str(cond).strip()], color=colors[i], linestyle=linestyle_list[i],
marker=markers[i], linewidth=1.5,)
# , edgecolor=get_colors()[i+1],hatch=hatch_patterns[i])
i +=1
indexes = range(1, len(categories) + 1)
print "Indexes: ", indexes
print "Categories: ", categories
# Add the axis labels
ax.set_ylabel("Latency (ms)")
ax.set_xlabel("Number of Nodes")
# Make Y axis logscale
utils.plt.yscale('log', nonposy='clip')
# Add a legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
utils.plt.tight_layout()
# Create some space for the last marker
utils.plt.xlim((0, x_vals[len(x_vals)-1]+10))
return fig, ax
def file_parser(fnames):
file_data = (pd.read_csv(f) for f in fnames)
all_data = pd.concat(file_data, ignore_index=True)
# grouped_data = all_data.groupby([' Plan technique', ' totJobs'])[' ObjectiveValue '].mean()
print all_data.columns.values
# print grouped_data
numpyMatrix = all_data[[' Plan technique', ' clusterSize', ' runTime(ms)']].values
# print numpyMatrix
return numpyMatrix
if __name__ == '__main__':
print "Sytem Path {}".format(os.environ['PATH'])
if len(sys.argv) < 2:
print "Usage: bars_efficiency.py.py <input PATH>"
sys.exit(-1)
outname = "placement_latency_log"
fpaths = []
for file in files:
fpaths.append(sys.argv[1]+"/"+file)
# labels.append(sys.argv[2 + i])
print 'Files given: {}'.format(" | ".join(fname for fname in fpaths))
# print 'Labels given: {}'.format(" | ".join(label for label in labels))
# print brewer2mpl.print_maps()
data = file_parser(fpaths)
fig, axes = latency_logscale(data)
utils.set_rcs()
utils.prepare_legend(legend_loc="upper left", legend_font=15)
utils.writeout("%s"%outname) | mit | 5,525,530,286,866,149,000 | 34.320611 | 125 | 0.646995 | false | 3.273885 | false | false | false |
chemlab/chemlab | chemlab/utils/pbc.py | 1 | 4376 | import numpy as np
import dask.array as da
from collections import Sequence
def minimum_image(coords, pbc):
"""
Wraps a vector collection of atom positions into the central periodic
image or primary simulation cell.
Parameters
----------
    coords : :class:`numpy.ndarray`, (Nx3)
        Vector collection of atom positions.
    pbc : :class:`numpy.ndarray`, (3)
        Periodic box dimensions along the x, y and z axes.
Returns
-------
wrap : :class:`numpy.ndarray`, (Nx3)
Returns atomic positions wrapped into the primary simulation
cell, or periodic image.
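
    Example
    -------
    A small illustration (assumes an orthorhombic 1x1x1 box; output shown
    without exact float formatting):

    >>> minimum_image(np.array([[1.2, -0.3, 0.5]]), np.array([1.0, 1.0, 1.0]))
    [[ 0.2, 0.7, 0.5]]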
"""
# This will do the broadcasting
coords = np.array(coords)
pbc = np.array(pbc)
# For each coordinate this number represents which box we are in
image_number = np.floor(coords / pbc)
wrap = coords - pbc * image_number
return wrap
def noperiodic(r_array, periodic, reference=None):
    '''Rearrange the array of coordinates *r_array* in a way that doesn't
cross the periodic boundary.
Parameters
----------
r_array : :class:`numpy.ndarray`, (Nx3)
Array of 3D coordinates.
periodic: :class:`numpy.ndarray`, (3)
Periodic boundary dimensions.
reference: ``None`` or :class:`numpy.ndarray` (3)
The points will be moved to be in the periodic image centered on the reference.
If None, the first point will be taken as a reference
Returns
-------
A (N, 3) array of coordinates, all in the same periodic image.
Example
-------
>>> coordinates = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]])
>>> periodic = np.array([1, 1, 1])
>>> noperiodic(coordinates, periodic)
[[ 0.1, 0.0, 0.0],
[-0.1, 0.0, 0.0]]
'''
if reference is None:
center = r_array[0]
else:
center = reference
# Find the displacements
dr = (center - r_array)
drsign = np.sign(dr)
# Move things when the displacement is more than half the box size
tomove = np.abs(dr) >= periodic / 2.0
r_array[tomove] += (drsign * periodic)[tomove]
return r_array
def subtract_vectors(a, b, periodic):
'''Returns the difference of the points vec_a - vec_b subject
to the periodic boundary conditions.
'''
r = a - b
delta = np.abs(r)
sign = np.sign(r)
return np.where(delta > 0.5 * periodic, sign * (periodic - delta), r)
def add_vectors(vec_a, vec_b, periodic):
    '''Returns the sum of the points vec_a + vec_b subject
    to the periodic boundary conditions.
    '''
    # move both points into the same periodic image before adding them
    moved = noperiodic(np.array([vec_a, vec_b]), periodic)
    return moved[0] + moved[1]
def distance_matrix(a, b, periodic):
'''Calculate a distrance matrix between coordinates sets a and b
'''
a = a
b = b[:, np.newaxis]
return periodic_distance(a, b, periodic)
def periodic_distance(a, b, periodic):
'''
Periodic distance between two arrays. Periodic is a 3
dimensional array containing the 3 box sizes.
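
    Example (illustrative): with a 1x1x1 box, two points 0.75 apart along x
    are only 0.25 apart once the boundary is taken into account:

    >>> periodic_distance([0.0, 0.0, 0.0], [0.75, 0.0, 0.0], [1.0, 1.0, 1.0])
    0.25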
'''
a = np.array(a)
b = np.array(b)
periodic = np.array(periodic)
delta = np.abs(a - b)
delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
return np.sqrt((delta ** 2).sum(axis=-1))
def geometric_center(coords, periodic):
'''Geometric center taking into account periodic boundaries'''
max_vals = periodic
theta = 2 * np.pi * (coords / max_vals)
eps = np.cos(theta) * max_vals / (2 * np.pi)
zeta = np.sin(theta) * max_vals / (2 * np.pi)
eps_avg = eps.sum(axis=0)
zeta_avg = zeta.sum(axis=0)
theta_avg = np.arctan2(-zeta_avg, -eps_avg) + np.pi
return theta_avg * max_vals / (2 * np.pi)
def radius_of_gyration(coords, periodic):
'''Calculate the square root of the mean distance squared from the center of gravity.
'''
gc = geometric_center(coords, periodic)
    # take the square root so the value matches the name and docstring (root-mean-square distance)
    return np.sqrt((periodic_distance(coords, gc, periodic) ** 2).sum() / len(coords))
def fractional_coordinates(xyz, box_vectors):
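    # Express Cartesian positions in fractional (cell) coordinates via the inverse of the box matrix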
T = np.linalg.inv(box_vectors)
return T.T.dot(xyz.T).T
def cell_coordinates(fractional, box_vectors):
return box_vectors.T.dot(fractional.T).T
def general_periodic_distance(a, b, box_vectors):
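    # Minimum-image distance for a general (possibly triclinic) cell: wrap the separation in fractional coordinates, then measure it in Cartesian space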
frac = fractional_coordinates(b - a, box_vectors)
delta = np.abs(frac)
periodic = 1.0
delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
return np.linalg.norm(cell_coordinates(delta, box_vectors))
| gpl-3.0 | -6,614,042,608,282,507,000 | 26.522013 | 89 | 0.620201 | false | 3.517685 | false | false | false |
mnpiozhang/DocumentSearch | backend/views.py | 1 | 12494 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from django.template.context_processors import csrf
from django.shortcuts import redirect,HttpResponse,render_to_response,render
from django.http.response import StreamingHttpResponse,HttpResponseRedirect,HttpResponseNotFound
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.template.context import RequestContext
from models import UserInfo,DocumentInfo
from forms import DocumentForm
from decorators import is_login_auth
import platform,os
from utils.common import Page,page_div,query_page_div,get_doc_page_info,filenameJudge
from DocumentSearch import settings
import datetime
from django.db.models import Q
from tasks import analyze_uploadfile_task
from indexhelper import del_es_doc
import os
# Create your views here.
# login
@csrf_exempt
def login(request):
# Create your views here.
ret = {'status':''}
if request.method == 'POST':
username = request.POST.get('username',None)
password = request.POST.get('password',None)
is_not_empty=all([username,password])
if is_not_empty:
count = UserInfo.objects.filter(username=username,password=password).count()
            # if the username and password match, redirect to the index page
if count == 1:
request.session['username'] = username
request.session['login_auth'] = True
#logging.info("user login : {}".format(username))
return redirect('/backend/index/')
else:
ret['status']='password error'
else:
ret['status']='can not empty'
return render_to_response('login.html',ret)
# logout
@is_login_auth
def logout(request):
#logging.info("user logout : {}".format(request.session['username']))
del request.session['login_auth']
del request.session['username']
return redirect("/backend/login/")
# index page
@is_login_auth
def index(request,page=1):
ret = {'DocumentInfoObj':None,'UserInfoObj':None,'PageInfo':None,'AllCount':None}
try:
page = int(page)
except Exception:
page = 1
if request.method == 'GET':
        # paginated display for search results
if request.GET.get('issearch',None):
searchindexstate = request.GET.get('searchindexstate',None)
tmpstarttime = request.GET.get('searchstarttime',None)
tmpendtime = request.GET.get('searchendtime',None)
Qset = {}
Qset['indexstate'] = searchindexstate
Qset['searchstarttime'] = tmpstarttime
Qset['searchendtime'] = tmpendtime
            # if the start time is missing or invalid, default to 1970-01-01
try:
searchstarttime = datetime.datetime.strptime(tmpstarttime,'%Y-%m-%d')
except:
searchstarttime = datetime.datetime(1970, 1, 1)
            # if the end time is missing or invalid, default to now
try:
searchendtime = datetime.datetime.strptime(tmpendtime,'%Y-%m-%d')
except:
searchendtime = datetime.datetime.now()
allDoc = DocumentInfo.objects.filter(
Q(indexstate__startswith=searchindexstate)
&Q(timestamp__gte=searchstarttime)
&Q(timestamp__lte=searchendtime)
)
AllCount = allDoc.count()
ret['AllCount'] = AllCount
PageObj = Page(AllCount,page,6)
DocumentInfoObj = allDoc[PageObj.begin:PageObj.end]
pageurl = 'index'
querycondition = request.META.get("QUERY_STRING",None)
pageinfo = query_page_div(page, PageObj.all_page_count,pageurl,querycondition)
ret['PageInfo'] = pageinfo
ret['DocumentInfoObj'] = DocumentInfoObj
UserInfoObj = UserInfo.objects.get(username=request.session.get('username',None))
ret['UserInfoObj'] = UserInfoObj
ret['Qset'] = Qset
print Qset
return render_to_response('index.html',ret,context_instance=RequestContext(request))
        # paginated display for the normal index page
else:
docPage = get_doc_page_info(DocumentInfo,page,'n')
ret['AllCount'] = docPage['AllCount']
ret['PageInfo'] = docPage['PageInfo']
ret['DocumentInfoObj'] = docPage['DocumentInfoObj']
UserInfoObj = UserInfo.objects.get(username=request.session.get('username',None))
ret['UserInfoObj'] = UserInfoObj
return render_to_response('index.html',ret,context_instance=RequestContext(request))
else:
return HttpResponse("this is a web page , please use metod GET")
# submit a new document
@is_login_auth
def submit_doc(request):
ret = {'UserName':None,'form':None,'UserInfoObj':None}
ret['UserName'] = request.session.get('username',None)
#WorkOrderObj = WorkOrder.objects.create()
UserInfoObj = UserInfo.objects.get(username=ret['UserName'])
ret['UserInfoObj'] = UserInfoObj
if request.method == 'POST':
DocumentObj_form = DocumentForm(request.POST,request.FILES)
upload_filename = request.FILES['attachment'].name
#django.core.files.uploadedfile.InMemoryUploadedFile
fileSuffixObj = filenameJudge(upload_filename)
file_flag = fileSuffixObj.suffix_judge()
if DocumentObj_form.is_valid() and file_flag:
DocumentObj = DocumentObj_form.save(commit=False)
            # set the index state to 'b', meaning indexing has started
DocumentObj.indexstate = 'b'
DocumentObj.save()
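            # hand the uploaded file off to the background task queue (via .delay) to extract text and build the search index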
analyze_uploadfile_task.delay(DocumentObj.id,file_flag)
ret['status'] = 'save ok'
else:
ret['status'] = 'save error'
ret['form'] = DocumentObj_form
            # add the CSRF token to the template context
ret.update(csrf(request))
return render(request,'submitdoc.html',ret)
DocumentObj_form = DocumentForm()
ret['form'] = DocumentObj_form
    # add the CSRF token to the template context
ret.update(csrf(request))
return render_to_response('submitdoc.html',ret)
# file download
@is_login_auth
def big_file_download(request,attachmentid):
def _file_iterator(file_name, chunk_size=512):
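        # yield the file in small chunks so large attachments are streamed rather than read fully into memory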
with open(file_name,'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
DocumentFileObj = DocumentInfo.objects.get(id=attachmentid)
    # determine the operating system type
sys_type = platform.system()
if sys_type == 'Windows':
#windows下使用
the_file_name = str(settings.MEDIA_ROOT) + '\\' + str(DocumentFileObj.attachment).replace('/', '\\').decode('utf-8')
elif sys_type == 'Linux':
        # used on Linux
the_file_name = settings.MEDIA_ROOT + "/" + str(DocumentFileObj.attachment).decode('utf-8')
else:
        # on systems other than Windows or Linux (e.g. Ubuntu variants), fall back to the Linux convention
the_file_name = settings.MEDIA_ROOT + "/" + str(DocumentFileObj.attachment).decode('utf-8')
response = StreamingHttpResponse(_file_iterator(the_file_name))
response['Content-Type'] = 'application/octet-stream'
if sys_type == 'Windows':
#windows下使用
response['Content-Disposition'] = 'attachment;filename=' + the_file_name.encode('gbk').split("\\")[-1]
elif sys_type == 'Linux':
#linux下使用
response['Content-Disposition'] = 'attachment;filename=' + the_file_name.encode('gbk').split("/")[-1]
else:
#非linux或windows下,如unbantu等皆使用linux的标准
response['Content-Disposition'] = 'attachment;filename=' + the_file_name.encode('gbk').split("/")[-1]
return response
# batch delete documents
@is_login_auth
def batch_del_doc(request):
if request.method == 'POST':
        # batch delete database objects for the ids passed in the request
ret = {'DocumentInfoObj':None,'UserInfoObj':None,'PageInfo':None,'AllCount':None}
will_del_doc = request.POST.getlist("checkboxdel[]",None)
if will_del_doc:
for i in will_del_doc:
DocumentInfoObj = DocumentInfo.objects.get(id=i)
DocumentInfoObj.delete()
try:
del_es_doc(i)
except Exception,e:
print e
print "del this doc id in es error,may be this doc id does not exist "
ids = ",".join(will_del_doc)
ret['popover'] = { "id":ids,"info":"已经删除以下编号的文档" }
else:
ret['popover'] = { "id":"","info":"没有选中可删除的文档" }
page = 1
docPage = get_doc_page_info(DocumentInfo,page,'n')
ret['AllCount'] = docPage['AllCount']
ret['PageInfo'] = docPage['PageInfo']
ret['DocumentInfoObj'] = docPage['DocumentInfoObj']
UserInfoObj = UserInfo.objects.get(username=request.session.get('username',None))
ret['UserInfoObj'] = UserInfoObj
return render_to_response('index.html',ret,context_instance=RequestContext(request))
else:
return HttpResponseNotFound('<h1>Page not found</h1>')
# delete a document
@is_login_auth
def del_doc(request,id):
try:
try:
DocumentInfoObj = DocumentInfo.objects.get(id=id)
except Exception,e:
print e
return HttpResponseRedirect('/backend/index')
DocumentInfoObj.delete()
try:
del_es_doc(id)
except Exception,e:
print e
print "del this doc id in es error,may be this doc id does not exist "
ret = {'DocumentInfoObj':None,'UserInfoObj':None,'PageInfo':None,'AllCount':None}
page = 1
docPage = get_doc_page_info(DocumentInfo,page,'n')
ret['AllCount'] = docPage['AllCount']
ret['PageInfo'] = docPage['PageInfo']
ret['DocumentInfoObj'] = docPage['DocumentInfoObj']
UserInfoObj = UserInfo.objects.get(username=request.session.get('username',None))
ret['UserInfoObj'] = UserInfoObj
ret['popover'] = { "id":id,"info":"已经删除文档" }
return render_to_response('index.html',ret,context_instance=RequestContext(request))
except Exception,e:
return HttpResponseNotFound('<h1>Page not found</h1>')
# edit a document
@is_login_auth
def edit(request,id):
ret = {'UserName':None,'form':None,'status':'','id':None,'UserInfoObj':None}
DocumentInfoObj = DocumentInfo.objects.get(id=id)
#print DocumentInfoObj.type
if request.method == 'POST':
DocumentInfoObj_form = DocumentForm(data=request.POST,files=request.FILES,instance=DocumentInfoObj)
#print request.POST
#print request.FILES['attachment'].name
#print DocumentInfoObj.attachment
#print str(DocumentInfoObj.attachment)
#print DocumentInfoObj_form.attachment
try:
fileSuffixObj = filenameJudge(request.FILES['attachment'].name)
except:
fileSuffixObj = filenameJudge(os.path.basename(str(DocumentInfoObj.attachment)))
file_flag = fileSuffixObj.suffix_judge()
if DocumentInfoObj_form.is_valid() and file_flag:
DocumentObj = DocumentInfoObj_form.save(commit=False)
            # set the index state to 'b', meaning indexing has started
DocumentObj.indexstate = 'b'
DocumentObj.save()
analyze_uploadfile_task.delay(DocumentObj.id,file_flag)
ret['status'] = '修改成功'
else:
ret['status'] = '修改失败'
ret['form'] = DocumentInfoObj_form
            # add the CSRF token to the template context
ret.update(csrf(request))
return render(request,'edit.html',ret)
DocumentInfoObj_form = DocumentForm(instance=DocumentInfoObj)
ret['UserName'] = request.session.get('username',None)
UserInfoObj = UserInfo.objects.get(username=ret['UserName'])
ret['UserInfoObj'] = UserInfoObj
ret['form'] = DocumentInfoObj_form
ret['id'] = id
    # add the CSRF token to the template context
ret.update(csrf(request))
return render_to_response('edit.html',ret) | mit | -1,303,588,607,948,399,400 | 39.534014 | 124 | 0.612957 | false | 3.561267 | false | false | false |
dcf21/meteor-pi | src/pythonModules/meteorpi_server/meteorpi_server/importer_api.py | 2 | 15194 | # importer_api.py
# Meteor Pi, Cambridge Science Centre
# Dominic Ford, Tom Oinn
# -------------------------------------------------
# Copyright 2016 Cambridge Science Centre.
# This file is part of Meteor Pi.
# Meteor Pi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meteor Pi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meteor Pi. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------
from logging import getLogger
from yaml import safe_load
from os import path, remove
import meteorpi_model as model
from flask.ext.jsonpify import jsonify
from flask import request, g
class MeteorDatabaseImportReceiver(object):
"""
Connects to a
:class:`meteorpi_db.MeteorDatabase` and pushes any data to it on import, including managing the acquisition of any
additional information (camera status, binary file data) required in the process.
"""
def __init__(self, db):
self.db = db
@staticmethod
def get_importing_user_id():
"""
Retrieve the importing user ID from the request context, this user will have already authenticated correctly
by the point the import receiver is called.
:return:
The string user_id for the importing user
"""
return g.user.user_id
def receive_observation(self, import_request):
obs = import_request.entity
if not self.db.has_observation_id(obs.id):
self.db.import_observation(observation=obs, user_id=self.get_importing_user_id())
self.db.commit()
return import_request.response_complete()
def receive_file_record(self, import_request):
file_record = import_request.entity
if not self.db.has_file_id(file_record.id):
if not path.isfile(self.db.file_path_for_id(file_record.id)):
return import_request.response_need_file_data(file_id=file_record.id)
self.db.import_file(file_item=file_record, user_id=self.get_importing_user_id())
self.db.commit()
return import_request.response_complete()
def receive_metadata(self, import_request):
entity = import_request.entity
if not self.db.has_obstory_metadata(entity.id):
if not self.db.has_obstory_name(entity.obstory_name):
self.db.register_obstory(obstory_id=entity.obstory_id, obstory_name=entity.obstory_name,
latitude=entity.obstory_lat, longitude=entity.obstory_lng)
self.db.import_obstory_metadata(obstory_name=entity.obstory_name,
key=entity.key, value=entity.value, metadata_time=entity.time,
time_created=entity.time_created,
user_created=self.get_importing_user_id(),
item_id=entity.id)
self.db.commit()
return import_request.response_complete()
def receive_file_data(self, file_id, file_data, md5_hex):
file_path = self.db.file_path_for_id(file_id)
if not path.isfile(file_path):
file_data.save(file_path)
if md5_hex != model.get_md5_hash(file_path):
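            # checksum mismatch: discard the corrupt upload so the file data can be requested again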
remove(file_path)
class ImportRequest(object):
"""
Helper used when importing, makes use of the 'cached_request' request transparent to the importing party.
:cvar logger:
Logs to 'meteorpi.server.import'
:ivar entity_type:
The type of the ID being imported, which will be one of 'file', 'status', 'event' or 'none'.
"""
logger = getLogger("meteorpi.server.import")
def __init__(self, entity, entity_id):
"""
Constructor, don't use this from your own code, instead use process_request() to create one from the Flask
request context.
:param entity:
The entity being imported, either pulled from the request directly or from the cache. This can be None under
error conditions, in which case the only legitimate response is to send a 'continue' message back to the
exporter, at which point it will re-send the necessary information to rebuild the cache.
:param entity_id:
The ID of the entity being imported, this will always be defined.
"""
self.entity_id = entity_id
self.entity = entity
if entity is None:
self.entity_type = 'none'
elif isinstance(entity, model.Observation):
self.entity_type = 'observation'
elif isinstance(entity, model.FileRecord):
self.entity_type = 'file'
elif isinstance(entity, model.ObservatoryMetadata):
self.entity_type = 'metadata'
else:
raise ValueError("Unknown entity type, cannot continue.")
def response_complete(self):
"""
Signal that this particular entity has been fully processed. The exporter will not send it to this target again
under this particular export configuration (there is no guarantee another export configuration on the same
server won't send it, or that it won't be received from another server though, so you must always check whether
you have an entity and return this status as early as possible if so)
:return:
A response that can be returned from a Flask service method
"""
ImportRequest.logger.info("Completed import for {0} with id {1}".format(self.entity_type, self.entity_id))
ImportRequest.logger.debug("Sending: complete")
return jsonify({'state': 'complete'})
@staticmethod
def response_failed(message='Import failed'):
"""
Signal that import for this entity failed. Whether this results in a retry, either immediately or later, is
entirely up to the exporting party - it should therefore only be used for genuine error cases, and not to
indicate duplicate data (use response_complete for that, as it tells the exporter that it shouldn't send the
data again).
:param string message:
An optional message to convey about the failure
:return:
A response that can be returned from a Flask service method
"""
ImportRequest.logger.debug("Sending: failed")
return jsonify({'state': 'failed', 'message': message})
def response_continue(self):
"""
Signals that a partial reception of data has occurred and that the exporter should continue to send data for
this entity. This should also be used if import-side caching has missed, in which case the response will direct
the exporter to re-send the full data for the entity (otherwise it will send back the entity ID and rely on the
import party's caching to resolve it). Use this for generic cases where we need to be messaged again about this
entity - currently used after requesting and receiving a status block, and in its cache-refresh form if we have
a cache miss during import.
:return:
A response that can be returned from a Flask service method
"""
if self.entity is not None:
ImportRequest.logger.debug("Sending: continue")
return jsonify({'state': 'continue'})
else:
ImportRequest.logger.debug("Sending: continue-nocache")
return jsonify({'state': 'continue-nocache'})
@staticmethod
def response_continue_after_file():
"""
As with response_continue, but static to allow it to be called from context where we don't have a populated
ImportRequest object. Always uses cached IDs, with the expectation that a subsequent request will force cache
revalidation if required. Use this when acting on reception of binary data.
:return:
A response that can be returned from a Flask service method
"""
return jsonify({'state': 'continue'})
@staticmethod
def response_need_file_data(file_id):
"""
Signal the exporter that we need the binary data associated with a given file ID
:param string file_id:
the UUID of the :class:`meteorpi_model.FileRecord` for which we don't currently have data
:return:
A response that can be returned from a Flask service method
"""
ImportRequest.logger.debug("Sending: need_file_data, id={0}".format(file_id))
return jsonify({'state': 'need_file_data', 'file_id': file_id})
@staticmethod
def process_request():
"""
Build an :class:`ImportRequest` from the Flask request context, retrieving an Observation, FileRecord or
ObservatoryMetadata from the POSTed JSON based on the supplied type and ID. If the entity cannot be recovered
(for example after a server restart or under extreme load), the request's entity will be None and the server
will have to re-request the full value from the exporting party.
:return:
An :class:`ImportRequest` containing the entity for this request (or None if there was an unexpected cache
miss) and the UUID of the entity requested.
"""
g.request_dict = safe_load(request.get_data())
entity_type = g.request_dict['type']
entity_id = g.request_dict[entity_type]['id']
ImportRequest.logger.debug("Received request, type={0}, id={1}".format(entity_type, entity_id))
entity = ImportRequest._get_entity(entity_id)
ImportRequest.logger.debug("Entity with id={0} was {1}".format(entity_id, entity))
return ImportRequest(entity=entity, entity_id=entity_id)
@staticmethod
def _get_entity(entity_id):
"""
Uses the request context to retrieve a :class:`meteorpi_model.Observation`, :class:`meteorpi_model.FileRecord`
or :class:`meteorpi_model.ObservatoryMetadata` from the POSTed JSON string.
:param string entity_id:
The ID of the entity contained within the request
:return:
The corresponding entity from the request, or None if the request's type was not recognised.
"""
entity_type = g.request_dict['type']
if entity_type == 'file':
return model.FileRecord.from_dict(g.request_dict['file'])
elif entity_type == 'metadata':
return model.ObservatoryMetadata.from_dict(g.request_dict['metadata'])
elif entity_type == 'observation':
return model.Observation.from_dict(g.request_dict['observation'])
else:
return None
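# Sketch of the JSON bodies process_request() understands, inferred from
# _get_entity() (field contents abbreviated, values hypothetical):
#
#   {"type": "observation", "observation": {"id": "<uuid>", ...}}
#   {"type": "file",        "file":        {"id": "<uuid>", ...}}
#   {"type": "metadata",    "metadata":    {"id": "<uuid>", ...}}
#
# Any other "type" yields an entity of None, in which case response_continue()
# replies with 'continue-nocache' so the exporter re-sends the full data.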
def add_routes(meteor_app, url_path='/importv2'):
"""
Add two routes to the specified instance of :class:`meteorpi_server.MeteorApp` to implement the import API and
allow for replication of data to this server.
:param meteorpi_server.MeteorApp meteor_app:
The :class:`meteorpi_server.MeteorApp` to which import routes should be added
:param string url_path:
The base of the import routes for this application. Defaults to '/importv2' - the entity route is created at
this path and binary data is received at url_path/data/<file_id>/<md5>. Both routes only respond to POST
requests and require that the requests are authenticated and that the authenticated user has the 'import'
role. Each request is handled by an instance of
:class:`meteorpi_server.importer_api.MeteorDatabaseImportReceiver`, which replicates any missing information
from the import into the database attached to the meteor_app.
"""
app = meteor_app.app
@app.route(url_path, methods=['POST'])
@meteor_app.requires_auth(roles=['import'])
def import_entities():
"""
Receive an entity import request, using :class:`meteorpi_server.import_api.ImportRequest` to parse it, then
passing the parsed request on to an instance of :class:`meteorpi_server.import_api.BaseImportReceiver` to deal
with the possible import types.
:return:
A response, generally using one of the response_xxx methods in ImportRequest
"""
db = meteor_app.get_db()
handler = MeteorDatabaseImportReceiver(db=db)
import_request = ImportRequest.process_request()
if import_request.entity is None:
return import_request.response_continue()
if import_request.entity_type == 'file':
response = handler.receive_file_record(import_request)
handler.db.commit()
db.close_db()
if response is not None:
return response
else:
return import_request.response_complete()
elif import_request.entity_type == 'observation':
response = handler.receive_observation(import_request)
handler.db.commit()
db.close_db()
if response is not None:
return response
else:
return import_request.response_complete()
elif import_request.entity_type == 'metadata':
response = handler.receive_metadata(import_request)
handler.db.commit()
db.close_db()
if response is not None:
return response
else:
return import_request.response_continue()
else:
db.close_db()
return import_request.response_failed("Unknown import request")
@app.route('{0}/data/<file_id_hex>/<md5_hex>'.format(url_path), methods=['POST'])
@meteor_app.requires_auth(roles=['import'])
def import_file_data(file_id_hex, md5_hex):
"""
Receive a file upload, passing it to the handler if it contains the appropriate information.
:param string file_id_hex:
The ID (hex representation) of the :class:`meteorpi_model.FileRecord` to which this data belongs.
:param string md5_hex:
The hexadecimal MD5 checksum of the file data; if the stored file does not match this checksum it is removed.
"""
db = meteor_app.get_db()
handler = MeteorDatabaseImportReceiver(db=db)
file_id = file_id_hex
file_data = request.files['file']
if file_data:
handler.receive_file_data(file_id=file_id, file_data=file_data, md5_hex=md5_hex)
db.close_db()
return ImportRequest.response_continue_after_file()
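# Illustrative wiring (the MeteorApp construction shown is an assumption, not
# taken from this module):
#
#   from meteorpi_server import MeteorApp
#   from meteorpi_server import importer_api
#
#   meteor_app = MeteorApp()
#   importer_api.add_routes(meteor_app, url_path='/importv2')
#   # POST /importv2                              - entity import requests
#   # POST /importv2/data/<file_id_hex>/<md5_hex> - binary file payloads
#   # Both routes require an authenticated user holding the 'import' role.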
| gpl-3.0 | 2,854,792,162,750,640,000 | 45.750769 | 120 | 0.648743 | false | 4.281206 | false | false | false |
frnknglrt/grako | grako/semantics.py | 1 | 3741 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from grako.util import simplify_list, eval_escapes, warning
from grako.util import re, RE_FLAGS
from grako import grammars
from grako.exceptions import FailedSemantics
from grako.model import ModelBuilderSemantics
class GrakoASTSemantics(object):
def group(self, ast, *args):
return simplify_list(ast)
def element(self, ast, *args):
return simplify_list(ast)
def sequence(self, ast, *args):
return simplify_list(ast)
def choice(self, ast, *args):
if len(ast) == 1:
return simplify_list(ast[0])
return ast
class GrakoSemantics(ModelBuilderSemantics):
def __init__(self, grammar_name):
super(GrakoSemantics, self).__init__(
baseType=grammars.Model,
types=grammars.Model.classes()
)
self.grammar_name = grammar_name
self.rules = OrderedDict()
def token(self, ast, *args):
token = eval_escapes(ast)
return grammars.Token(token)
def pattern(self, ast, *args):
pattern = ast
try:
re.compile(pattern, RE_FLAGS)
except (TypeError, re.error) as e:
raise FailedSemantics('regexp error: ' + str(e))
return grammars.Pattern(pattern)
def hext(self, ast):
return int(ast, 16)
def float(self, ast):
return float(ast)
def int(self, ast):
return int(ast)
def cut_deprecated(self, ast, *args):
warning('The use of >> for cut is deprecated. Use the ~ symbol instead.')
return grammars.Cut()
def override_single_deprecated(self, ast, *args):
warning('The use of @ for override is deprecated. Use @: instead')
return grammars.Override(ast)
def sequence(self, ast, *args):
seq = ast.sequence
assert isinstance(seq, list), str(seq)
if len(seq) == 1:
return seq[0]
return grammars.Sequence(ast)
def choice(self, ast, *args):
if len(ast) == 1:
return ast[0]
return grammars.Choice(ast)
def new_name(self, name):
if name in self.rules:
raise FailedSemantics('rule "%s" already defined' % str(name))
return name
def known_name(self, name):
if name not in self.rules:
raise FailedSemantics('rule "%s" not yet defined' % str(name))
return name
def directive(self, ast):
return ast
def rule(self, ast, *args):
decorators = ast.decorators
name = ast.name
exp = ast.exp
base = ast.base
params = ast.params
kwparams = OrderedDict(ast.kwparams) if ast.kwparams else None
if 'override' not in decorators and name in self.rules:
self.new_name(name)
elif 'override' in decorators:
self.known_name(name)
if not base:
rule = grammars.Rule(ast, name, exp, params, kwparams, decorators=decorators)
else:
self.known_name(base)
base_rule = self.rules[base]
rule = grammars.BasedRule(ast, name, exp, base_rule, params, kwparams, decorators=decorators)
self.rules[name] = rule
return rule
def rule_include(self, ast, *args):
name = str(ast)
self.known_name(name)
rule = self.rules[name]
return grammars.RuleInclude(rule)
def grammar(self, ast, *args):
directives = {d.name: d.value for d in ast.directives}
return grammars.Grammar(
self.grammar_name,
list(self.rules.values()),
directives=directives
)
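# Minimal usage sketch (illustrative; only the leaf-level hooks are exercised
# directly, since a full parse depends on the surrounding parser classes):
#
#   semantics = GrakoSemantics('MyGrammar')
#   token = semantics.token('if')          # -> grammars.Token
#   pattern = semantics.pattern(r'\d+')    # -> grammars.Pattern (regex validated)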
| bsd-2-clause | -8,701,532,255,546,483,000 | 28 | 105 | 0.600374 | false | 3.809572 | false | false | false |
nanodan/branca | tests/test_colormap.py | 1 | 1778 | # -*- coding: utf-8 -*-
""""
Folium Colormap Module
----------------------
"""
import branca.colormap as cm
def test_simple_step():
step = cm.StepColormap(['green', 'yellow', 'red'],
vmin=3., vmax=10.,
index=[3, 4, 8, 10], caption='step')
step = cm.StepColormap(['r', 'y', 'g', 'c', 'b', 'm'])
step._repr_html_()
def test_simple_linear():
linear = cm.LinearColormap(['green', 'yellow', 'red'], vmin=3., vmax=10.)
linear = cm.LinearColormap(['red', 'orange', 'yellow', 'green'],
index=[0, 0.1, 0.9, 1.])
linear._repr_html_()
def test_linear_to_step():
some_list = [30.6, 50, 51, 52, 53, 54, 55, 60, 70, 100]
lc = cm.linear.YlOrRd_06
lc.to_step(n=12)
lc.to_step(index=[0, 2, 4, 6, 8, 10])
lc.to_step(data=some_list, n=12)
lc.to_step(data=some_list, n=12, method='linear')
lc.to_step(data=some_list, n=12, method='log')
lc.to_step(data=some_list, n=30, method='quantiles')
lc.to_step(data=some_list, quantiles=[0, 0.3, 0.7, 1])
lc.to_step(data=some_list, quantiles=[0, 0.3, 0.7, 1], round_method='int')
lc.to_step(data=some_list, quantiles=[0, 0.3, 0.7, 1],
round_method='log10')
def test_step_to_linear():
step = cm.StepColormap(['green', 'yellow', 'red'],
vmin=3., vmax=10.,
index=[3, 4, 8, 10], caption='step')
step.to_linear()
def test_linear_object():
cm.linear.OrRd_06._repr_html_()
cm.linear.PuBu_06.to_step(12)
cm.linear.YlGn_06.scale(3, 12)
cm.linear._repr_html_()
def test_step_object():
cm.step.OrRd_06._repr_html_()
cm.step.PuBu_06.to_linear()
cm.step.YlGn_06.scale(3, 12)
cm.step._repr_html_()
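# Hedged extra check (assumes colormaps are callable and map a scalar in
# [vmin, vmax] to a hex colour string).
def test_colormap_call():
    linear = cm.LinearColormap(['green', 'yellow', 'red'], vmin=3., vmax=10.)
    assert linear(6.5).startswith('#')
    step = linear.to_step(6)
    assert step(6.5).startswith('#')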
| mit | -9,212,736,154,703,194,000 | 30.192982 | 78 | 0.535433 | false | 2.743827 | true | false | false |
exxeleron/qPython | qpython/utils.py | 4 | 1882 | # Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
def uncompress(data, uncompressed_size):
_0 = numpy.intc(0)
_1 = numpy.intc(1)
_2 = numpy.intc(2)
_128 = numpy.intc(128)
_255 = numpy.intc(255)
n, r, s, p = _0, _0, _0, _0
i, d = _1, _1
f = _255 & data[_0]
ptrs = numpy.zeros(256, dtype = numpy.intc)
uncompressed = numpy.zeros(uncompressed_size, dtype = numpy.uint8)
idx = numpy.arange(uncompressed_size, dtype = numpy.intc)
while s < uncompressed_size:
pp = p + _1
if f & i:
r = ptrs[data[d]]
n = _2 + data[d + _1]
uncompressed[idx[s:s + n]] = uncompressed[r:r + n]
ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
if s == pp:
ptrs[(uncompressed[pp]) ^ (uncompressed[pp + _1])] = pp
d += _2
r += _2
s = s + n
p = s
else:
uncompressed[s] = data[d]
if pp == s:
ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
p = pp
s += _1
d += _1
if i == _128:
if s < uncompressed_size:
f = _255 & data[d]
d += _1
i = _1
else:
i += i
return uncompressed
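# Usage note (illustrative, not taken from this module): `uncompress` decodes
# the LZ-style compression used for kdb+ IPC payloads. A caller is expected to
# pass the compressed payload as a numpy.uint8 array plus the expected length:
#
#   raw = numpy.frombuffer(compressed_bytes, dtype=numpy.uint8)
#   data = uncompress(raw, uncompressed_size)   # -> numpy.uint8 array
#
# where `compressed_bytes` and `uncompressed_size` come from the qPython
# message reader; building a valid compressed payload by hand is omitted here.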
| apache-2.0 | 9,074,845,266,785,013,000 | 25.885714 | 75 | 0.52763 | false | 3.440585 | false | false | false |
typepad/python-typepad-api | typepad/api.py | 1 | 70481 | # Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The `typepad.api` module contains `TypePadObject` implementations of all the
content objects provided in the TypePad API.
"""
from urlparse import urljoin
from remoteobjects.dataobject import find_by_name
from typepad.tpobject import *
from typepad.tpobject import _ImageResizer, _VideoResizer
from typepad import fields
import typepad
class Account(TypePadObject):
"""A user account on an external website."""
crosspostable = fields.Field()
"""`True` if this account can be used to crosspost, or `False` otherwise.
An account can be used to crosspost if its service supports crossposting and
the user has enabled crossposting for the specific account.
"""
domain = fields.Field()
"""The DNS domain of the service that provides the account."""
id = fields.Field()
"""A URI that serves as a globally unique identifier for the account."""
provider_icon_url = fields.Field(api_name='providerIconUrl')
"""The URL of a 16-by-16 pixel icon that represents the service that provides
this account."""
provider_name = fields.Field(api_name='providerName')
"""A human-friendly name for the service that provides this account."""
provider_url = fields.Field(api_name='providerURL')
"""**Deprecated.** The URL of the home page of the service that provides this
account."""
url = fields.Field()
"""The URL of the user's profile or primary page on the remote site, if known."""
user_id = fields.Field(api_name='userId')
"""The machine identifier or primary key for the account, if known.
(Some sites only have a `username`.)
"""
username = fields.Field()
"""The username of the account, if known.
(Some sites only have a `user_id`.)
"""
@property
def xid(self):
return self.id.rsplit(':', 1)[-1]
class ApiKey(TypePadObject):
api_key = fields.Field(api_name='apiKey')
"""The actual API key string.
Use this as the consumer key when making an OAuth request.
"""
owner = fields.Object('Application')
"""The application that owns this API key.
:attrtype:`Application`
"""
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/api-keys/%s.json' % self.api_key)
@classmethod
def get_by_api_key(cls, api_key):
"""Returns an `ApiKey` instance with the given consumer key.
Asserts that the api_key parameter matches ^\w+$."""
assert re.match('^\w+$', api_key), "invalid api_key parameter given"
return cls.get('/api-keys/%s.json' % api_key)
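# Example usage (illustrative; the consumer key is a placeholder):
#
#   api_key = ApiKey.get_by_api_key('your_consumer_key')
#   application = api_key.owner        # the Application that owns this key
#
# Depending on client configuration, lookups like this may need to run inside
# a typepad.client batch request.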
class Application(TypePadObject):
"""An application that can authenticate to the TypePad API using OAuth.
An application is identified by its OAuth consumer key, which in the case
of a hosted group is the same as the identifier for the group itself.
"""
_class_object_type = "Application"
external_feed_subscriptions = fields.Link(ListOf('ExternalFeedSubscription'), api_name='external-feed-subscriptions')
"""Get a list of the application's active external feed subscriptions.
:attrtype:`list of ExternalFeedSubscription`
"""
groups = fields.Link(ListOf('Group'))
"""Get a list of groups in which a client using a ``app_full`` access auth
token from this application can act.
:attrtype:`list of Group`
"""
id = fields.Field()
"""A string containing the canonical identifier that can be used to identify
this application in URLs."""
name = fields.Field()
"""The name of the application as provided by its developer."""
oauth_access_token_url = fields.Field(api_name='oauthAccessTokenUrl')
"""The URL of the OAuth access token endpoint for this application."""
oauth_authorization_url = fields.Field(api_name='oauthAuthorizationUrl')
"""The URL to send the user's browser to for the user authorization step."""
oauth_identification_url = fields.Field(api_name='oauthIdentificationUrl')
"""The URL to send the user's browser to in order to identify who is logged in
(that is, the "sign in" link)."""
oauth_request_token_url = fields.Field(api_name='oauthRequestTokenUrl')
"""The URL of the OAuth request token endpoint for this application."""
object_type = fields.Field(api_name='objectType')
"""The keyword identifying the type of object this is.
For an Application object, `object_type` will be ``Application``.
"""
object_types = fields.List(fields.Field(), api_name='objectTypes')
"""**Deprecated.** The object types for this object.
This set will contain the string ``tag:api.typepad.com,2009:Application`` for
an Application object.
:attrtype:`list`
"""
session_sync_script_url = fields.Field(api_name='sessionSyncScriptUrl')
"""The URL of the session sync script."""
signout_url = fields.Field(api_name='signoutUrl')
"""The URL to send the user's browser to in order to sign them out of TypePad."""
user_flyouts_script_url = fields.Field(api_name='userFlyoutsScriptUrl')
"""The URL of a script to embed to enable the user flyouts functionality."""
class _CreateExternalFeedSubscriptionPost(TypePadObject):
callback_url = fields.Field(api_name='callbackUrl')
"""The URL which will receive notifications of new content in the subscribed
feeds."""
feed_idents = fields.List(fields.Field(), api_name='feedIdents')
"""A list of identifiers of the initial set of feeds to be subscribed to.
:attrtype:`list`
"""
filter_rules = fields.List(fields.Field(), api_name='filterRules')
"""A list of rules for filtering notifications to this subscription; each rule
is a query string using the search API's syntax.
:attrtype:`list`
"""
secret = fields.Field()
"""An optional subscriber-provided opaque token that will be used to compute
an HMAC digest to be sent along with each item delivered to the
`callback_url`."""
verify_token = fields.Field(api_name='verifyToken')
"""A subscriber-provided opaque token that will be echoed back in the
verification request to assist the subscriber in identifying which
subscription request is being verified."""
class _CreateExternalFeedSubscriptionResponse(TypePadObject):
subscription = fields.Object('ExternalFeedSubscription')
"""The subscription object that was created.
:attrtype:`ExternalFeedSubscription`
"""
create_external_feed_subscription = fields.ActionEndpoint(api_name='create-external-feed-subscription', post_type=_CreateExternalFeedSubscriptionPost, response_type=_CreateExternalFeedSubscriptionResponse)
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/applications/%s.json' % self.id)
@classmethod
def get_by_id(cls, id, **kwargs):
if id == '':
raise ValueError("An id is required")
obj = cls.get('/applications/%s.json' % id, **kwargs)
obj.__dict__['id'] = id
return obj
@classmethod
def get_by_api_key(cls, api_key, **kwargs):
"""Returns an `Application` instance by the API key.
Asserts that the api_key parameter matches ^\w+$."""
assert re.match('^\w+$', api_key), "invalid api_key parameter given"
import logging
logging.getLogger("typepad.api").warn(
'%s.get_by_api_key is deprecated' % cls.__name__)
return cls.get('/applications/%s.json' % api_key, **kwargs)
@property
def browser_upload_endpoint(self):
"""The endpoint to use for uploading file assets directly to
TypePad."""
return urljoin(typepad.client.endpoint, '/browser-upload.json')
user_flyouts_script = renamed_property(old='user_flyouts_script', new='user_flyouts_script_url')
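# Example usage (illustrative; the application id is a placeholder):
#
#   app = Application.get_by_id('6p00000000000000')
#   app.oauth_request_token_url        # OAuth request token endpoint
#   app.browser_upload_endpoint        # endpoint for direct file uploads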
class Asset(TypePadObject):
"""An item of content generated by a user."""
_class_object_type = "Asset"
author = fields.Object('User')
"""The user who created the selected asset.
:attrtype:`User`
"""
categories = fields.Link(ListObject)
"""Get a list of categories into which this asset has been placed within its
blog.
Currently supported only for `Post` assets that are posted within a blog.
:attrtype:`list`
"""
comment_count = fields.Field(api_name='commentCount')
"""The number of comments that have been posted in reply to this asset.
This number includes comments that have been posted in response to other
comments.
"""
comment_tree = fields.Link(ListOf('CommentTreeItem'), api_name='comment-tree')
"""Get a list of assets that were posted in response to the selected asset and
their depth in the response tree
:attrtype:`list of CommentTreeItem`
"""
comments = fields.Link(ListOf('Comment'))
"""Get a list of assets that were posted in response to the selected asset.
POST: Create a new Comment asset as a response to the selected asset.
:attrtype:`list of Comment`
"""
container = fields.Object('ContainerRef')
"""An object describing the group or blog to which this asset belongs.
:attrtype:`ContainerRef`
"""
content = fields.Field()
"""The raw asset content.
The `text_format` property describes how to format this data. Use this
property to set the asset content in write operations. An asset posted in a
group may have a `content` value up to 10,000 bytes long, while a `Post` asset
in a blog may have up to 65,000 bytes of content.
"""
crosspost_accounts = fields.List(fields.Field(), api_name='crosspostAccounts')
"""**Editable.** A set of identifiers for `Account` objects to which to
crosspost this asset when it's posted.
This property is omitted when retrieving existing assets.
:attrtype:`list`
"""
description = fields.Field()
"""The description of the asset."""
excerpt = fields.Field()
"""A short, plain-text excerpt of the entry content.
This is currently available only for `Post` assets.
"""
extended_content = fields.Link('AssetExtendedContent', api_name='extended-content')
"""Get the extended content for the asset, if any.
Currently supported only for `Post` assets that are posted within a blog.
:attrtype:`AssetExtendedContent`
"""
favorite_count = fields.Field(api_name='favoriteCount')
"""The number of distinct users who have added this asset as a favorite."""
favorites = fields.Link(ListOf('Favorite'))
"""Get a list of favorites that have been created for the selected asset.
:attrtype:`list of Favorite`
"""
feedback_status = fields.Link('FeedbackStatus', api_name='feedback-status')
"""Get the feedback status of selected asset PUT: Set the feedback status of
selected asset
:attrtype:`FeedbackStatus`
"""
groups = fields.List(fields.Field())
"""**Deprecated.** An array of strings containing the `id` URI of the `Group`
object that this asset is mapped into, if any.
This property has been superseded by the `container` property.
:attrtype:`list`
"""
id = fields.Field()
"""A URI that serves as a globally unique identifier for the user."""
is_favorite_for_current_user = fields.Field(api_name='isFavoriteForCurrentUser')
"""`True` if this asset is a favorite for the currently authenticated user, or
`False` otherwise.
This property is omitted from responses to anonymous requests.
"""
media_assets = fields.Link(ListOf('Asset'), api_name='media-assets')
"""Get a list of media assets that are embedded in the content of the selected
asset.
:attrtype:`list of Asset`
"""
object_type = fields.Field(api_name='objectType')
"""The keyword identifying the type of asset this is."""
object_types = fields.List(fields.Field(), api_name='objectTypes')
"""**Deprecated.** An array of object type identifier URIs identifying the
type of this asset.
Only the one object type URI for the particular type of asset this asset is
will be present.
:attrtype:`list`
"""
permalink_url = fields.Field(api_name='permalinkUrl')
"""The URL that is this asset's permalink.
This will be omitted if the asset does not have a permalink of its own (for
example, if it's embedded in another asset) or if TypePad does not know its
permalink.
"""
publication_status = fields.Object('PublicationStatus', api_name='publicationStatus')
"""**Editable.** An object describing the visibility status and publication
date for this asset.
Only visibility status is editable.
:attrtype:`PublicationStatus`
"""
publication_status_obj = fields.Link('PublicationStatus', api_name='publication-status')
"""Get the publication status of selected asset PUT: Set the publication
status of selected asset
:attrtype:`PublicationStatus`
"""
published = fields.Datetime()
"""The time at which the asset was created, as a W3CDTF timestamp.
:attrtype:`datetime`
"""
reblogs = fields.Link(ListOf('Post'))
"""Get a list of posts that were posted as reblogs of the selected asset.
:attrtype:`list of Post`
"""
rendered_content = fields.Field(api_name='renderedContent')
"""The content of this asset rendered to HTML.
This is currently available only for `Post` and `Page` assets.
"""
source = fields.Object('AssetSource')
"""An object describing the site from which this asset was retrieved, if the
asset was obtained from an external source.
:attrtype:`AssetSource`
"""
text_format = fields.Field(api_name='textFormat')
"""A keyword that indicates what formatting mode to use for the content of
this asset.
This can be ``html`` for assets the content of which is HTML,
``html_convert_linebreaks`` for assets the content of which is HTML but where
paragraph tags should be added automatically, or ``markdown`` for assets the
content of which is Markdown source. Other formatting modes may be added in
future. Applications that present assets for editing should use this property
to present an appropriate editor.
"""
title = fields.Field()
"""The title of the asset."""
url_id = fields.Field(api_name='urlId')
"""A string containing the canonical identifier that can be used to identify
this object in URLs.
This can be used to recognise where the same user is returned in response to
different requests, and as a mapping key for an application's local data
store.
"""
class _AddCategoryPost(TypePadObject):
category = fields.Field()
"""The category to add"""
add_category = fields.ActionEndpoint(api_name='add-category', post_type=_AddCategoryPost)
class _MakeCommentPreviewPost(TypePadObject):
content = fields.Field()
"""The body of the comment."""
class _MakeCommentPreviewResponse(TypePadObject):
comment = fields.Object('Asset')
"""A mockup of the future comment.
:attrtype:`Asset`
"""
make_comment_preview = fields.ActionEndpoint(api_name='make-comment-preview', post_type=_MakeCommentPreviewPost, response_type=_MakeCommentPreviewResponse)
class _RemoveCategoryPost(TypePadObject):
category = fields.Field()
"""The category to remove"""
remove_category = fields.ActionEndpoint(api_name='remove-category', post_type=_RemoveCategoryPost)
class _UpdatePublicationStatusPost(TypePadObject):
draft = fields.Field()
"""A boolean indicating whether the asset is a draft"""
publication_date = fields.Field(api_name='publicationDate')
"""The publication date of the asset"""
spam = fields.Field()
"""A boolean indicating whether the asset is spam; Comment only"""
update_publication_status = fields.ActionEndpoint(api_name='update-publication-status', post_type=_UpdatePublicationStatusPost)
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/assets/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/assets/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
actor = renamed_property(old='actor', new='author')
def primary_object_type(self):
try:
return self.object_types[0]
except (TypeError, IndexError):
return
@property
def asset_ref(self):
"""An `AssetRef` instance representing this asset."""
return AssetRef(url_id=self.url_id,
id=self.id,
author=self.author,
href='/assets/%s.json' % self.url_id,
type='application/json',
object_types=self.object_types,
object_type=self.object_type)
def __unicode__(self):
return self.title or self.content
def __str__(self):
return self.__unicode__()
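# Example usage (illustrative; the url_id is a placeholder):
#
#   asset = Asset.get_by_url_id('6a0000000000000000')
#   asset.title, asset.author.url_id, asset.comment_count
#   ref = asset.asset_ref              # lightweight AssetRef for this asset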
class AssetExtendedContent(TypePadObject):
rendered_extended_content = fields.Field(api_name='renderedExtendedContent')
"""The HTML rendered version of this asset's extended content, if it has any.
Otherwise, this property is omitted.
"""
class AssetRef(TypePadObject):
"""A structure that refers to an asset without including its full
content."""
author = fields.Object('User')
"""The user who created the referenced asset.
:attrtype:`User`
"""
href = fields.Field()
"""The URL of a representation of the referenced asset."""
id = fields.Field()
"""The URI from the referenced `Asset` object's `id` property."""
object_type = fields.Field(api_name='objectType')
"""The keyword identifying the type of asset the referenced `Asset` object is."""
object_types = fields.List(fields.Field(), api_name='objectTypes')
"""**Deprecated.** An array of object type identifier URIs identifying the
type of the referenced asset.
Only the one object type URI for the particular type of asset the referenced
asset is will be present.
:attrtype:`list`
"""
type = fields.Field()
"""The MIME type of the representation at the URL given in the `href`
property."""
url_id = fields.Field(api_name='urlId')
"""The canonical identifier from the referenced `Asset` object's `url_id`
property."""
def reclass_for_data(self, data):
"""Returns ``False``.
This method prevents `AssetRef` instances from being reclassed when
updated from a data dictionary based on the dictionary's
``objectTypes`` member.
"""
# AssetRefs are for any object type, so don't reclass them.
return False
class AssetSource(TypePadObject):
"""Information about an `Asset` instance imported from another service."""
by_user = fields.Field(api_name='byUser')
"""**Deprecated.** `True` if this content is considered to be created by its
author, or `False` if it's actually someone else's content imported by the
asset author."""
permalink_url = fields.Field(api_name='permalinkUrl')
"""The permalink URL of the resource from which the related asset was
imported."""
provider = fields.Dict(fields.Field())
"""**Deprecated.** Description of the external service provider from which
this content was imported, if known.
Contains ``name``, ``icon``, and ``uri`` properties. This property will be
omitted if the service from which the related asset was imported is not
recognized.
:attrtype:`dict`
"""
class AudioLink(TypePadObject):
"""A link to an audio recording."""
duration = fields.Field()
"""The duration of the audio stream in seconds.
This property will be omitted if the length of the audio stream could not be
determined.
"""
url = fields.Field()
"""The URL of an MP3 representation of the audio stream."""
class AuthToken(TypePadObject):
auth_token = fields.Field(api_name='authToken')
"""The actual auth token string.
Use this as the access token when making an OAuth request.
"""
target_object = fields.Object('TypePadObject', api_name='targetObject')
"""**Deprecated.** The root object to which this auth token grants access.
This is a legacy field maintained for backwards compatibility with older
clients, as auth tokens are no longer scoped to specific objects.
:attrtype:`TypePadObject`
"""
def make_self_link(self):
# TODO: We don't have the API key, so we can't build a self link.
return
@classmethod
def get_by_key_and_token(cls, api_key, auth_token):
return cls.get('/auth-tokens/%s:%s.json' % (api_key, auth_token))
target = renamed_property(old='target', new='target_object')
class Badge(TypePadObject):
description = fields.Field()
"""A human-readable description of what a user must do to win this badge."""
display_name = fields.Field(api_name='displayName')
"""A human-readable name for this badge."""
id = fields.Field()
"""The canonical identifier that can be used to identify this badge in URLs.
This can be used to recognise where the same badge is returned in response to
different requests, and as a mapping key for an application's local data
store.
"""
image_link = fields.Object('ImageLink', api_name='imageLink')
"""A link to the image that depicts this badge to users.
:attrtype:`ImageLink`
"""
class Blog(TypePadObject):
categories = fields.Link(ListObject)
"""Get a list of categories which are defined for the selected blog.
:attrtype:`list`
"""
commenting_settings = fields.Link('BlogCommentingSettings', api_name='commenting-settings')
"""Get the commenting-related settings for this blog.
:attrtype:`BlogCommentingSettings`
"""
comments = fields.Link(ListOf('Comment'))
crosspost_accounts = fields.Link(ListOf('Account'), api_name='crosspost-accounts')
"""Get a list of accounts that can be used for crossposting with this blog.
:attrtype:`list of Account`
"""
description = fields.Field()
"""The description of the blog as provided by its owner."""
home_url = fields.Field(api_name='homeUrl')
"""The URL of the blog's home page."""
id = fields.Field()
"""A URI that serves as a globally unique identifier for the object."""
media_assets = fields.Link(ListOf('Asset'), api_name='media-assets')
"""POST: Add a new media asset to the account that owns this blog.
:attrtype:`list of Asset`
"""
object_type = fields.Field(api_name='objectType')
"""The keyword identifying the type of object this is.
For a Blog object, `object_type` will be ``Blog``.
"""
object_types = fields.List(fields.Field(), api_name='objectTypes')
"""**Deprecated.** An array of object type identifier URIs.
This set will contain the string ``tag:api.typepad.com,2009:Blog`` for a Blog
object.
:attrtype:`list`
"""
owner = fields.Object('User')
"""The user who owns the blog.
:attrtype:`User`
"""
page_assets = fields.Link(ListOf('Page'), api_name='page-assets')
"""Get a list of pages associated with the selected blog.
POST: Add a new page to a blog
:attrtype:`list of Page`
"""
post_assets = fields.Link(ListOf('Post'), api_name='post-assets')
"""Get a list of posts associated with the selected blog.
POST: Add a new post to a blog
:attrtype:`list of Post`
"""
post_by_email_settings = fields.Link('PostByEmailAddress', api_name='post-by-email-settings')
stats = fields.Link('BlogStats')
"""Get data about the pageviews for the selected blog.
:attrtype:`BlogStats`
"""
title = fields.Field()
"""The title of the blog."""
url_id = fields.Field(api_name='urlId')
"""A string containing the canonical identifier that can be used to identify
this object in URLs.
This can be used to recognise where the same user is returned in response to
different requests, and as a mapping key for an application's local data
store.
"""
class _AddCategoryPost(TypePadObject):
category = fields.Field()
"""The category to add"""
add_category = fields.ActionEndpoint(api_name='add-category', post_type=_AddCategoryPost)
class _DiscoverExternalPostAssetPost(TypePadObject):
permalink_url = fields.Field(api_name='permalinkUrl')
"""The URL of the page whose external post stub is being retrieved."""
class _DiscoverExternalPostAssetResponse(TypePadObject):
asset = fields.Object('Asset')
"""The asset that acts as a stub for the given permalink.
:attrtype:`Asset`
"""
discover_external_post_asset = fields.ActionEndpoint(api_name='discover-external-post-asset', post_type=_DiscoverExternalPostAssetPost, response_type=_DiscoverExternalPostAssetResponse)
class _RemoveCategoryPost(TypePadObject):
category = fields.Field()
"""The category to remove"""
remove_category = fields.ActionEndpoint(api_name='remove-category', post_type=_RemoveCategoryPost)
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/blogs/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/blogs/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
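# Example usage (illustrative; the url_id is a placeholder):
#
#   blog = Blog.get_by_url_id('6a0000000000000000')
#   blog.title, blog.owner
#   posts = blog.post_assets           # link to the blog's Post assets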
class BlogCommentingSettings(TypePadObject):
captcha_required = fields.Field(api_name='captchaRequired')
"""`True` if this blog requires anonymous commenters to pass a CAPTCHA before
submitting a comment, or `False` otherwise."""
email_address_required = fields.Field(api_name='emailAddressRequired')
"""`True` if this blog requires anonymous comments to be submitted with an
email address, or `False` otherwise."""
html_allowed = fields.Field(api_name='htmlAllowed')
"""`True` if this blog allows commenters to use basic HTML formatting in
comments, or `False` if HTML will be removed."""
moderation_enabled = fields.Field(api_name='moderationEnabled')
"""`True` if this blog places new comments into a moderation queue for
approval before they are displayed, or `False` if new comments may be
available immediately."""
signin_allowed = fields.Field(api_name='signinAllowed')
"""`True` if this blog allows users to sign in to comment, or `False` if all
new comments are anonymous."""
signin_required = fields.Field(api_name='signinRequired')
"""`True` if this blog requires users to be logged in in order to leave a
comment, or `False` if anonymous comments will be rejected."""
time_limit = fields.Field(api_name='timeLimit')
"""Number of days after a post is published that comments will be allowed.
If the blog has no time limit for comments, this property will be omitted.
"""
urls_auto_linked = fields.Field(api_name='urlsAutoLinked')
"""`True` if comments in this blog will automatically have any bare URLs
turned into links, or `False` if URLs will be shown unlinked."""
class BlogStats(TypePadObject):
daily_page_views = fields.Dict(fields.Field(), api_name='dailyPageViews')
"""A map containing the daily page views on the blog for the last 120 days.
The keys of the map are dates in W3CDTF format, and the values are the integer
number of page views on the blog for that date.
:attrtype:`dict`
"""
total_page_views = fields.Field(api_name='totalPageViews')
"""The total number of page views received by the blog for all time."""
class CommentTreeItem(TypePadObject):
comment = fields.Object('Asset')
"""The comment asset at this point in the tree.
:attrtype:`Asset`
"""
depth = fields.Field()
"""The number of levels deep this comment is in the tree.
A comment that is directly in reply to the root asset is 1 level deep. If a
given comment has a depth of 1, all of the direct replies to that comment will
have a depth of 2; their replies will have depth 3, and so forth.
"""
class ContainerRef(TypePadObject):
display_name = fields.Field(api_name='displayName')
"""The display name of the blog or group, as set by its owner."""
home_url = fields.Field(api_name='homeUrl')
"""The URL of the home page of the referenced blog or group."""
id = fields.Field()
"""The URI from the `id` property of the referenced blog or group."""
object_type = fields.Field(api_name='objectType')
"""The keyword identifying the type of object the referenced container is."""
url_id = fields.Field(api_name='urlId')
"""The canonical identifier from the `url_id` property of the referenced blog
or group."""
class Endpoint(TypePadObject):
action_endpoints = fields.List(fields.Object('Endpoint'), api_name='actionEndpoints')
"""For noun endpoints, an array of action endpoints that it supports.
:attrtype:`list of Endpoint`
"""
can_have_id = fields.Field(api_name='canHaveId')
"""For noun endpoints, `True` if an id part is accepted, or `False` if the
noun may only be used alone."""
can_omit_id = fields.Field(api_name='canOmitId')
"""For noun endpoints, `True` if the id part can be ommitted, or `False` if it
is always required."""
filter_endpoints = fields.List(fields.Object('Endpoint'), api_name='filterEndpoints')
"""For endpoints that return lists, an array of filters that can be appended
to the endpoint.
:attrtype:`list of Endpoint`
"""
format_sensitive = fields.Field(api_name='formatSensitive')
"""`True` if this endpoint requires a format suffix, or `False` otherwise."""
name = fields.Field()
"""The name of the endpoint, as it appears in URLs."""
parameterized = fields.Field()
"""For filter endpoints, `True` if a parameter is required on the filter, or
`False` if it's a boolean filter."""
post_object_type = fields.Object('ObjectType', api_name='postObjectType')
"""The type of object that this endpoint accepts for ``POST`` operations.
This property is omitted if this endpoint does not accept ``POST`` requests.
:attrtype:`ObjectType`
"""
property_endpoints = fields.List(fields.Object('Endpoint'), api_name='propertyEndpoints')
"""For noun endpoints, an array of property endpoints that it supports.
:attrtype:`list of Endpoint`
"""
resource_object_type = fields.Object('ObjectType', api_name='resourceObjectType')
"""The type of object that this endpoint represents for ``GET``, ``PUT`` and
``DELETE`` operations.
This property is omitted for action endpoints, as they do not represent
resources.
:attrtype:`ObjectType`
"""
response_object_type = fields.Object('ObjectType', api_name='responseObjectType')
"""For action endpoints, the type of object that this endpoint returns on
success.
If the endpoint returns no payload on success, or if this is not an action
endpoint, this property is omitted.
:attrtype:`ObjectType`
"""
supported_methods = fields.Dict(fields.Field(), api_name='supportedMethods')
"""A mapping of the HTTP methods that this endpoint accepts to the docstrings
describing the result of each method.
:attrtype:`dict`
"""
supported_query_arguments = fields.List(fields.Field(), api_name='supportedQueryArguments')
"""The names of the query string arguments that this endpoint accepts.
:attrtype:`list`
"""
class Entity(TypePadObject):
id = fields.Field()
"""A URI that serves as a globally unique identifier for the object."""
url_id = fields.Field(api_name='urlId')
"""A string containing the canonical identifier that can be used to identify
this object in URLs.
This can be used to recognise where the same user is returned in response to
different requests, and as a mapping key for an application's local data
store.
"""
class Event(TypePadObject):
"""An action that a user or group did.
An event has an `actor`, which is the user or group that did the action; a
set of `verbs` that describe what kind of action occured; and an `object`
that is the object that the action was done to. In the current TypePad API
implementation, only assets, users and groups can be the object of an
event.
"""
actor = fields.Object('Entity')
"""The user who performed the action described by this event.
:attrtype:`Entity`
"""
id = fields.Field()
"""A URI that serves as a globally unique identifier for the user."""
object = fields.Object('TypePadObject')
"""The object to which the action described by this event was performed.
:attrtype:`TypePadObject`
"""
published = fields.Datetime()
"""The time at which the event was performed, as a W3CDTF timestamp.
:attrtype:`datetime`
"""
url_id = fields.Field(api_name='urlId')
"""A string containing the canonical identifier that can be used to identify
this object in URLs.
This can be used to recognise where the same user is returned in response to
different requests, and as a mapping key for an application's local data
store.
"""
verb = fields.Field()
"""A keyword identifying the type of event this is."""
verbs = fields.List(fields.Field())
"""**Deprecated.** An array of verb identifier URIs.
This set will contain one verb identifier URI.
:attrtype:`list`
"""
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/events/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/events/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
def __unicode__(self):
return unicode(self.object)
class ExternalFeedSubscription(TypePadObject):
callback_status = fields.Field(api_name='callbackStatus')
"""The HTTP status code that was returned by the last call to the
subscription's callback URL."""
callback_url = fields.Field(api_name='callbackUrl')
"""The URL to which to send notifications of new items in this subscription's
feeds."""
feeds = fields.Link(ListObject)
"""Get a list of strings containing the identifiers of the feeds to which this
subscription is subscribed.
:attrtype:`list`
"""
filter_rules = fields.List(fields.Field(), api_name='filterRules')
"""A list of rules for filtering notifications to this subscription.
Each rule is a full-text search query string, like those used with the
``/assets`` endpoint. An item will be delivered to the `callback_url` if it
matches any one of these query strings.
:attrtype:`list`
"""
post_as_user_id = fields.List(fields.Field(), api_name='postAsUserId')
"""For a Group-owned subscription, the urlId of the User who will own the
items posted into the group by the subscription.
:attrtype:`list`
"""
url_id = fields.Field(api_name='urlId')
"""The canonical identifier that can be used to identify this object in URLs.
This can be used to recognise where the same user is returned in response to
different requests, and as a mapping key for an application's local data
store.
"""
class _AddFeedsPost(TypePadObject):
feed_idents = fields.List(fields.Field(), api_name='feedIdents')
"""A list of identifiers to be added to the subscription's set of feeds.
:attrtype:`list`
"""
add_feeds = fields.ActionEndpoint(api_name='add-feeds', post_type=_AddFeedsPost)
class _RemoveFeedsPost(TypePadObject):
feed_idents = fields.List(fields.Field(), api_name='feedIdents')
"""A list of identifiers to be removed from the subscription's set of feeds.
:attrtype:`list`
"""
remove_feeds = fields.ActionEndpoint(api_name='remove-feeds', post_type=_RemoveFeedsPost)
class _UpdateFiltersPost(TypePadObject):
filter_rules = fields.List(fields.Field(), api_name='filterRules')
"""The new list of rules for filtering notifications to this subscription;
this will replace the subscription's existing rules.
:attrtype:`list`
"""
update_filters = fields.ActionEndpoint(api_name='update-filters', post_type=_UpdateFiltersPost)
class _UpdateNotificationSettingsPost(TypePadObject):
callback_url = fields.Field(api_name='callbackUrl')
"""The new callback URL to receive notifications of new content in this
subscription's feeds."""
secret = fields.Field()
"""An optional subscriber-provided opaque token that will be used to compute
an HMAC digest to be sent along with each item delivered to the
`callback_url`."""
verify_token = fields.Field(api_name='verifyToken')
"""A subscriber-provided opaque token that will be echoed back in a
verification request to the `callback_url`.
Required, if the `callback_url` is being modified with this endpoint.
"""
update_notification_settings = fields.ActionEndpoint(api_name='update-notification-settings', post_type=_UpdateNotificationSettingsPost)
class _UpdateUserPost(TypePadObject):
post_as_user_id = fields.Field(api_name='postAsUserId')
"""The `url_id` of the user who will own the assets and events posted into the
group's stream by this subscription.
The user must be an administrator of the group.
"""
update_user = fields.ActionEndpoint(api_name='update-user', post_type=_UpdateUserPost)
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/external-feed-subscriptions/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/external-feed-subscriptions/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
class Favorite(TypePadObject):
"""A favorite of some other asset.
Asserts that the user_id and asset_id parameters match ^\w+$."""
_class_object_type = "Favorite"
author = fields.Object('User')
"""The user who saved this favorite.
That is, this property is the user who saved the target asset as a favorite,
not the creator of that asset.
:attrtype:`User`
"""
id = fields.Field()
"""A URI that serves as a globally unique identifier for the favorite."""
in_reply_to = fields.Object('AssetRef', api_name='inReplyTo')
"""A reference to the target asset that has been marked as a favorite.
:attrtype:`AssetRef`
"""
published = fields.Datetime()
"""The time that the favorite was created, as a W3CDTF timestamp.
:attrtype:`datetime`
"""
url_id = fields.Field(api_name='urlId')
"""A string containing the canonical identifier that can be used to identify
this favorite in URLs.
This can be used to recognise where the same favorite is returned in response
to different requests, and as a mapping key for an application's local data
store.
"""
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/favorites/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/favorites/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
@classmethod
def get_by_user_asset(cls, user_id, asset_id, **kwargs):
assert re.match('^\w+$', user_id), "invalid user_id parameter given"
assert re.match('^\w+$', asset_id), "invalid asset_id parameter given"
return cls.get('/favorites/%s:%s.json' % (asset_id, user_id),
**kwargs)
@classmethod
def head_by_user_asset(cls, *args, **kwargs):
fav = cls.get_by_user_asset(*args, **kwargs)
return fav.head()
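# Example usage (illustrative; both ids are placeholders):
#
#   fav = Favorite.get_by_user_asset('6p_user_id', '6a_asset_id')
#   Favorite.head_by_user_asset('6p_user_id', '6a_asset_id')
#   # The HEAD variant avoids fetching the body; a 404 response indicates the
#   # user has not favorited the asset.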
class FeedbackStatus(TypePadObject):
allow_comments = fields.Field(api_name='allowComments')
"""`True` if new comments may be posted to the related asset, or `False` if no
new comments are accepted."""
allow_trackback = fields.Field(api_name='allowTrackback')
"""`True` if new trackback pings may be posted to the related asset, or
`False` if no new pings are accepted."""
show_comments = fields.Field(api_name='showComments')
"""`True` if comments should be displayed on the related asset's permalink
page, or `False` if they should be hidden."""
class ImageLink(TypePadObject, _ImageResizer):
"""A link to an image.
Images hosted by TypePad can be resized with image sizing specs. See
the `url_template` field and `at_size` method.
"""
height = fields.Field()
"""The height of the original image in pixels.
If the height of the image is not available (for example, if the image isn't
hosted on TypePad), this property will be omitted.
"""
url = fields.Field()
"""The URL for the original, full size version of the image."""
url_template = fields.Field(api_name='urlTemplate')
"""An URL template with which to build alternate sizes of this image.
If present, replace the placeholder string ``{spec}`` with a valid sizing
specifier to generate the URL for an alternate version of this image. This
property is omitted if TypePad is unable to provide a scaled version of this
image (for example, if the image isn't hosted on TypePad).
"""
width = fields.Field()
"""The width of the original image in pixels.
If the width of the image is not available (for example, if the image isn't
hosted on TypePad), this property will be omitted.
"""
href = renamed_property(old='url', new='href')
class ObjectProperty(TypePadObject):
doc_string = fields.Field(api_name='docString')
"""A human-readable description of this property."""
name = fields.Field()
"""The name of the property."""
type = fields.Field()
"""The name of the type of this property."""
class ObjectType(TypePadObject):
name = fields.Field()
"""The name of this object type.
If this is an anonymous type representing the request or response of an action
endpoint, this property is omitted.
"""
parent_type = fields.Field(api_name='parentType')
"""The name of the parent type.
This property is omitted if this object type has no parent type.
"""
properties = fields.List(fields.Object('ObjectProperty'))
"""The properties belonging to objects of this object type.
:attrtype:`list of ObjectProperty`
"""
class PostByEmailAddress(TypePadObject):
email_address = fields.Field(api_name='emailAddress')
"""A private email address for posting via email."""
class PublicationStatus(TypePadObject):
"""A container for the flags that represent an asset's publication status.
Publication status is currently represented by two flags: published and
spam. The published flag is false when an asset is held for moderation,
and can be set to true to publish the asset. The spam flag is true when
TypePad's spam filter has determined that an asset is spam, or when the
asset has been marked as spam by a moderator.
"""
draft = fields.Field()
"""`True` if this asset is private (not yet published), or `False` if it has
been published."""
publication_date = fields.Field(api_name='publicationDate')
"""The time at which the related asset was (or will be) published, as a W3CDTF
timestamp.
If the related asset has been scheduled to be posted later, this property's
timestamp will be in the future.
"""
class Relationship(TypePadObject):
"""The unidirectional relationship between a pair of entities.
A Relationship can be between a user and a user (a contact relationship),
or a user and a group (a membership). In either case, the relationship's
status shows *all* the unidirectional relationships between the source and
target entities.
"""
created = fields.Dict(fields.Datetime())
"""A mapping of the relationship types present between the source and target
objects to the times those types of relationship were established.
The keys of the map are the relationship type URIs present in the
relationship's `status` property; the values are W3CDTF timestamps for the
times those relationship edges were created.
:attrtype:`dict of datetime`
"""
id = fields.Field()
"""A URI that serves as a globally unique identifier for the relationship."""
source = fields.Object('Entity')
"""The source entity of the relationship.
:attrtype:`Entity`
"""
status = fields.Object('RelationshipStatus')
"""An object describing all the types of relationship that currently exist
between the source and target objects.
:attrtype:`RelationshipStatus`
"""
status_obj = fields.Link('RelationshipStatus', api_name='status')
"""Get the status information for the selected relationship, including its
types.
PUT: Change the status information for the selected relationship, including
its types.
:attrtype:`RelationshipStatus`
"""
target = fields.Object('Entity')
"""The target entity of the relationship.
:attrtype:`Entity`
"""
url_id = fields.Field(api_name='urlId')
"""A string containing the canonical identifier that can be used to identify
this object in URLs.
This can be used to recognise where the same relationship is returned in
response to different requests, and as a mapping key for an application's
local data store.
"""
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/relationships/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/relationships/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
def _rel_type_updater(uri):
def update(self):
rel_status = RelationshipStatus.get(self.status_obj._location, batch=False)
if uri:
rel_status.types = [uri]
else:
rel_status.types = []
rel_status.put()
return update
block = _rel_type_updater("tag:api.typepad.com,2009:Blocked")
unblock = _rel_type_updater(None)
leave = _rel_type_updater(None)
def _rel_type_checker(uri):
def has_edge_with_uri(self):
return uri in self.status.types
return has_edge_with_uri
is_member = _rel_type_checker("tag:api.typepad.com,2009:Member")
is_admin = _rel_type_checker("tag:api.typepad.com,2009:Admin")
is_blocked = _rel_type_checker("tag:api.typepad.com,2009:Blocked")
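    # Usage sketch (hypothetical Relationship instance `rel`): the closures above
    # become bound methods, so for example
    #   rel.block()       # PUTs a status whose types list is just the Blocked URI
    #   rel.is_blocked()  # True once that URI appears in rel.status.types
    #   rel.unblock()     # PUTs an empty types list, clearing the edge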
class RelationshipStatus(TypePadObject):
"""A representation of just the relationship types of a relationship,
without the associated endpoints."""
types = fields.List(fields.Field())
"""A list of relationship type URIs describing the types of the related
relationship.
:attrtype:`list`
"""
class UserBadge(TypePadObject):
badge = fields.Object('Badge')
"""The badge that was won.
:attrtype:`Badge`
"""
earned_time = fields.Field(api_name='earnedTime')
"""The time that the user earned the badge given in `badge`."""
class UserProfile(TypePadObject):
"""Additional profile information about a TypePad user.
This additional information is useful when showing information about a
TypePad account directly, but is generally not required when linking to
an ancillary TypePad account, such as the author of a post.
"""
about_me = fields.Field(api_name='aboutMe')
"""The user's long description or biography, as a free-form string they
provided."""
avatar_link = fields.Object('ImageLink', api_name='avatarLink')
"""A link to an image representing this user.
:attrtype:`ImageLink`
"""
display_name = fields.Field(api_name='displayName')
"""The user's chosen display name."""
email = fields.Field()
"""The user's email address.
This property is only provided for authenticated requests if the user has
shared it with the authenticated application, and the authenticated user is
allowed to view it (as with administrators of groups the user has joined). In
all other cases, this property is omitted.
"""
follow_frame_content_url = fields.Field(api_name='followFrameContentUrl')
"""The URL of a widget that, when rendered in an ``iframe``, allows viewers to
follow this user.
Render this widget in an ``iframe`` 300 pixels wide and 125 pixels high.
"""
gender = fields.Field()
"""The user's gender, as they provided it.
This property is only provided for authenticated requests if the user has
shared it with the authenticated application, and the authenticated user is
allowed to view it (as with administrators of groups the user has joined). In
all other cases, this property is omitted.
"""
homepage_url = fields.Field(api_name='homepageUrl')
"""The address of the user's homepage, as a URL they provided.
This property is omitted if the user has not provided a homepage.
"""
id = fields.Field()
"""The URI from the related `User` object's `id` property."""
interests = fields.List(fields.Field())
"""A list of interests provided by the user and displayed on their profile
page.
:attrtype:`list`
"""
location = fields.Field()
"""The user's location, as a free-form string they provided."""
membership_management_page_url = fields.Field(api_name='membershipManagementPageUrl')
"""The URL of a page where this user can manage their group memberships.
If this is not the authenticated user's UserProfile object, this property is
omitted.
"""
preferred_username = fields.Field(api_name='preferredUsername')
"""The name the user has chosen for use in the URL of their TypePad profile
page.
This property can be used to select this user in URLs, although it is not a
persistent key, as the user can change it at any time.
"""
profile_edit_page_url = fields.Field(api_name='profileEditPageUrl')
"""The URL of a page where this user can edit their profile information.
If this is not the authenticated user's UserProfile object, this property is
omitted.
"""
profile_page_url = fields.Field(api_name='profilePageUrl')
"""The URL of the user's TypePad profile page."""
url_id = fields.Field(api_name='urlId')
"""The canonical identifier from the related `User` object's `url_id`
property."""
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/users/%s/profile.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
"""Returns the `UserProfile` instance with the given URL identifier."""
if url_id == '':
raise ValueError("An url_id is required")
prof = cls.get('/users/%s/profile.json' % url_id, **kwargs)
prof.__dict__['url_id'] = url_id
prof.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return prof
@property
def user(self):
"""Returns a `User` instance for the TypePad member whose
`UserProfile` this is."""
return find_by_name('User').get_by_url_id(self.url_id)
class VideoLink(TypePadObject, _VideoResizer):
"""A link to a web video."""
embed_code = fields.Field(api_name='embedCode')
"""An opaque HTML fragment that, when embedded in a HTML page, provides an
inline player for the video."""
permalink_url = fields.Field(api_name='permalinkUrl')
"""**Editable.** The permalink URL for the video on its own site.
When posting a new video, send only the `permalink_url` property; videos on
supported sites will be discovered and the embed code generated automatically.
"""
html = renamed_property(old='html', new='embed_code')
class Audio(Asset):
"""An entry in a blog."""
_class_object_type = "Audio"
audio_link = fields.Object('AudioLink', api_name='audioLink')
"""A link to the audio stream that is this Audio asset's content.
:attrtype:`AudioLink`
"""
class Comment(Asset):
"""A text comment posted in reply to some other asset."""
_class_object_type = "Comment"
in_reply_to = fields.Object('AssetRef', api_name='inReplyTo')
"""A reference to the asset that this comment is in reply to.
:attrtype:`AssetRef`
"""
class Group(Entity):
"""A group that users can join, and to which users can post assets.
TypePad API social applications are represented as groups.
"""
_class_object_type = "Group"
audio_assets = fields.Link(ListOf('Audio'), api_name='audio-assets')
"""POST: Create a new Audio asset within the selected group.
:attrtype:`list of Audio`
"""
avatar_link = fields.Object('ImageLink', api_name='avatarLink')
"""A link to an image representing this group.
:attrtype:`ImageLink`
"""
display_name = fields.Field(api_name='displayName')
"""The display name set by the group's owner."""
events = fields.Link(ListOf('Event'))
"""Get a list of events describing actions performed in the selected group.
:attrtype:`list of Event`
"""
external_feed_subscriptions = fields.Link(ListOf('ExternalFeedSubscription'), api_name='external-feed-subscriptions')
"""Get a list of the group's active external feed subscriptions.
:attrtype:`list of ExternalFeedSubscription`
"""
link_assets = fields.Link(ListOf('Link'), api_name='link-assets')
"""POST: Create a new Link asset within the selected group.
:attrtype:`list of Link`
"""
memberships = fields.Link(ListOf('Relationship'))
"""Get a list of relationships between users and the selected group.
:attrtype:`list of Relationship`
"""
object_type = fields.Field(api_name='objectType')
"""A keyword describing the type of this object.
For a group object, `object_type` will be ``Group``.
"""
object_types = fields.List(fields.Field(), api_name='objectTypes')
"""**Deprecated.** An array of object type identifier URIs.
:attrtype:`list`
"""
photo_assets = fields.Link(ListOf('Photo'), api_name='photo-assets')
"""POST: Create a new Photo asset within the selected group.
:attrtype:`list of Photo`
"""
post_assets = fields.Link(ListOf('Post'), api_name='post-assets')
"""POST: Create a new Post asset within the selected group.
:attrtype:`list of Post`
"""
site_url = fields.Field(api_name='siteUrl')
"""The URL to the front page of the group website."""
tagline = fields.Field()
"""A tagline describing the group, as set by the group's owner."""
video_assets = fields.Link(ListOf('Video'), api_name='video-assets')
"""POST: Create a new Video asset within the selected group.
:attrtype:`list of Video`
"""
class _AddMemberPost(TypePadObject):
user_id = fields.Field(api_name='userId')
"""The urlId of the user who is being added."""
add_member = fields.ActionEndpoint(api_name='add-member', post_type=_AddMemberPost)
class _BlockUserPost(TypePadObject):
user_id = fields.Field(api_name='userId')
"""The urlId of the user who is being blocked."""
block_user = fields.ActionEndpoint(api_name='block-user', post_type=_BlockUserPost)
class _CreateExternalFeedSubscriptionPost(TypePadObject):
feed_idents = fields.List(fields.Field(), api_name='feedIdents')
"""A list of identifiers of the initial set of feeds to be subscribed to.
:attrtype:`list`
"""
filter_rules = fields.List(fields.Field(), api_name='filterRules')
"""A list of rules for filtering notifications to this subscription; each rule
is a query string using the search API's syntax.
:attrtype:`list`
"""
post_as_user_id = fields.Field(api_name='postAsUserId')
"""the urlId of the user who will own the assets and events posted into the
group's stream by this subscription.
The user must be an administrator of the group.
"""
class _CreateExternalFeedSubscriptionResponse(TypePadObject):
subscription = fields.Object('ExternalFeedSubscription')
"""The subscription object that was created.
:attrtype:`ExternalFeedSubscription`
"""
create_external_feed_subscription = fields.ActionEndpoint(api_name='create-external-feed-subscription', post_type=_CreateExternalFeedSubscriptionPost, response_type=_CreateExternalFeedSubscriptionResponse)
class _RemoveMemberPost(TypePadObject):
user_id = fields.Field(api_name='userId')
"""The urlId of the user who is being removed."""
remove_member = fields.ActionEndpoint(api_name='remove-member', post_type=_RemoveMemberPost)
class _UnblockUserPost(TypePadObject):
user_id = fields.Field(api_name='userId')
"""The urlId of the user who is being unblocked."""
unblock_user = fields.ActionEndpoint(api_name='unblock-user', post_type=_UnblockUserPost)
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/groups/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/groups/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
class Link(Asset):
"""A shared link to some URL."""
_class_object_type = "Link"
target_url = fields.Field(api_name='targetUrl')
"""The URL that is the target of this link."""
class Page(Asset):
embedded_image_links = fields.List(fields.Object('ImageLink'), api_name='embeddedImageLinks')
"""A list of links to the images that are embedded within the content of this
page.
:attrtype:`list of ImageLink`
"""
feedback_status = fields.Object('FeedbackStatus', api_name='feedbackStatus')
"""**Editable.** An object describing the comment and trackback behavior for
this page.
:attrtype:`FeedbackStatus`
"""
filename = fields.Field()
"""**Editable.** The base name of the page, used to create the
`permalink_url`."""
class Photo(Asset):
"""An entry in a blog."""
_class_object_type = "Photo"
image_link = fields.Object('ImageLink', api_name='imageLink')
"""A link to the image that is this Photo asset's content.
:attrtype:`ImageLink`
"""
class Post(Asset):
"""An entry in a blog."""
_class_object_type = "Post"
categories = fields.List(fields.Field())
"""**Editable.** A list of categories associated with the post.
:attrtype:`list`
"""
embedded_audio_links = fields.List(fields.Object('AudioLink'), api_name='embeddedAudioLinks')
"""A list of links to the audio streams that are embedded within the content
of this post.
:attrtype:`list of AudioLink`
"""
embedded_image_links = fields.List(fields.Object('ImageLink'), api_name='embeddedImageLinks')
"""A list of links to the images that are embedded within the content of this
post.
:attrtype:`list of ImageLink`
"""
embedded_video_links = fields.List(fields.Object('VideoLink'), api_name='embeddedVideoLinks')
"""A list of links to the videos that are embedded within the content of this
post.
:attrtype:`list of VideoLink`
"""
feedback_status = fields.Object('FeedbackStatus', api_name='feedbackStatus')
"""**Editable.** An object describing the comment and trackback behavior for
this post.
:attrtype:`FeedbackStatus`
"""
filename = fields.Field()
"""**Editable.** The base name of the post to use when creating its
`permalink_url`."""
reblog_count = fields.Field(api_name='reblogCount')
"""The number of times this post has been reblogged by other people."""
reblog_of = fields.Object('AssetRef', api_name='reblogOf')
"""A reference to a post of which this post is a reblog.
:attrtype:`AssetRef`
"""
class User(Entity):
"""A TypePad user.
This includes those who own TypePad blogs, those who use TypePad Connect
and registered commenters who have either created a TypePad account or
signed in with OpenID.
"""
_class_object_type = "User"
avatar_link = fields.Object('ImageLink', api_name='avatarLink')
"""A link to an image representing this user.
:attrtype:`ImageLink`
"""
badges = fields.Link(ListOf('UserBadge'))
"""Get a list of badges that the selected user has won.
:attrtype:`list of UserBadge`
"""
blogs = fields.Link(ListOf('Blog'))
"""Get a list of blogs that the selected user has access to.
:attrtype:`list of Blog`
"""
display_name = fields.Field(api_name='displayName')
"""The user's chosen display name."""
elsewhere_accounts = fields.Link(ListOf('Account'), api_name='elsewhere-accounts')
"""Get a list of elsewhere accounts for the selected user.
:attrtype:`list of Account`
"""
email = fields.Field()
"""**Deprecated.** The user's email address.
This property is only provided for authenticated requests if the user has
shared it with the authenticated application, and the authenticated user is
allowed to view it (as with administrators of groups the user has joined). In
all other cases, this property is omitted.
"""
events = fields.Link(StreamOf('Event'))
"""Get a list of events describing actions that the selected user performed.
:attrtype:`list of Event`
"""
favorites = fields.Link(ListOf('Favorite'))
"""Get a list of favorites that were listed by the selected user.
POST: Create a new favorite in the selected user's list of favorites.
:attrtype:`list of Favorite`
"""
gender = fields.Field()
"""**Deprecated.** The user's gender, as they provided it.
This property is only provided for authenticated requests if the user has
shared it with the authenticated application, and the authenticated user is
allowed to view it (as with administrators of groups the user has joined). In
all other cases, this property is omitted.
"""
interests = fields.List(fields.Field())
"""**Deprecated.** A list of interests provided by the user and displayed on
the user's profile page.
Use the `interests` property of the `UserProfile` object, which can be
retrieved from the ``/users/{id}/profile`` endpoint.
:attrtype:`list`
"""
location = fields.Field()
"""**Deprecated.** The user's location, as a free-form string provided by
them.
Use the the `location` property of the related `UserProfile` object, which can
be retrieved from the ``/users/{id}/profile`` endpoint.
"""
memberships = fields.Link(ListOf('Relationship'))
"""Get a list of relationships that the selected user has with groups.
:attrtype:`list of Relationship`
"""
notifications = fields.Link(ListOf('Event'))
"""Get a list of events describing actions by users that the selected user is
following.
:attrtype:`list of Event`
"""
object_type = fields.Field(api_name='objectType')
"""The keyword identifying the type of object this is.
For a User object, `object_type` will be ``User``.
"""
object_types = fields.List(fields.Field(), api_name='objectTypes')
"""**Deprecated.** An array of object type identifier URIs.
:attrtype:`list`
"""
preferred_username = fields.Field(api_name='preferredUsername')
"""The name the user has chosen for use in the URL of their TypePad profile
page.
This property can be used to select this user in URLs, although it is not a
persistent key, as the user can change it at any time.
"""
profile = fields.Link('UserProfile')
"""Get a more extensive set of user properties that can be used to build a
user profile page.
:attrtype:`UserProfile`
"""
profile_page_url = fields.Field(api_name='profilePageUrl')
"""The URL of the user's TypePad profile page."""
relationships = fields.Link(ListOf('Relationship'))
"""Get a list of relationships that the selected user has with other users,
and that other users have with the selected user.
:attrtype:`list of Relationship`
"""
def make_self_link(self):
return urljoin(typepad.client.endpoint, '/users/%s.json' % self.url_id)
@property
def xid(self):
return self.url_id
@classmethod
def get_by_id(cls, id, **kwargs):
url_id = id.rsplit(':', 1)[-1]
return cls.get_by_url_id(url_id, **kwargs)
@classmethod
def get_by_url_id(cls, url_id, **kwargs):
if url_id == '':
raise ValueError("An url_id is required")
obj = cls.get('/users/%s.json' % url_id, **kwargs)
obj.__dict__['url_id'] = url_id
obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
return obj
@classmethod
def get_self(cls, **kwargs):
"""Returns a `User` instance representing the account as whom the
client library is authenticating."""
return cls.get('/users/@self.json', **kwargs)
class Video(Asset):
"""An entry in a blog."""
_class_object_type = "Video"
preview_image_link = fields.Object('ImageLink', api_name='previewImageLink')
"""A link to a preview image or poster frame for this video.
This property is omitted if no such image is available.
:attrtype:`ImageLink`
"""
video_link = fields.Object('VideoLink', api_name='videoLink')
"""A link to the video that is this Video asset's content.
:attrtype:`VideoLink`
"""
browser_upload = BrowserUploadEndpoint()
| bsd-3-clause | 5,230,581,425,076,842,000 | 32.074144 | 209 | 0.667925 | false | 4.066759 | false | false | false |
thnuclub/scrapy_weibospider | spider/settings.py | 1 | 1267 | # -*- coding: utf-8 -*-
# Scrapy settings for spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'spider'
SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'
COOKIES_ENABLED = False
ITEM_PIPELINES = {
'spider.pipelines.SpiderPipeline': 300,
}
# ES configuration
ES_HOST = "localhost:9200"
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS=2
LOG_LEVEL = 'INFO'
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY=10
TIME_DELTA =20
SCHEDULER_IDLE_BEFORE_CLOSE = 10
# Specify the host and port to use when connecting to Redis (optional).
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN=2
CONCURRENT_REQUESTS_PER_IP=2
| apache-2.0 | 1,573,329,689,842,755,600 | 28.465116 | 81 | 0.759274 | false | 3.334211 | false | false | false |
evansde77/cirrus | tests/unit/cirrus/harnesses.py | 1 | 2422 | #!/usr/bin/env python
"""
harnesses
Common/reusable test harnesses
"""
import os
import unittest
import tempfile
import mock
import subprocess
from cirrus._2to3 import ConfigParser, to_str
from cirrus.configuration import Configuration
def _repo_directory():
command = ['git', 'rev-parse', '--show-toplevel']
process = subprocess.Popen(command, stdout=subprocess.PIPE)
outp, err = process.communicate()
return to_str(outp.strip())
def write_cirrus_conf(config_file, **sections):
"""
_write_cirrus_conf_
Util to create a cirrus configuration file and populate it
with the settings for the package, gitflow, pypi etc sections.
sections should be nested dict of the form {sectionname: {sectionsettings}}
Eg:
settings={'package': {'name': 'package_name'} }
"""
parser = ConfigParser.RawConfigParser()
for section, settings in iter(sections.items()):
parser.add_section(section)
for key, value in iter(settings.items()):
parser.set(section, key, value)
with open(config_file, 'w') as handle:
parser.write(handle)
class CirrusConfigurationHarness(object):
"""
CirrusConfigurationHarness
Test harness that generates a mock for load_configuration in the
module that is being mocked.
TODO: better location for this, plus maybe combine with
generating the cirrus config file
"""
def __init__(self, module_symbol, config_file, gitconf_content=None, **settings):
self.module_symbol = module_symbol
self.config_file = config_file
self.gitconf_str = gitconf_content
if self.gitconf_str is None:
self.gitconf_str = "cirrus.credential-plugin=default"
def setUp(self):
self.patch_environ = mock.patch.dict(os.environ, {'HOME': 'womp'})
self.patch_environ.start()
self.mock_config = mock.patch(self.module_symbol)
self.load_mock = self.mock_config.start()
self.patch_gitconfig = mock.patch('cirrus.gitconfig.shell_command')
self.mock_gitconfig = self.patch_gitconfig.start()
self.mock_gitconfig.return_value = self.gitconf_str
self.config = Configuration(self.config_file)
self.config.load()
self.load_mock.return_value = self.config
def tearDown(self):
self.patch_environ.stop()
self.mock_config.stop()
self.patch_gitconfig.stop()
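# Rough usage sketch inside a test case (the patched module path and config file
# name are hypothetical, not part of this module):
#   harness = CirrusConfigurationHarness('cirrus.build.load_configuration', 'cirrus.conf')
#   harness.setUp()       # patches os.environ, load_configuration and gitconfig
#   ...exercise code that calls load_configuration()...
#   harness.tearDown()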
| apache-2.0 | 5,484,854,321,533,871,000 | 28.180723 | 85 | 0.672172 | false | 3.749226 | true | false | false |
philgyford/django-spectator | spectator/events/migrations/0029_plays_to_works.py | 1 | 1259 | # Generated by Django 2.0 on 2018-02-08 11:47
from django.db import migrations
def forwards(apps, schema_editor):
"""
Change all Play objects into Work objects, and their associated
data into WorkRole and WorkSelection models, then delete the Play.
"""
Play = apps.get_model("spectator_events", "Play")
Work = apps.get_model("spectator_events", "Work")
WorkRole = apps.get_model("spectator_events", "WorkRole")
WorkSelection = apps.get_model("spectator_events", "WorkSelection")
for p in Play.objects.all():
work = Work.objects.create(kind="play", title=p.title, title_sort=p.title_sort)
for role in p.roles.all():
WorkRole.objects.create(
creator=role.creator,
work=work,
role_name=role.role_name,
role_order=role.role_order,
)
for selection in p.events.all():
WorkSelection.objects.create(
event=selection.event, work=work, order=selection.order
)
p.delete()
class Migration(migrations.Migration):
dependencies = [
("spectator_events", "0028_dancepieces_to_works"),
]
operations = [
migrations.RunPython(forwards),
]
| mit | 1,469,798,443,884,644,000 | 27.613636 | 87 | 0.611597 | false | 3.946708 | false | false | false |
galad-loth/LearnDescriptor | symbols/match_net_symbol.py | 1 | 4004 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 04 08:55:52 2017
@author: galad-loth
"""
import mxnet as mx
def SharedFeatureNet(data,conv_weight, conv_bias):
shared_net = mx.sym.Convolution(data=data, kernel=(7, 7), stride=(1,1),
pad=(3, 3), num_filter=24,weight=conv_weight[0],
bias=conv_bias[0],name="conv0")
shared_net = mx.sym.Activation(data=shared_net, act_type="relu", name="relu0")
shared_net = mx.sym.Pooling(data=shared_net, kernel=(3,3),pool_type="max",
stride=(2,2), name="maxpool0")
shared_net = mx.sym.Convolution(data=shared_net, kernel=(5, 5), stride=(1,1),
pad=(2, 2), num_filter=64, weight=conv_weight[1],
bias=conv_bias[1],name="conv1")
shared_net = mx.sym.Activation(data=shared_net, act_type="relu", name="relu1")
shared_net = mx.sym.Pooling(data=shared_net, kernel=(3,3),pool_type="max",
stride=(2,2), name="maxpool1")
shared_net = mx.sym.Convolution(data=shared_net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=96, weight=conv_weight[2],
bias=conv_bias[2],name="conv2")
shared_net = mx.sym.Activation(data=shared_net, act_type="relu", name="relu2")
shared_net = mx.sym.Convolution(data=shared_net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=96, weight=conv_weight[3],
bias=conv_bias[3],name="conv3")
shared_net = mx.sym.Activation(data=shared_net, act_type="relu", name="relu3")
shared_net = mx.sym.Convolution(data=shared_net, kernel=(3, 3), stride=(1,1),
pad=(1, 1), num_filter=64, weight=conv_weight[4],
bias=conv_bias[4],name="conv4")
shared_net = mx.sym.Activation(data=shared_net, act_type="relu", name="relu4")
shared_net = mx.sym.Pooling(data=shared_net, kernel=(3,3),pool_type="max",
stride=(2,2), name="maxpool4")
return shared_net
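# Note: both towers of the matching network call SharedFeatureNet with the same
# conv_weight/conv_bias symbol lists, so the convolutional feature extractor is
# weight-shared (Siamese) between the two input patches.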
def match_net_symbol():
data1=mx.sym.Variable("data1")
data2=mx.sym.Variable("data2")
dim_bottleneck=256
conv_weight = []
conv_bias = []
for i in range(5):
conv_weight.append(mx.sym.Variable('conv' + str(i) + '_weight'))
conv_bias.append(mx.sym.Variable('conv' + str(i) + '_bias'))
conv_res1=SharedFeatureNet(data1,conv_weight,conv_bias)
conv_res2=SharedFeatureNet(data2,conv_weight,conv_bias)
botteneck_weights=mx.sym.Variable("botteneck_weights")
botteneck_bias=mx.sym.Variable("botteneck_bias")
feat1 = mx.sym.FullyConnected(data=conv_res1,num_hidden=dim_bottleneck,
weight=botteneck_weights,bias=botteneck_bias,
name="botteneck")
feat2 = mx.sym.FullyConnected(data=conv_res2,num_hidden=dim_bottleneck,
weight=botteneck_weights,bias=botteneck_bias,
name="botteneck")
conv_res=mx.sym.Concat(feat1,feat2,dim=1, name='conv_res')
net = mx.sym.FullyConnected(data=conv_res,num_hidden=256, name="fc1")
net = mx.sym.Activation(data=net, act_type="relu", name="fc1_relu")
net = mx.sym.FullyConnected(data=net,num_hidden=256, name="fc2")
net = mx.sym.Activation(data=net, act_type="relu", name="fc2_relu")
net = mx.sym.FullyConnected(data=net,num_hidden=2, name="fc3")
net = mx.symbol.Softmax(data=net,name="softmax")
return net
if __name__=="__main__":
    matchnet = match_net_symbol()
matchnet_ex=matchnet.simple_bind(ctx=mx.cpu(), data1=(50,1,64,64),
data2=(50,1,64,64),softmax_label=(50,))
| apache-2.0 | 4,235,894,168,940,592,000 | 43.011236 | 105 | 0.546953 | false | 3.234249 | false | false | false |
cerealito/handyman | src/ManifestAnalizer.py | 1 | 2774 | import sys
from Manifest import Manifest
from ProjectAnalizer import ProjectAnalizer
from os.path import dirname, basename, isdir, join, realpath
__author__ = 'Samuel Flores'
class ManifestAnalizer:
def __init__(self, manifest_p, root=None):
self.manifest = manifest_p
if root:
self.root = root
else:
try:
self.root = self.locate_root()
except EnvironmentError:
print('cant find .repo dir', file=sys.stderr)
sys.exit(-1)
self.project_l = self.get_projects(manifest_p)
self.analizer_l = []
self.missing_buildfile = []
self.missing_module = []
for p in self.project_l:
analizer = ProjectAnalizer(p, self.root)
self.analizer_l.append(analizer)
def get_summary(self):
total = len(self.project_l)
complete = len(self.get_complete())
missing_file = len(self.get_missing_buildfile())
ret = 'Total projects: ' + str(total)
ret += '\nComplete: ' + str(complete)
ret += '\nWith Android.mk but incomplete: ' + str(total-complete-missing_file)
ret += '\nMissing Android.mk: ' + str(missing_file)
return ret
# TODO: optimize this
def get_missing_buildfile(self):
ret = []
for pa in self.analizer_l:
assert isinstance(pa, ProjectAnalizer)
if not pa.buildFile:
ret.append(pa)
return ret
def get_incomplete(self):
ret = []
for pa in self.analizer_l:
assert isinstance(pa, ProjectAnalizer)
if not pa.module_name and pa.buildFile:
ret.append(pa)
return ret
def get_complete(self):
ret = []
for pa in self.analizer_l:
assert isinstance(pa, ProjectAnalizer)
if pa.module_name:
ret.append(pa)
return ret
def get_analizer_for_project(self, project_name):
ret = None
for pa in self.analizer_l:
assert isinstance(pa, ProjectAnalizer)
if pa.project.name == project_name:
ret = pa
break
return ret
# look for ".repo" in parent directory
# thanks to autopatch
def locate_root(self):
lookdir = realpath(dirname(self.manifest))
while not isdir(join(lookdir, '.repo')):
newlookdir = dirname(lookdir)
if lookdir == newlookdir:
# .repo not found
raise EnvironmentError
lookdir = newlookdir
print('found repo top at', lookdir)
return lookdir
def get_projects(self, manifest_p):
m = Manifest(manifest_p)
return m.proj_list | gpl-2.0 | -1,360,184,337,938,270,200 | 28.210526 | 86 | 0.562365 | false | 3.912553 | false | false | false |
dopplershift/siphon | siphon/simplewebservice/wyoming.py | 1 | 4132 | # Copyright (c) 2013-2015 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""Read upper air data from the Wyoming archives."""
from io import StringIO
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from .._tools import get_wind_components
from ..http_util import HTTPEndPoint
class WyomingUpperAir(HTTPEndPoint):
"""Download and parse data from the University of Wyoming's upper air archive."""
def __init__(self):
"""Set up endpoint."""
super(WyomingUpperAir, self).__init__('http://weather.uwyo.edu/cgi-bin/sounding')
@classmethod
def request_data(cls, time, site_id, **kwargs):
r"""Retrieve upper air observations from the Wyoming archive.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
endpoint = cls()
df = endpoint._get_data(time, site_id, **kwargs)
return df
def _get_data(self, time, site_id, region='naconf'):
r"""Download and parse upper air observations from an online archive.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
region
Region to request data from
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
raw_data = self._get_data_raw(time, site_id, region)
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(raw_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
# Add unit dictionary
df.units = {'pressure': 'hPa',
'height': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
'v_wind': 'knot'}
return df
def _get_data_raw(self, time, site_id, region='naconf'):
"""Download data from the University of Wyoming's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
region : str
The region in which the station resides. Defaults to `naconf`.
Returns
-------
a file-like object from which to read the data
"""
path = ('?region={region}&TYPE=TEXT%3ALIST'
'&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}'
'&STNM={stid}').format(region=region, time=time, stid=site_id)
resp = self.get_path(path)
# See if the return is valid, but has no data
if resp.text.find('Can\'t') != -1:
raise ValueError(
'No data available for {time:%Y-%m-%d %HZ} from region {region} '
'for station {stid}.'.format(time=time, region=region,
stid=site_id))
soup = BeautifulSoup(resp.text, 'html.parser')
return StringIO(soup.find_all('pre')[0].contents[0])
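# Example usage (station id and date are hypothetical):
#   from datetime import datetime
#   df = WyomingUpperAir.request_data(datetime(2017, 9, 10, 12), 'OUN')
#   print(df[['pressure', 'temperature']].head(), df.units['pressure'])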
| mit | 3,074,026,012,844,382,000 | 34.016949 | 91 | 0.553001 | false | 4.152764 | false | false | false |
smallyear/linuxLearn | salt/salt/states/pkgbuild.py | 1 | 9224 | # -*- coding: utf-8 -*-
'''
The pkgbuild state is the front end of the Salt package-building backend. It
automatically builds packages on the specified build target from the given
spec file and sources.
.. versionadded:: 2015.8.0
.. code-block:: yaml
salt_2015.5.2:
pkgbuild.built:
- runas: thatch
- results:
- salt-2015.5.2-2.el7.centos.noarch.rpm
- salt-api-2015.5.2-2.el7.centos.noarch.rpm
- salt-cloud-2015.5.2-2.el7.centos.noarch.rpm
- salt-master-2015.5.2-2.el7.centos.noarch.rpm
- salt-minion-2015.5.2-2.el7.centos.noarch.rpm
- salt-ssh-2015.5.2-2.el7.centos.noarch.rpm
- salt-syndic-2015.5.2-2.el7.centos.noarch.rpm
- dest_dir: /tmp/pkg
- spec: salt://pkg/salt/spec/salt.spec
- template: jinja
- deps:
- salt://pkg/salt/sources/required_dependency.rpm
- tgt: epel-7-x86_64
- sources:
- salt://pkg/salt/sources/logrotate.salt
- salt://pkg/salt/sources/README.fedora
- salt://pkg/salt/sources/salt-2015.5.2.tar.gz
- salt://pkg/salt/sources/salt-2015.5.2-tests.patch
- salt://pkg/salt/sources/salt-api
- salt://pkg/salt/sources/salt-api.service
- salt://pkg/salt/sources/salt-master
- salt://pkg/salt/sources/salt-master.service
- salt://pkg/salt/sources/salt-minion
- salt://pkg/salt/sources/salt-minion.service
- salt://pkg/salt/sources/saltpkg.sls
- salt://pkg/salt/sources/salt-syndic
- salt://pkg/salt/sources/salt-syndic.service
- salt://pkg/salt/sources/SaltTesting-2015.5.8.tar.gz
/tmp/pkg:
pkgbuild.repo
'''
# Import python libs
from __future__ import absolute_import, print_function
import errno
import logging
import os
# Import salt libs
import salt.utils
from salt.ext import six
log = logging.getLogger(__name__)
def _get_missing_results(results, dest_dir):
'''
Return a list of the filenames specified in the ``results`` argument, which
are not present in the dest_dir.
'''
try:
present = set(os.listdir(dest_dir))
except OSError as exc:
if exc.errno == errno.ENOENT:
log.debug(
'pkgbuild.built: dest_dir \'{0}\' does not exist'
.format(dest_dir)
)
elif exc.errno == errno.EACCES:
log.error(
'pkgbuilt.built: cannot access dest_dir \'{0}\''
.format(dest_dir)
)
present = set()
return sorted(set(results).difference(present))
def built(name,
runas,
dest_dir,
spec,
sources,
tgt,
template=None,
deps=None,
env=None,
results=None,
force=False,
always=None,
saltenv='base',
log_dir='/var/log/salt/pkgbuild'):
'''
Ensure that the named package is built and exists in the named directory
name
The name to track the build, the name value is otherwise unused
runas
The user to run the build process as
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
tgt
The target platform to run the build on
template
Run the spec file through a templating engine
.. versionchanged:: 2015.8.2
This argument is now optional, allowing for no templating engine to
be used if none is desired.
deps
Packages required to ensure that the named package is built
can be hosted on either the salt master server or on an HTTP
or FTP server. Both HTTPS and HTTP are supported as well as
downloading directly from Amazon S3 compatible URLs with both
pre-configured and automatic IAM credentials
env
A dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
results
The names of the expected rpms that will be built
force : False
If ``True``, packages will be built even if they already exist in the
``dest_dir``. This is useful when building a package for continuous or
nightly package builds.
.. versionadded:: 2015.8.2
always
If ``True``, packages will be built even if they already exist in the
``dest_dir``. This is useful when building a package for continuous or
nightly package builds.
.. deprecated:: 2015.8.2
Use ``force`` instead.
saltenv
The saltenv to use for files downloaded from the salt filesever
log_dir : /var/log/salt/rpmbuild
Root directory for log files created from the build. Logs will be
organized by package name, version, OS release, and CPU architecture
under this directory.
.. versionadded:: 2015.8.2
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if always is not None:
salt.utils.warn_until(
'Carbon',
'The \'always\' argument to the pkgbuild.built state has been '
'deprecated, please use \'force\' instead.'
)
force = always
if not results:
ret['comment'] = '\'results\' argument is required'
ret['result'] = False
return ret
if isinstance(results, six.string_types):
results = results.split(',')
needed = _get_missing_results(results, dest_dir)
if not force and not needed:
ret['comment'] = 'All needed packages exist'
return ret
if __opts__['test']:
ret['result'] = None
if force:
ret['comment'] = 'Packages will be force-built'
else:
ret['comment'] = 'The following packages need to be built: '
ret['comment'] += ', '.join(needed)
return ret
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, dict):
ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
'documentation.')
ret['result'] = False
return ret
ret['changes'] = __salt__['pkgbuild.build'](
runas,
tgt,
dest_dir,
spec,
sources,
deps,
env,
template,
saltenv,
log_dir)
needed = _get_missing_results(results, dest_dir)
if needed:
ret['comment'] = 'The following packages were not built: '
ret['comment'] += ', '.join(needed)
ret['result'] = False
else:
ret['comment'] = 'All needed packages were built'
return ret
def repo(name, keyid=None, env=None):
'''
Make a package repository; the name is the directory to turn into a repo.
This state is best used with onchanges linked to your package building
states
name
The directory to find packages that will be in the repository
keyid
Optional Key ID to use in signing repository
env
A dictionary of environment variables to be utilized in creating the repository.
Example:
.. code-block:: yaml
- env:
OPTIONS: 'ask-passphrase'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
Use of OPTIONS on some platforms, for example: ask-passphrase, will
require gpg-agent or similar to cache passphrases.
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Package repo at {0} will be rebuilt'.format(name)
return ret
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, dict):
ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
'documentation.')
return ret
__salt__['pkgbuild.make_repo'](name, keyid, env)
ret['changes'] = {'refresh': True}
return ret
| apache-2.0 | -3,610,073,035,797,008,400 | 30.697595 | 89 | 0.582502 | false | 4.047389 | false | false | false |
project-chip/connectedhomeip | scripts/tools/zap_regen_all.py | 1 | 2408 | #!/usr/bin/env python3
#
# Copyright (c) 2020 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pathlib import Path
import sys
import subprocess
CHIP_ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..'))
def checkPythonVersion():
if sys.version_info[0] < 3:
print('Must use Python 3. Current version is ' + str(sys.version_info[0]))
exit(1)
def getGlobalTemplatesTargets():
targets = []
targets.extend([[str(filepath)] for filepath in Path('./examples').rglob('*.zap')])
targets.extend([[str(filepath)] for filepath in Path('./src/darwin').rglob('*.zap')])
targets.extend([[str(filepath)] for filepath in Path('./src/controller/data_model').rglob('*.zap')])
return targets
def getSpecificTemplatesTargets():
targets = []
targets.append(['src/controller/data_model/controller-clusters.zap', '-t', 'src/app/common/templates/templates.json'])
targets.append(['src/controller/data_model/controller-clusters.zap', '-t', 'examples/chip-tool/templates/templates.json'])
targets.append(['src/controller/data_model/controller-clusters.zap', '-t', 'src/controller/python/templates/templates.json'])
targets.append(['src/controller/data_model/controller-clusters.zap', '-t', 'src/darwin/Framework/CHIP/templates/templates.json'])
targets.append(['src/controller/data_model/controller-clusters.zap', '-t', 'src/controller/java/templates/templates.json'])
return targets
def getTargets():
targets = []
targets.extend(getGlobalTemplatesTargets())
targets.extend(getSpecificTemplatesTargets())
return targets
def main():
checkPythonVersion()
os.chdir(CHIP_ROOT_DIR)
targets = getTargets()
for target in targets:
subprocess.check_call(['./scripts/tools/zap/generate.py'] + target)
if __name__ == '__main__':
main()
| apache-2.0 | 7,816,920,460,108,642,000 | 38.47541 | 133 | 0.699751 | false | 3.750779 | false | false | false |
PyORBIT-Collaboration/py-orbit | py/orbit/parsers/field_parser.py | 2 | 6787 | import os
import sys
import re
import math
from spacecharge import Grid3D
class Field_Parser3D:
""" 3D field parser """
def __init__(self):
""" Create instance of the Field_Parser3D class """
self.__lines = []
def __del__(self):
del self.__lines
###########################################################################
#Gets the limits of the file being parsed
###########################################################################
def getLimits(self,filename):
infile = open(filename,"r")
# Resize the grid so more files can be processed
numLines = 0
xmin,xmax,ymin,ymax,zmin,zmax = 0,0,0,0,0,0
for line in infile.readlines():
splitline = line.split()
value = map(float, splitline)
# finding minimums and maximums of data to compute range
xmax = max(xmax,value[0])
ymax = max(ymax,value[1])
zmax = max(zmax,value[2])
xmin = min(xmin,value[0])
ymin = min(ymin,value[1])
zmin = min(zmin,value[2])
numLines += 1
xmax = max(xmax,0)
ymax = max(ymax,0)
zmax = max(zmax,0)
xmin = min(xmin,0)
ymin = min(ymin,0)
zmin = min(zmin,0)
print "Min and Max values: " , xmin, xmax, ymin, ymax, zmin, zmax, numLines, "\n"
limits = [xmin, xmax, ymin, ymax, zmin, zmax, numLines]
return limits
###########################################################################
#Limits is a list of format [xmin,xmax,ymin,ymax,zmin,zmax]
###########################################################################
def getRange(self,limits):
xmin = limits[0]
xmax = limits[1]
ymin = limits[2]
ymax = limits[3]
zmin = limits[4]
zmax = limits[5]
Xrange = xmax-xmin
Yrange = ymax-ymin
Zrange = zmax-zmin
range = [Xrange,Yrange,Zrange]
print "Range of X,Y,Z values in data: ", Xrange, " ", Yrange, " ", Zrange
return range
###############################################################################
# gets the grid size given the range of each variable
# and the step of each variable
##############################################################################
def getGridSize(self,range, step, usrLimits):
for i in xrange(3):
range[i] = range[i]*1.0/step[i]
gridSize = [range[0]+1,range[1]+1, range[2]+1]
xrnge = usrLimits[1] - usrLimits[0]
yrnge = usrLimits[3] - usrLimits[2]
zrnge = usrLimits[5] - usrLimits[4]
usrRange = [xrnge,yrnge,zrnge]
for i in xrange(3):
usrRange[i] = (usrRange[i]*1.0/step[i]) + 1
for i in xrange(3):
if(usrRange[i]<gridSize[i]):
gridSize[i] = usrRange[i]
gridSize = map(int,gridSize)
print "Grid Size [x,y,z]: " , gridSize
return gridSize
###############################################################################
#Returns the coordinates in the grid given the rawNumbers
#and the limits of each variable.
##############################################################################
def getCoordinates(self, gridSize, step,rawNumbers, limits):
coordinates = [rawNumbers[0] ,rawNumbers[1],rawNumbers[2]]
for i in xrange(len(coordinates)):
coordinates[i] = coordinates[i]*(1.0/step[i])
coordinates[i] = coordinates[i]-limits[2*i]/step[i]
coordinates = map(int, coordinates)
return coordinates
#######################################################################
# Checks to see if the given coordinates are within the range specified
#######################################################################
def checkLimits(self, arrayLimits, value):
if(value[0] >= arrayLimits[0] and
value[0] <= arrayLimits[1]):
if(value[1] >= arrayLimits[2] and
value[1] <= arrayLimits[3]):
if(value[2] >= arrayLimits[4] and
value[2] <= arrayLimits[5]):
return True
else:
return False
##########################################################################
#Checks to see if the point is on the grid given the current step
##########################################################################
def checkGrid(self,step,value):
localStep = [0,0,0]
localValue = [0,0,0]
for i in xrange(3):
localStep[i] = 2*step[i]
localValue[i] = 2*value[i]
map(int, localStep)
map(int, localValue)
for i in xrange(3):
if(value[i]%step[i] != 0):
return False
else:
return True
###############################################################################
# Parameters
# filename: name of the text file to be processed
# xmin,xmax,ymin,ymax,zmin,zmax - user defined limits for the file being parsed
# xstep,ystep,zstep - the step size for the parsing. (0.5 parses 0.0,0.5,1.0,1.5 etc.
# while a 1.0 value will parse 0.0,1.0,2.0, etc.
# All Grid sizes are user defined.
###############################################################################
def parse(self, filename, xmin,xmax,ymin,ymax,zmin,zmax,xstep,ystep,zstep):
usrLimits = [xmin,xmax,ymin,ymax,zmin,zmax]
limits = self.getLimits(filename)
range = self.getRange(limits)
step = [xstep,ystep,zstep]
#Computes the size of the grid given the user limits and the step
gridSize = self.getGridSize(range, step, usrLimits)
numLines = limits[6]
print "Number of lines in the file: ",numLines , "\n"
#for now we will say that the size of the grid encompasses all datapoints
print "GridSize " , gridSize[0],gridSize[1],gridSize[2]
BXGrid = Grid3D(gridSize[0],gridSize[1],gridSize[2])
BYGrid = Grid3D(gridSize[0],gridSize[1],gridSize[2])
BZGrid = Grid3D(gridSize[0],gridSize[1],gridSize[2])
fieldgrid3DMag = Grid3D(gridSize[0],gridSize[1],gridSize[2])
XGrid = []
YGrid = []
ZGrid = []
# Maps values from file to grid.
infile1 = open(filename,"r")
for line in infile1.readlines():
splitLine = line.split()
rawNumbers = map(float, splitLine)
# Maps data points to integers so that they can be evaluated for stepsize
testRS = map(int, rawNumbers)
if(self.checkGrid(step,rawNumbers) and
self.checkLimits(usrLimits,rawNumbers)
):
coordinates = self.getCoordinates(gridSize,step,rawNumbers, usrLimits)
XGrid.append(rawNumbers[0]/100.0)
YGrid.append(rawNumbers[1]/100.0)
ZGrid.append(rawNumbers[2]/100.0)
BXGrid.setValue(rawNumbers[3]/10000.0, coordinates[0], coordinates[1], coordinates[2])
BYGrid.setValue(rawNumbers[4]/10000.0, coordinates[0], coordinates[1], coordinates[2])
BZGrid.setValue(rawNumbers[5]/10000.0, coordinates[0], coordinates[1], coordinates[2])
getMag = ((rawNumbers[3]**2.0+rawNumbers[4]**2.0+rawNumbers[5]**2.0)**0.5)/10000.0
fieldgrid3DMag.setValue(getMag, coordinates[0], coordinates[1], coordinates[2])
MagList = [BXGrid,BYGrid,BZGrid,fieldgrid3DMag,XGrid,YGrid,ZGrid]
return MagList
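# Example usage (the file name, limits and 1.0 step sizes below are hypothetical):
#   parser = Field_Parser3D()
#   BX, BY, BZ, Bmag, X, Y, Z = parser.parse("field_map.txt",
#                                            -10, 10, -10, 10, 0, 100,
#                                            1.0, 1.0, 1.0)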
| mit | -1,396,745,432,312,111,900 | 30.425926 | 96 | 0.552822 | false | 3.195386 | false | false | false |
shelari/prereform_to_contemporary | dict_settings.py | 1 | 1154 | # -*- coding: utf-8 -*-
__author__ = 'ElenaSidorova'
import codecs
dicts = [
'freq.csv',
'ozhegov.csv',
'sharov.csv',
'ushakov.csv',
'zaliznyak.csv'
]
wrong_dicts = [
'adjectives_ija_ends.csv',
'perfect_verbs.csv'
]
ok_dicts = [
'soft_sign_ends.csv'
]
prefixes = [
'all_prefixes.csv'
]
class DictLoader(object):
@classmethod
def load_dicts(cls):
named_dicts = cls.read_data(dicts)
wrong = cls.read_data(wrong_dicts)
ok = cls.read_data(ok_dicts)
all_prefixes = cls.read_data(prefixes)
dict_settings = [named_dicts, wrong, ok, all_prefixes]
return dict_settings
@classmethod
def read_data(cls, arr):
result = []
for d in arr:
with codecs.open(d, 'r', 'utf-8') as inf:
data = inf.read().strip()
data = data.replace(u'\r', u'').lower().split(u'\n')
if 'freq' in d:
new = []
for el in data:
new.append(el.split()[0])
data = new
result.append(data)
return result
# a = DictLoader()
# b = a.load_dicts() | mit | -1,664,396,728,455,042,600 | 23.574468 | 64 | 0.520797 | false | 3.196676 | false | false | false |
jrising/open-estimate | models/spline_model.py | 1 | 32483 | # -*- coding: utf-8 -*-
################################################################################
# Copyright 2014, The Open Aggregator
# GNU General Public License, Ver. 3 (see docs/license.txt)
################################################################################
"""Model Spline File
Each line in a model spline file represents a polynomial segment in
log-probability space. The format is as follows::
spp1
<x>,<y0>,<y1>,<a0>,<a1>,<a2>
...
Each line describes a segment of a probability distribution of y,
conditional on x = ``<x>``. The segment spans from ``<y0>`` to
``<y1>``, where the lowest value of ``<y0>`` may be ``-inf``, and the
highest value of ``<y1>`` may be ``inf``. The ``<x>`` values may also
be categorical or numerical. If they are numerical, it is assumed
that these values represent samples of a smoothly varying function (a
cubic spline in every y).
The values ``<a0>``, ``<a1>`` and ``<a2>`` are the polynomial
coefficients in y (with quadratic coefficients, only normal or
exponential tails are possible). The final segment of the probability
function is:
exp(a0 + a1 y + a2 y^2)
"""
__copyright__ = "Copyright 2014, The Open Aggregator"
__license__ = "GPL"
__author__ = "James Rising"
__credits__ = ["James Rising", "Solomon Hsiang", "Bob Kopp"]
__maintainer__ = "James Rising"
__email__ = "[email protected]"
__status__ = "Production"
__version__ = "$Revision$"
# $Source$
import csv, math, string, random, traceback
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.stats import norm
from scipy.special import erf
from model import Model
from univariate_model import UnivariateModel
from memoizable import MemoizableUnivariate
class SplineModel(UnivariateModel, MemoizableUnivariate):
posinf = float('inf')
neginf = float('-inf')
samples = 1000
def __init__(self, xx_is_categorical=False, xx=None, conditionals=None, scaled=True):
super(SplineModel, self).__init__(xx_is_categorical, xx, scaled)
if conditionals is not None:
self.conditionals = conditionals
else:
self.conditionals = []
def __repr__(self):
return "Spline model"
def kind(self):
return 'spline_model'
def copy(self):
conditionals = []
for conditional in self.conditionals:
conditionals.append(conditional.copy())
return SplineModel(self.xx_is_categorical, list(self.get_xx()), conditionals, scaled=self.scaled)
def get_xx(self):
if self.xx_is_categorical:
return self.xx_text
else:
return self.xx
def eval_pval(self, x, p, threshold=1e-3):
conditional = self.get_conditional(x)
return conditional.get_pval(p, threshold)
def scale_y(self, a):
for conditional in self.conditionals:
conditional.scale_y(a)
if self.scaled:
conditional.rescale()
return self
def scale_p(self, a):
for conditional in self.conditionals:
conditional.scale_p(a)
if self.scaled:
conditional.rescale()
return self
def filter_x(self, xx):
conditionals = []
for x in xx:
conditionals.append(self.get_conditional(x))
return SplineModel(self.xx_is_categorical, xx, conditionals, scaled=self.scaled)
def interpolate_x(self, newxx):
# Is this a subset of our values?
subset = True
for x in newxx:
if x not in self.get_xx():
subset = False
if subset:
return self.filter_x(newxx)
#(limits, ys) = SplineModelConditional.propose_grid(self.conditionals)
#ddp_model = self.to_ddp(ys).interpolate_x(newxx)
#return SplineModel.from_ddp(ddp_model, limits)
conditionals = []
for x in newxx:
conditionals.append(self.get_conditional(x).copy())
return SplineModel(self.xx_is_categorical, newxx, conditionals, True)
# Only for categorical models
def recategorize_x(self, oldxx, newxx):
"""Construct a new model with categorical x values 'newxx', using the conditionals currently assigned to categorical x values 'oldxx'."""
conditionals = []
for ii in range(len(oldxx)):
if oldxx[ii] == -1 or (not isinstance(oldxx[ii], str) and not isinstance(oldxx[ii], unicode) and np.isnan(oldxx[ii])): # Not available
conditionals.append(SplineModelConditional.make_gaussian(-np.inf, np.inf, np.nan, np.nan))
else:
conditionals.append(self.get_conditional(oldxx[ii]))
return SplineModel(True, newxx, conditionals, scaled=self.scaled)
def add_conditional(self, x, conditional):
if not self.xx_is_categorical:
try:
self.xx.append(float(x))
self.xx_text.append(str(x))
except ValueError:
self.xx_is_categorical = True
if self.xx_is_categorical:
self.xx_text.append(x)
self.xx.append(len(self.xx))
self.conditionals.append(conditional)
def get_conditional(self, x):
if x is None or x == '' or len(self.conditionals) == 1:
return self.conditionals[0]
try:
return self.conditionals[self.xx_text.index(str(x))]
except Exception as e:
return SplineModelConditional.find_nearest(self.xx, x, self.conditionals)
def write_file(self, filename, delimiter):
with open(filename, 'w') as fp:
self.write(fp, delimiter)
def write(self, file, delimiter):
if self.scaled:
file.write("spp1\n")
else:
file.write("spv1\n")
writer = csv.writer(file, delimiter=delimiter)
for ii in range(len(self.xx)):
for jj in range(len(self.conditionals[ii].y0s)):
row = [self.xx_text[ii], self.conditionals[ii].y0s[jj], self.conditionals[ii].y1s[jj]]
row.extend(self.conditionals[ii].coeffs[jj])
writer.writerow(row)
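# File layout produced by write() above (and read back by init_from_spline_file):
# a header line of 'spp1' (scaled) or 'spv1' (unscaled), then one delimited row per
# segment holding x, y0, y1 and the log-density polynomial coefficients.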
def write_gaussian(self, file, delimiter):
writer = csv.writer(file, delimiter=delimiter)
writer.writerow(['dpc1', 'mean', 'sdev'])
for ii in range(len(self.xx)):
for jj in range(len(self.conditionals[ii].y0s)):
if len(self.conditionals[ii].coeffs[jj]) == 1 and self.conditionals[ii].coeffs[jj][0] == SplineModel.neginf:
continue
elif len(self.conditionals[ii].coeffs[jj]) == 3:
writer.writerow([self.xx_text[ii], self.conditionals[ii].gaussian_mean(jj), self.conditionals[ii].gaussian_sdev(jj)])
else:
writer.writerow([self.xx_text[ii], None, None])
def write_gaussian_plus(self, file, delimiter):
writer = csv.writer(file, delimiter=delimiter)
writer.writerow(['dpc1', 'y0', 'y1', 'mean', 'sdev'])
for ii in range(len(self.xx)):
for jj in range(len(self.conditionals[ii].y0s)):
if len(self.conditionals[ii].coeffs[jj]) == 1 and self.conditionals[ii].coeffs[jj][0] == SplineModel.neginf:
continue
elif len(self.conditionals[ii].coeffs[jj]) == 3:
writer.writerow([self.xx_text[ii], self.conditionals[ii].y0s[jj], self.conditionals[ii].y1s[jj], self.conditionals[ii].gaussian_mean(jj), self.conditionals[ii].gaussian_sdev(jj)])
else:
writer.writerow([self.xx_text[ii], self.conditionals[ii].y0s[jj], self.conditionals[ii].y1s[jj], None, None])
def to_points_at(self, x, ys):
conditional = self.get_conditional(x)
return conditional.to_points(ys)
def cdf(self, xx, yy):
conditional = self.get_conditional(xx)
return conditional.cdf(yy)
def is_gaussian(self, x=None):
conditional = self.get_conditional(x)
return len(conditional.y0s) == 1 and len(conditional.coeffs[0]) == 3
def get_mean(self, x=None):
if not isinstance(x, str) and not isinstance(x, unicode) and np.isnan(x):
return np.nan
conditional = self.get_conditional(x)
if conditional.is_gaussian():
return conditional.gaussian_mean(0)
total = 0
for ii in range(conditional.size()):
total += conditional.nongaussian_xpx(ii)
return total
def get_sdev(self, x=None):
conditional = self.get_conditional(x)
if conditional.is_gaussian():
return conditional.gaussian_sdev(0)
total = 0
for ii in range(conditional.size()):
total += conditional.nongaussian_x2px(ii)
mean = self.get_mean(x)
return math.sqrt(total - mean**2)
def draw_sample(self, x=None):
conditional = self.get_conditional(x)
return conditional.draw_sample()
def init_from_spline_file(self, file, delimiter, status_callback=None):
line = string.strip(file.readline())
if line == "spp1":
self.scaled = True
elif line == 'spv1':
self.scaled = False
else:
raise ValueError("Unknown format: %s" % (line))
self.xx = []
self.xx_text = []
self.xx_is_categorical = False
self.conditionals = []
reader = csv.reader(file, delimiter=delimiter)
x = None
conditional = None
for row in reader:
if row[0] != x:
x = row[0]
conditional = SplineModelConditional()
self.add_conditional(x, conditional)
conditional.add_segment(float(row[1]), float(row[2]), map(float, row[3:]))
if status_callback:
status_callback("Parsing...", reader.line_num / (reader.line_num + 3.0))
if self.scaled:
for conditional in self.conditionals:
conditional.rescale()
return self
def to_ddp(self, ys=None):
if ys is None:
(limits, ys) = SplineModelConditional.propose_grid(self.conditionals)
pp = np.ones((len(self.xx), len(ys)))
for ii in range(len(self.xx)):
pp[ii,] = self.to_points_at(self.xx[ii], ys)
return DDPModel('ddp1', 'spline_model', self.xx_is_categorical, self.get_xx(), False, ys, pp, scaled=self.scaled)
### Memoizable
def eval_pval_index(self, ii, p, threshold=1e-3):
return self.conditionals[ii].get_pval(p, threshold)
### Class Methods
@staticmethod
def create_single(xxs, y0s, y1s, coeffss, order=None, xx_is_categorical=True):
conditionals = []
xx = []
for key in (xxs if order is None else order):
xx.append(key)
conditional = SplineModelConditional.make_single(y0s[key], y1s[key], coeffss[key])
conditionals.append(conditional)
return SplineModel(xx_is_categorical, xx, conditionals, True)
@staticmethod
def create_gaussian(xxs, order=None, xx_is_categorical=True):
"""xxs should be a dictionary of the form {x: (mean, variance)}."""
conditionals = []
xx = []
for key in (xxs if order is None else order):
xx.append(key)
mean = float(xxs[key][0])
var = float(xxs[key][1])
conditional = SplineModelConditional.make_gaussian(SplineModel.neginf, SplineModel.posinf, mean, var)
conditionals.append(conditional)
return SplineModel(xx_is_categorical, xx, conditionals, True)
@staticmethod
def from_ddp(ddp_model, limits):
lps = ddp_model.log_p()
conditionals = []
xx = []
for ii in range(len(ddp_model.xx)):
lp = lps[ii,]
updown = np.concatenate((np.linspace(-1000, -900, np.floor(len(lp)/2)), np.linspace(-900, -1000, np.ceil(len(lp)/2))))
lp[lp == SplineModel.neginf] = updown[lp == SplineModel.neginf]
spline = UnivariateSpline(ddp_model.yy, lp, k=2)
try:
conditionals.append(SplineModelConditional.make_conditional_from_spline(spline, limits).rescale())
xx.append(ddp_model.get_xx()[ii])
except Exception as e:
print e
traceback.print_exc()
return SplineModel(ddp_model.xx_is_categorical, xx, conditionals, True)
@staticmethod
def merge(models):
for model in models:
if not model.scaled:
raise ValueError("Only scaled distributions can be merged.")
(models, xx) = UnivariateModel.intersect_x_all(models)
model = SplineModel()
for ii in range(len(xx)):
conditional = SplineModelConditional()
y0 = SplineModel.neginf
# Loop through each segment
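# Summing the log-density coefficients over an interval multiplies the component
# densities, so the merge walks the union of segment boundaries from neginf upward,
# adds the coefficients within each interval, and rescales the result at the end.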
while y0 != SplineModel.posinf:
y1 = SplineModel.posinf
coeffs = np.zeros(3)
for jj in range(len(models)):
modcond = models[jj].get_conditional(xx[ii])
for kk in range(len(modcond.y0s)):
if modcond.y0s[kk] <= y0 and modcond.y1s[kk] > y0:
if modcond.y1s[kk] < y1:
y1 = modcond.y1s[kk]
if np.all(np.isfinite(modcond.coeffs[kk])): # Ignore NA and Inf
coeffs[0:len(modcond.coeffs[kk])] = np.array(coeffs[0:len(modcond.coeffs[kk])]) + np.array(modcond.coeffs[kk])
while len(coeffs) > 0 and coeffs[-1] == 0:
coeffs = coeffs[0:-1]
conditional.add_segment(y0, y1, coeffs)
y0 = y1
model.add_conditional(xx[ii], conditional.rescale())
return model
@staticmethod
def combine(one, two):
if one.xx_is_categorical != two.xx_is_categorical:
raise ValueError("Cannot combine models that do not agree on categoricity")
if not one.scaled or not two.scaled:
raise ValueError("Cannot combine unscaled models")
(one, two, xx) = UnivariateModel.intersect_x(one, two)
conditionals = []
for ii in range(len(xx)):
conditionals.append(one.get_conditional(xx[ii]).convolve(two.get_conditional(xx[ii])).rescale())
return SplineModel(one.xx_is_categorical, xx, conditionals, True)
class SplineModelConditional():
# coeffs is ordered low-order to high-order
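# Each segment ii spans [y0s[ii], y1s[ii]] and stores polynomial coefficients of the
# log-density, so the density there is exp(coeffs[0] + coeffs[1]*y + coeffs[2]*y**2)
# (see evaluate/to_points); a lone coefficient of neginf marks a zero-probability segment.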
def __init__(self, y0s=None, y1s=None, coeffs=None):
if y0s is None:
self.y0s = np.array([])
self.y1s = np.array([])
self.coeffs = []
else:
self.y0s = np.array(y0s)
self.y1s = np.array(y1s)
self.coeffs = coeffs
def size(self):
return len(self.y0s)
def copy(self):
return SplineModelConditional(list(self.y0s), list(self.y1s), list(self.coeffs))
# Does not maintain scaling
def scale_y(self, a):
for ii in range(self.size()):
self.y0s[ii] *= a
self.y1s[ii] *= a
if len(self.coeffs[ii]) > 1:
self.coeffs[ii][1] /= a
if len(self.coeffs[ii]) > 2:
self.coeffs[ii][2] /= a*a
# Does not maintain scaling
def scale_p(self, a):
for ii in range(self.size()):
self.coeffs[ii] = map(lambda c: a*c, self.coeffs[ii])
# Does not check for overlapping segments
def add_segment(self, y0, y1, coeffs):
self.y0s = np.append(self.y0s, [y0])
self.y1s = np.append(self.y1s, [y1])
self.coeffs.append(coeffs)
indexes = np.argsort(self.y0s)
if indexes[-1] != len(self.y0s) - 1:
self.y0s = self.y0s[indexes]
self.y1s = self.y1s[indexes]
self.coeffs = [self.coeffs[index] for index in indexes]
# Note: after calling, need to set scaled on SplineModel object
def rescale(self):
integral = self.cdf(SplineModel.posinf)
if not np.isnan(integral) and integral > 0:
self.scale(1 / integral)
return self
def scale(self, factor):
if factor == 0:
self.y0s = [SplineModel.neginf]
self.y1s = [SplineModel.posinf]
self.coeffs = [[SplineModel.neginf]]
else:
for ii in range(self.size()):
self.coeffs[ii][0] = self.coeffs[ii][0] + math.log(factor)
# Similar to to_points
def evaluate(self, ii, y):
if y == SplineModel.neginf or y == SplineModel.posinf:
return 0
return np.exp(np.polyval(self.coeffs[ii][::-1], y))
# Similar to evaluate
def to_points(self, ys):
result = np.array(ys) * 0
for ii in range(len(self.y0s)):
valid = np.logical_and(ys >= self.y0s[ii], ys <= self.y1s[ii])
result[valid] = np.exp(np.polyval(self.coeffs[ii][::-1], ys[valid]))
return result
def partial_cdf(self, ii, y1):
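# Integral of the segment density from y0s[ii] up to y1. The branches below cover a
# constant log-density (rectangle area), an exponential density (closed form), an
# upward-curving quadratic (numeric sum over SplineModel.samples points), and the
# usual Gaussian case evaluated through norm.cdf with a rescaling factor.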
if len(self.coeffs[ii]) == 0:
return np.nan
if len(self.coeffs[ii]) == 1:
if self.coeffs[ii][0] == SplineModel.neginf:
return 0
return np.exp(self.coeffs[ii][0]) * (y1 - self.y0s[ii])
elif len(self.coeffs[ii]) == 2:
return (np.exp(self.coeffs[ii][0]) / self.coeffs[ii][1]) * (np.exp(self.coeffs[ii][1] * y1) - np.exp(self.coeffs[ii][1] * self.y0s[ii]))
elif self.coeffs[ii][2] > 0:
if self.y0s[ii] == SplineModel.neginf or self.y1s[ii] == SplineModel.posinf:
raise ValueError("Improper area of spline")
myys = np.linspace(self.y0s[ii], y1, SplineModel.samples)
return sum(np.exp(np.polyval(self.coeffs[ii][::-1], myys))) * (y1 - self.y0s[ii]) / SplineModel.samples
else:
var = -.5 / self.coeffs[ii][2]
mean = self.coeffs[ii][1] * var
if np.isnan(mean) or np.isnan(var) or var <= 0:
return 0
exponent = self.coeffs[ii][0] - (-mean*mean / (2*var) + math.log(1 / math.sqrt(2*math.pi*var)))
if exponent > 100:
# math domain error!
return 0
rescale = math.exp(exponent)
below = 0
if float(self.y0s[ii]) != SplineModel.neginf:
below = norm.cdf(float(self.y0s[ii]), loc=mean, scale=math.sqrt(var))
if exponent > 20 and float(self.y0s[ii]) != SplineModel.neginf and float(self.y1s[ii]) != SplineModel.neginf and y1 != SplineModel.posinf:
# approaching math domain error: assume constant
total = rescale * (norm.cdf(self.y1s[ii], loc=mean, scale=math.sqrt(var)) - below)
return total * (y1 - self.y0s[ii]) / (self.y1s[ii] - self.y0s[ii])
return rescale * (norm.cdf(y1, loc=mean, scale=math.sqrt(var)) - below)
def cdf(self, yy):
integral = 0
for ii in range(len(self.y0s)):
if self.y1s[ii] >= yy:
y1 = yy
else:
y1 = self.y1s[ii]
integral += self.partial_cdf(ii, y1)
if self.y1s[ii] >= yy:
break
return integral
def draw_sample(self):
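# Inverse-transform sampling: draw u ~ Uniform(0, 1) and return the u-quantile.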
value = random.random()
return self.get_pval(value)
def get_pval(self, p, threshold=1e-3):
# Use finer thresholds in the distribution tails (p near 0 or 1)
if p < .1:
threshold = threshold * p * 10
elif p > .9:
threshold = threshold * (1 - p) * 10
# First figure out which spline p is in
integral = 0
for ii in range(len(self.y0s)):
if ii == len(self.y0s) - 1:
break # this will bring us to 1
partial = self.partial_cdf(ii, self.y1s[ii])
if integral + partial > p:
break
integral += partial
y = SplineModelConditional.ascinv(p - integral, lambda y: self.partial_cdf(ii, y), self.y0s[ii], self.y1s[ii], threshold)
if np.isnan(y):
# Let's just give back some value
if self.y0s[0] < 0 and self.y1s[len(self.y1s)-1] > 0:
y = 0
else:
y = (self.y0s[0] + self.y1s[len(self.y1s)-1]) / 2
return y
# find the x for a given y of an ascending function
# copied from math.js
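# Bisection search for x with func(x) == y; an infinite bracket end is stepped
# geometrically outward until the root is bracketed, then the interval is halved as usual.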
@staticmethod
def ascinv(y, func, minx, maxx, threshold):
tries = 0
while tries < 10000:
tries += 1
if (minx == SplineModel.neginf and maxx == SplineModel.posinf) or (minx == SplineModel.neginf and maxx > 0) or (minx < 0 and maxx == SplineModel.posinf):
midpoint = 0
elif minx == SplineModel.neginf:
midpoint = (maxx - 1.0) * 2
elif maxx == SplineModel.posinf:
midpoint = (minx + 1.0) * 2
else:
midpoint = (minx + maxx) / 2.0
error = func(midpoint) - y
if abs(error) < threshold:
return midpoint
elif np.isnan(error):
return np.nan
elif error > 0:
maxx = midpoint
elif error < 0:
minx = midpoint
return np.nan
def approximate_mean(self, limits):
rough_limits = self.rough_limits()
limits = (max(float(limits[0]), rough_limits[0]), min(float(limits[1]), rough_limits[1]))
ys = np.linspace(limits[0], limits[1], self.size() * SplineModel.samples)
ps = self.to_points(ys)
ps = ps / sum(ps)
return sum(ps * ys)
# Allow true gaussian or delta
def is_gaussian(self):
return len(self.y0s) == 1 and (len(self.coeffs[0]) == 3 or len(self.coeffs[0]) == 0)
def gaussian_sdev(self, ii):
if len(self.coeffs[ii]) == 0:
return 0
if self.coeffs[ii][2] == 0:
return np.inf
return 1/math.sqrt(-2*self.coeffs[ii][2])
def gaussian_mean(self, ii):
if len(self.coeffs[ii]) == 0:
return (self.y1s[ii] + self.y0s[ii]) / 2
if self.coeffs[ii][2] == 0:
return np.nan
return -self.coeffs[ii][1] / (2*self.coeffs[ii][2])
def nongaussian_xpx(self, ii):
a = self.coeffs[ii][2] if len(self.coeffs[ii]) > 2 else 0
b = self.coeffs[ii][1] if len(self.coeffs[ii]) > 1 else 0
c = self.coeffs[ii][0]
x0 = self.y0s[ii]
x1 = self.y1s[ii]
# From Matlab
if a == 0:
if x0 == SplineModel.neginf:
return (math.exp(c + b*x1)*(b*x1 - 1))/b**2
elif x1 == SplineModel.posinf:
return -(math.exp(c + b*x0)*(b*x0 - 1))/b**2
return (math.exp(c + b*x1)*(b*x1 - 1))/b**2 - (math.exp(c + b*x0)*(b*x0 - 1))/b**2
sqrtpi = math.pi**.5
na05 = ((-a)**.5)
na15 = ((-a)**1.5)
return (math.exp(a*x1**2 + b*x1)*math.exp(c))/(2*a) - (math.exp(a*x0**2 + b*x0)*math.exp(c))/(2*a) + (sqrtpi*b*math.exp(-b**2/(4*a))*math.exp(c)*erf((b + 2*a*x0)/(2*na05)))/(4*na15) - (sqrtpi*b*math.exp(-(b**2)/(4*a))*math.exp(c)*erf((b + 2*a*x1)/(2*na05)))/(4*na15)
def nongaussian_x2px(self, ii):
a = self.coeffs[ii][2] if len(self.coeffs[ii]) > 2 else 0
b = self.coeffs[ii][1] if len(self.coeffs[ii]) > 1 else 0
c = self.coeffs[ii][0]
x0 = self.y0s[ii]
x1 = self.y1s[ii]
# From Matlab
if a == 0:
if x0 == SplineModel.neginf:
return (math.exp(c + b*x1)*(b**2*x1**2 - 2*b*x1 + 2))/b**3
elif x1 == SplineModel.posinf:
return -(math.exp(c + b*x0)*(b**2*x0**2 - 2*b*x0 + 2))/b**3
return (math.exp(c + b*x1)*(b**2*x1**2 - 2*b*x1 + 2))/b**3 - (math.exp(c + b*x0)*(b**2*x0**2 - 2*b*x0 + 2))/b**3
sqrtpi = math.pi**.5
na05 = ((-a)**.5)
na25 = ((-a)**2.5)
na35 = ((-a)**3.5)
return (2*na25*b*math.exp(a*x0**2 + b*x0 + c) - 2*na25*b*math.exp(a*x1**2 + b*x1 + c) + 4*na35*x0*math.exp(a*x0**2 + b*x0 + c) - 4*na35*x1*math.exp(a*x1**2 + b*x1 + c) - 2*(sqrtpi)*a**3*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x0)/(2*na05)) + 2*(sqrtpi)*a**3*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x1)/(2*na05)) + (sqrtpi)*a**2*b**2*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x0)/(2*na05)) - (sqrtpi)*a**2*b**2*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x1)/(2*na05)))/(8*((-a)**4.5))
# Duplicated in models.js
def segment_max(self, jj):
maxyy = self.y0s[jj]
maxval = self.evaluate(jj, self.y0s[jj])
val = self.evaluate(jj, self.y1s[jj])
if (val > maxval):
maxval = val
maxyy = self.y1s[jj]
coeffs = self.coeffs[jj]
if len(coeffs) > 2:
yy = -coeffs[1] / (2*coeffs[2])
if yy > self.y0s[jj] and yy < self.y1s[jj]:
val = self.evaluate(jj, yy)
if val > maxval:
maxval = val
maxyy = yy
return (maxyy, maxval)
# Duplicated in models.js
# returns (yy, val)
def find_mode(self):
maxmax = (None, SplineModel.neginf)
for ii in range(self.size()):
mymax = self.segment_max(ii)
if mymax[1] > maxmax[1]:
maxmax = mymax
return maxmax
# Duplicated in models.js
def rough_span(self):
span = 0
for jj in range(self.size()):
if self.y0s[jj] == SplineModel.neginf or self.y1s[jj] == SplineModel.posinf:
if len(self.coeffs[jj]) == 3:
span += 3 / math.sqrt(abs(2*self.coeffs[jj][2]))
elif len(self.coeffs[jj]) == 2:
span += 5 / abs(self.coeffs[jj][1])
else:
span += 1 / abs(self.coeffs[jj][0]) # improper!
else:
span += self.y1s[jj] - self.y0s[jj]
return span
# Duplicated in models.js
def rough_limits(self):
limits0 = float(self.y0s[0])
limits1 = float(self.y1s[-1])
if limits0 == SplineModel.neginf or limits1 == SplineModel.posinf:
maxmax = self.find_mode()
span = self.rough_span()
if limits0 == SplineModel.neginf:
limits0 = maxmax[0] - span
if limits1 == SplineModel.posinf:
limits1 = maxmax[0] + span
return (limits0, limits1)
def convolve(self, other):
# NOTE: below is for a single segment...
# int_s e^P(s) e^Q(t - s) = int_s e^[P(s) + Q(t - s)] = int_s e^[a1 ss + b1 s + c1 + a2 (tt - 2ts + ss) + b2 t - b2 s + c2]
# int_s e^[(a1 + a2) ss + (b1 - 2t - b2) s] e^[a2 (tt) + b2 t + c1 + c2]
# Have to do approximate sum later anyway, so let's just convert to ddp
(limits, ys) = SplineModelConditional.propose_grid([self, other])
pp_self = self.to_points(ys)
pp_other = other.to_points(ys)
newpp = np.convolve(pp_self, pp_other)
newpp = newpp / sum(newpp) # Scale
yy = np.linspace(2*min(ys), 2*max(ys), 2*len(ys) - 1)
if np.any(newpp == 0):
conditional = SplineModelConditional()
# Break into many pieces
ii = 0
y0 = min(yy)
while ii == 0 or (ii < len(newpp) and newpp[ii] == 0):
if newpp[ii] == 0:
while ii < len(newpp) and newpp[ii] == 0:
ii += 1
if ii < len(newpp):
conditional.add_segment(y0, yy[ii], [SplineModel.neginf])
else:
conditional.add_segment(y0, yy[-1], [SplineModel.neginf])
break
y0 = yy[ii]
i0 = ii
while ii < len(newpp) and newpp[ii] > 0:
ii += 1
spline = UnivariateSpline(yy[i0:ii], np.log(newpp[i0:ii]), k=2, s=(ii - i0) / 1000.0)
if ii < len(newpp):
segments = SplineModelConditional.make_conditional_from_spline(spline, (y0, yy[ii]))
else:
segments = SplineModelConditional.make_conditional_from_spline(spline, (y0, yy[-1]))
for jj in range(segments.size()):
conditional.add_segment(segments.y0s[jj], segments.y1s[jj], segments.coeffs[jj])
if ii < len(newpp):
y0 = yy[ii]
else:
break
return conditional
else:
spline = UnivariateSpline(yy, np.log(newpp), k=2)
return SplineModelConditional.make_conditional_from_spline(spline, (2*limits[0], 2*limits[1]))
@staticmethod
def make_single(y0, y1, coeffs):
return SplineModelConditional(y0s=[y0], y1s=[y1], coeffs=[coeffs])
@staticmethod
def make_gaussian(y0, y1, mean, var):
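# These coefficients are the expansion of the Gaussian log-density
# log N(y; mean, var) = -(y - mean)**2/(2*var) - log(sqrt(2*pi*var)), collected from
# low order to high order: constant, linear (mean/var) and quadratic (-1/(2*var)) terms.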
return SplineModelConditional.make_single(y0, y1, [-mean*mean/(2*var) - np.log(np.sqrt(2*math.pi*var)), mean/var, -1/(2*var)])
@staticmethod
def make_conditional_from_spline(spline, limits):
conditional = SplineModelConditional()
knots = spline.get_knots()
midpoint = (knots[-1] + knots[1]) / 2
knots = sorted(knots)
knots[0] = float(limits[0])
knots[-1] = float(limits[1])
for ii in range(1, len(knots)):
if knots[ii-1] == SplineModel.neginf and knots[ii] == SplineModel.posinf:
y = midpoint
elif knots[ii-1] == SplineModel.neginf:
y = knots[ii]
elif knots[ii] == SplineModel.posinf:
y = knots[ii-1]
else:
y = (knots[ii-1] + knots[ii]) / 2
derivs = spline.derivatives(y)
a = derivs[2] / 2
b = derivs[1] - derivs[2] * y
c = derivs[0] - (a*y*y + b*y)
if a > 0 and (knots[ii-1] == SplineModel.neginf or knots[ii] == SplineModel.posinf):
conditional.add_segment(knots[ii-1], knots[ii], [SplineModel.neginf]) # This segment failed!
else:
conditional.add_segment(knots[ii-1], knots[ii], [c, b, a])
return conditional
@staticmethod
def find_nearest(array, value, within):
if isinstance(value, str) or isinstance(value, unicode):
try:
value = int(value)
except:
raise ValueError("Cannot apply find_nearest to categorical values.")
idx = (np.abs(np.array(array)-value)).argmin()
return within[idx]
@staticmethod
def approximate_sum(conditionals):
if len(conditionals) == 1:
return conditionals[0]
(limits, ys) = SplineModelConditional.propose_grid(conditionals)
ps = np.zeros(len(ys))
for ii in range(len(conditionals)):
ps = ps + conditionals[ii].to_points(ys)
lps = np.log(ps)
spline = UnivariateSpline(ys, lps, k=2)
return SplineModelConditional.make_conditional_from_spline(spline, limits)
@staticmethod
def propose_grid(conditionals):
limits = (SplineModel.neginf, SplineModel.posinf)
rough_limits = (SplineModel.posinf, SplineModel.neginf)
max_segments = 0
for conditional in conditionals:
if conditional.y0s[0] == conditional.y1s[-1] or np.isnan(conditional.y0s[0]) or np.isnan(conditional.y1s[-1]):
continue
limits = (max(limits[0], conditional.y0s[0]), min(limits[1], conditional.y1s[-1]))
conditional_rough_limits = conditional.rough_limits()
rough_limits = (min(rough_limits[0], conditional_rough_limits[0]), max(rough_limits[1], conditional_rough_limits[1]))
max_segments = max(max_segments, sum(map(lambda cc: len(cc), conditional.coeffs)))
num_points = 100 * max_segments / (1 + np.log(len(conditionals)))
ys = np.linspace(rough_limits[0], rough_limits[1], num_points)
return (limits, ys)
from ddp_model import DDPModel
Model.mergers["spline_model"] = SplineModel.merge
Model.mergers["spline_model+ddp_model"] = lambda models: DDPModel.merge(map(lambda m: m.to_ddp(), models))
Model.combiners['spline_model+spline_model'] = SplineModel.combine
Model.combiners["spline_model+ddp_model"] = lambda one, two: DDPModel.combine(one.to_ddp(), two)
| gpl-3.0 | 8,084,758,869,990,397,000 | 36.509238 | 510 | 0.551027 | false | 3.368907 | false | false | false |
Moobusy/learnpython | app/backend/views.py | 1 | 1088 | # -*- coding: utf-8 -*-
'''
Docer - backend - views
~~~~~~
A document viewing platform.
:copyright: (c) 2015 by Docer.Org.
:license: MIT, see LICENSE for more details.
'''
from . import backend
from flask import request, session, g, redirect, url_for, abort, render_template, flash
@backend.route('/')
def hello_world():
return "Hello admin!"
@backend.route('/user')
def user():
return 'Userpage'
@backend.route('/user/reg')
def reg():
return render_template('admin/reg.html')
@backend.route('/user/do_reg', methods=['POST'])
def do_reg():
from app.models import Users
_uname = request.form['username'].strip()
_pwd = request.form['password'].strip()
# Validate the submitted data
if len(_uname)>20 or len(_uname)<5:
return 'Username must be 5-20 characters long'
elif len(_pwd)>20 or len(_pwd)<8:
return 'Password must be 8-20 characters long'
else:
exists_users = Users.objects.filter(username = request.form['username'])
if exists_users.count()>0:
return 'Account already exists'
# Perform the registration
new_user = Users(
username = _uname,
password = _pwd
)
new_user.save()
return 'Registration successful'
| mit | -7,357,217,469,530,423,000 | 21.347826 | 87 | 0.660506 | false | 2.635897 | false | false | false |
mikkeljans/pyconomic | pyconomic/base.py | 1 | 19899 | """
Model Abstraction of e-economic.com API
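Illustrative usage (the WSDL URL and the Debtor model name are placeholders;
concrete model classes are normally generated with build_model_code):
client = Client('https://api.e-conomic.com/.../EconomicWebService.asmx?WSDL')
service = EConomicsService(client.service, EConomicsModelFactory(),
client.factory, PropertyCodec())
debtors = service.find(Debtor, name='Acme')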
"""
import copy
import re
import os
import base64
from collections import defaultdict
from suds.client import Client
class ObjectDoesNotExist(BaseException):
pass
class MultipleObjectsReturned(BaseException):
pass
class EConomicsService(object):
"""
Interface for e-conomic WSDL service
"""
def __init__(self, service, model_factory, soap_factory, codec):
self.service = service
self.model_factory = model_factory
self.soap_factory = soap_factory
self.ncalls = 0
self.codec = codec
def fetch_list(self, name, expected_wsdltype, *args, **kw):
result = getattr(self.service, name)(*args)
self.ncalls += 1
if not result:
return []
if expected_wsdltype and expected_wsdltype not in result.__keylist__:
return [result]
return result[0]
def fetch(self, name, *args, **kw):
return getattr(self.service, name)(*args)
def upgrade_to_order(self, handle, order_model):
hnd = self.fetch('Quotation_UpgradeToOrder', handle)
return self.model_factory.get_or_create_instance(self, order_model, hnd)
def upgrade_to_invoice(self, handle, current_invoice_model):
hnd = self.fetch('Order_UpgradeToInvoice', handle)
return self.model_factory.get_or_create_instance(self, current_invoice_model, hnd)
def book_invoice(self, handle, invoice_model):
hnd = self.fetch('CurrentInvoice_Book', handle)
return self.model_factory.get_or_create_instance(self, invoice_model, hnd)
def next_available_number(self, model):
return self.fetch('%s_GetNextAvailableNumber' % model.__name__)
def delete(self, model, handle):
self.fetch("%s_Delete" % model.__name__, handle)
def create(self, model, **data):
parsed_data = self.codec.encode_data_object(self, model, data)
hnd = self.fetch("%s_CreateFromData" % model.__name__, parsed_data)
return self.get_instance(model, hnd)
def get_or_create(self, model, **spec):
filter_names = [f['name'] for f in model.__filters__]
get_data = dict((k, v,) for k, v in spec.items() if k in filter_names)
try:
return self.get(model, **get_data)
except ObjectDoesNotExist:
return self.create(model, **spec)
def __find_handles(self, model, **spec):
""" find model instances based on given filter (spec)
The filter is based on available server-calls, so some values might not be available for filtering.
Multiple filter values will result in multiple server calls.
For complex filters in small datasets, it might be faster to fetch all and do your own in-memory filter.
Empty filter will fetch all.
:param model: subclass of EConomicsModel
:param spec: mapping of values to filter by
:return: a list of EConomicsModel instances
"""
server_calls = []
filter_names = dict([(f['name'], f['method'],) for f in model.get_filters()])
if not spec:
server_calls.append({'method': "%s_GetAll" % model.__name__, 'args': []})
else:
for key, value in spec.items():
if not key in filter_names:
raise ValueError("no server-method exists for filtering by '%s'" % key)
args = []
if not hasattr(value, '__iter__'):
value = [value]
if key.endswith('_list'):
vtype = type(value[0]).__name__
# TODO: this surely does not cover all cases of data types
array = self.soap_factory.create('ArrayOf%s' % vtype.capitalize())
getattr(array, "%s" % vtype).extend(value)
args.append(array)
else:
args.extend(value)
method = "%s_%s" % (model.__name__, filter_names[key])
if filter_names[key].startswith('GetAll'):
args = []
server_calls.append({'method': method, 'args': args, 'expect': "%sHandle" % model.__name__})
handles = [
map(Handle, self.fetch_list(scall['method'], scall.get('expect'), *scall['args']))
for scall in server_calls
]
return [h.wsdl for h in reduce(set.intersection, map(set, handles))]
def find(self, model, **spec):
handles = self.__find_handles(model, **spec)
return [self.get_instance(model, hnd) for hnd in handles]
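# For example (the filter name is only illustrative; valid names come from the
# model's FindBy*/GetAll* methods): service.find(Debtor, number=101) issues
# Debtor_FindByNumber and returns the matching instances.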
def get(self, model, **spec):
"""get a single model instance matching the given filter (spec)
:param model: model class
:param spec: mapping of values to filter by; must match exactly one instance
:return: the matching model instance
"""
handles = self.__find_handles(model, **spec)
if len(handles) > 1:
raise MultipleObjectsReturned()
if not handles:
raise ObjectDoesNotExist()
return self.get_instance(model, handles[0])
def get_instance(self, model, handle):
return self.model_factory.get_or_create_instance(self, model, handle)
def load_instance_data(self, instance):
model = instance.__class__
modelname = model.__name__
data = self.fetch("%s_GetData" % modelname, instance._handle)
instance._data = self.codec.decode_data_object(self, instance._handle, model, data)
def load_data(self, instance):
model = instance.__class__
modelname = model.__name__
handles = [inst._handle for (m, inst,) in self.model_factory.instances_iter([model], loaded=False)]
array = self.soap_factory.create('ArrayOf%sHandle' % modelname)
getattr(array, "%sHandle" % modelname).extend(handles)
for data in self.fetch_list("%s_GetDataArray" % modelname, None, array):
handle = data.Handle
inst = self.get_instance(model, handle)
inst._data = self.codec.decode_data_object(self, handle, model, data)
inst._loaded = True
def get_all_changes(self):
changesets = defaultdict(list)
for model, inst in self.model_factory.instances_iter(updated=True):
changesets[model].append(ModelChange(model, inst))
return changesets
def commit(self):
changesets = self.get_all_changes()
for model, changes in changesets.items():
datalist = [self.codec.encode_data_object(self, model, changeset.get_data()) for changeset in changes]
array = self.soap_factory.create('ArrayOf%sData' % model.__name__)
getattr(array, '%sData' % model.__name__).extend(datalist)
self.fetch("%s_UpdateFromDataArray" % model.__name__, array)
[change.apply_and_clear() for change in changes]
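# Typical flow: mutate attributes on fetched instances (edits accumulate in _changes),
# then call commit() once to push them as a single UpdateFromDataArray call per model.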
def __getattr__(self, name):
return getattr(self.service, name)
class ModelChange(object):
def __init__(self, model, instance):
self.model = model
self.instance = instance
def __repr__(self):
return "<Changes %r %r>" % (self.instance, self.clean_data(self.instance._changes))
def apply_and_clear(self):
self.instance._data.update(self.instance._changes)
self.instance._changes = {}
def clean_data(self, data):
result = {}
for k, v in data.items():
k = pythonize(k)
if k.endswith('_handle'):
k = k[:-7]
result[k] = v
return result
def get_data(self):
if not self.instance._data:
self.instance.fetch()
data = self.clean_data(self.instance._data)
data.update(self.clean_data(self.instance._changes))
data['Handle'] = self.instance._handle
return data
class PropertyCodec(object):
def __init__(self, missing_value=None):
self.missing_value = missing_value
def decode_data_object(self, service, handle, model, data):
decoded_data = {}
for prop in model.properties:
name = prop.name
if prop.name+'Handle' in data:
name = prop.name + 'Handle'
if not name in data:
value = prop.default_value(service, handle)
else:
value = prop.decode_value(service, handle, data[name])
decoded_data[prop.name] = value
return decoded_data
def encode_data_object(self, service, model, data):
#print 'ENCODE', data
encoded_data = {}
if 'Handle' in data:
encoded_data['Handle'] = data['Handle']
for prop in model.properties:
name = prop.pyname
if not name in data:
# encoded_data[prop.name] = self.missing_value
continue
value = data[name]
if value is None:
# encoded_data[prop.name] = value
continue
encoded_data[prop.name] = prop.encode_value(service, data[name])
return encoded_data
class EConomicsModelFactory(object):
def __init__(self):
self.__models = {}
def instances_iter(self, models=None, loaded=None, updated=None):
if models is None:
models = self.__models.keys()
for model in models:
for inst in self.__models[model].values():
if loaded is not None and bool(inst._loaded) != bool(loaded):
continue
if updated is not None and bool(inst._changes) != bool(updated):
continue
yield (model, inst,)
def get_or_create_instance(self, service, model, handle):
hashkey = hash((service, model, handle[0],))
modeldata = self.__models.setdefault(model, {})
return modeldata.setdefault(hashkey, model(service, handle))
class Handle(object):
def __init__(self, wsdl):
self.wsdl = wsdl
def __hash__(self):
return hash(self.wsdl[0])
def __eq__(self, other):
return hash(self) == other
def __repr__(self):
return "<Handle %r>" % self.wsdl.Id
class EConomicsMeta(type):
registry = {}
def __new__(mcs, name, bases, ns):
properties = []
for k, v in ns.items():
if hasattr(v, '__get__'):
properties.append(v)
ns['properties'] = properties
model = type.__new__(mcs, name, bases, ns)
mcs.registry[name] = model
return model
def get_filters(self):
return self.__filters__
class EConomicsBaseProperty(object):
def encode_value(self, service, value):
return value
def decode_value(self, service, handle, value):
return value
def default_value(self, service, handle):
return None
def __get__(self, instance, owner):
_ = owner
if instance is None:
return self
changes = instance._changes
if self.name in changes:
return changes[self.name]
if not instance._loaded:
instance.load()
value = instance._data[self.name]
if hasattr(value, 'fetched') and not value.fetched:
value.fetch()
return value
def __set__(self, instance, value):
instance._changes[self.name] = value
class EConomicsProperty(EConomicsBaseProperty):
def __init__(self, name):
self.name = name
self.pyname = pythonize(name)
def __repr__(self):
return "<%s Data>" % pythonize(self.name)
class EConomicsReference(EConomicsBaseProperty):
def __init__(self, name, model):
self.name = name + 'Handle'
self.model = model
self.pyname = pythonize(name)
def encode_value(self, service, value):
return value._handle
def decode_value(self, service, handle, value):
return service.get_instance(get_model(self.model), value)
def __repr__(self):
return "<%s %s>" % (self.name, self.model)
class QueryList(list):
def __init__(self, service, handle, model, method):
self.service = service
self.handle = handle
self.model = model
self.method = method
self.fetched = False
def __getattribute__(self, name):
if name in ['fetch', 'service', 'handle', 'model', 'method', 'fetched']:
return list.__getattribute__(self, name)
if self.fetched:
self.fetch()
return list.__getattribute__(self, name)
def fetch(self):
handles = self.service.fetch_list(self.method, None, self.handle)
self[:] = [self.service.get_instance(self.model, hnd) for hnd in handles]
self.fetched = True
return self
class EConomicsReferenceList(EConomicsBaseProperty):
def __init__(self, name, model, method):
self.name = name
self.model = model
self.method = method
self.pyname = pythonize(name)
def __repr__(self):
return "<%s [%s]>" % (self.name, self.model)
def encode_value(self, service, value):
return [v._handle for v in value]
def default_value(self, service, handle):
return QueryList(service, handle, get_model(self.model), self.method)
class EConomicsFileProperty(EConomicsBaseProperty):
def __init__(self, name, method, filetype):
self.name = name
self.filetype = filetype
self.method = method
self.pyname = pythonize(name)
def __repr__(self):
return "<%s %s file>" % (self.name, self.filetype)
def default_value(self, service, handle):
return FileObject(service, self.method, handle, self.filetype)
class FileObject(object):
def __init__(self, service, method, handle, filetype):
self.filedata = None
self.method = method
self.service = service
self.handle = handle
self.filetype = filetype
self.fetched = False
self.__last_location = None
def fetch(self):
self.filedata = self.service.fetch(self.method, self.handle)
self.fetched = True
return self
def save(self, location):
if not location.endswith(self.filetype):
location += '.' + self.filetype
with open(location, 'wb') as f:
f.write(base64.b64decode(self.filedata))
self.__last_location = location
def show(self):
if not self.__last_location:
self.save('/tmp/economic_tmp')
os.system('xdg-open %s' % self.__last_location)
class EConomicsModel(object):
__filters__ = []
__metaclass__ = EConomicsMeta
def __init__(self, service, handle):
self._handle = handle
self._loaded = False
self._service = service
self._data = {}
self._changes = {}
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self._handle[0])
def fetch(self):
self._service.load_instance_data(self)
return self
def update(self, **data):
for k, v in data.items():
setattr(self, k, v)
def load(self):
self._service.load_data(self)
def delete(self):
self._service.delete(self.__class__, self._handle)
def get_model(name):
return EConomicsMeta.registry[name]
def pythonize(name):
return re.sub('([A-Z])([a-z])', r'_\1\2', name).strip('_').lower()
def camelcase(name):
return ''.join(map(str.capitalize, name.split('_')))
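# For example, pythonize('DebtorContact') gives 'debtor_contact' and
# camelcase('debtor_contact') gives back 'DebtorContact'.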
def build_model_code(client):
"""
Generate source code for e-conomic models based on WSDL connection.
This is based on the assumption that the API follows a specific method naming-convention.
Not all models and attributes has been tested.
The source-generation is mostly to help improve readability and IDE auto-completion.
:param client:
:return: source code for models.py
"""
models = {}
references = {}
for method in client.wsdl.services[0].ports[0].methods.values():
if not '_' in method.name:
continue
model, action = method.name.split('_')
models.setdefault(model, {'properties': [], 'filters': []})
references[model] = model
if model[-1] == 'y':
references[model[:-1] + 'ies'] = model
else:
references[model+'s'] = model
references['OurReference'] = 'Employee'
references['GetYourReference'] = 'DebtorContact'
references['GetAttention'] = 'DebtorContact'
references['Layout'] = 'TemplateCollection'
special = {
'Order_GetPdf': {
'type': 'EConomicsFileProperty',
'args': ["'Order_GetPdf'", "'pdf'"]
},
'Invoice_GetPdf': {
'type': 'EConomicsFileProperty',
'args': ["'Invoice_GetPdf'", "'pdf'"]
},
'CurrentInvoice_GetPdf': {
'type': 'EConomicsFileProperty',
'args': ["'CurrentInvoice_GetPdf'", "'pdf'"]
}
}
for line in ['Order', 'Invoice', 'CurrentInvoice', 'Quotation']:
method = '%s_GetLines' % line
special[method] = {
'type': 'EConomicsReferenceList',
'args': ["'%sLine'" % line, "'%s'" % method]
}
for method in client.wsdl.services[0].ports[0].methods.values():
if not '_' in method.name:
continue
model, action = method.name.split('_')
if action in ['GetData', 'GetAll', 'GetDataArray']:
continue
modeldata = models[model]
if action == 'GetAllUpdated':
camelname = action[3:]
modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
if re.findall('GetAll[A-Z].+', action):
camelname = action[3:]
modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
elif action.startswith('FindBy'):
camelname = action[6:]
modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
elif action.startswith('Get'):
propname = action[3:]
pyname = pythonize(propname)
if not propname:
continue
get_type = re.findall('Get(%s)[a-z0-9]*?$' % ('|'.join(references.keys())), action)
if get_type and get_type[0] in references:
refmodel = references[get_type[0]]
if action[-1] == 's':
modeldata['properties'].append({
'type': 'EConomicsReferenceList',
'args': ["'%s'" % propname, "'%s'" % refmodel, "'%s'" % method.name],
'name': pyname
})
else:
modeldata['properties'].append({
'type': 'EConomicsReference',
'args': ["'%s'" % propname, "'%s'" % refmodel],
'name': pyname
})
elif method.name in special:
spdata = special[method.name]
modeldata['properties'].append({
'type': spdata['type'],
'args': ["'%s'" % propname] + spdata['args'],
'name': pyname
})
else:
modeldata['properties'].append({
'type': 'EConomicsProperty',
'args': ["'%s'" % propname],
'name': pyname
})
classes = []
for modelname, modeldata in models.items():
propertycode = ["%s = %s(%s)" % (md['name'], md['type'], ', '.join(md['args']))
for md in modeldata['properties']]
code = "class %s(%s):\n __filters__ = %r\n %s" % (modelname, 'EConomicsModel',
modeldata['filters'], '\n '.join(propertycode))
classes.append(code)
return "from pyconomic.base import *\n\n\n" + "\n\n\n".join(classes)
| gpl-2.0 | -5,205,653,397,330,828,000 | 33.015385 | 114 | 0.565707 | false | 4.003823 | false | false | false |
Jonekee/chromium.src | build/android/gyp/write_ordered_libraries.py | 36 | 4035 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes dependency ordered list of native libraries.
The list excludes any Android system libraries, as those are not bundled with
the APK.
This list of libraries is used for several steps of building an APK.
In the component build, the --input-libraries only needs to be the top-level
library (i.e. libcontent_shell_content_view). This will then use readelf to
inspect the shared libraries and determine the full list of (non-system)
libraries that should be included in the APK.
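Illustrative invocation (argument values are hypothetical):
write_ordered_libraries.py --readelf=.../readelf --libraries-dir=out/Release/lib
--input-libraries=out/Release/lib/libcontent_shell_content_view.so
--output=ordered_libraries.json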
"""
# TODO(cjhopman): See if we can expose the list of library dependencies from
# gyp, rather than calculating it ourselves.
# http://crbug.com/225558
import optparse
import os
import re
import sys
from util import build_utils
_readelf = None
_library_dirs = None
_library_re = re.compile(
'.*NEEDED.*Shared library: \[(?P<library_name>[\w/.]+)\]')
def SetReadelfPath(path):
global _readelf
_readelf = path
def SetLibraryDirs(dirs):
global _library_dirs
_library_dirs = dirs
def FullLibraryPath(library_name):
assert _library_dirs is not None
for directory in _library_dirs:
path = '%s/%s' % (directory, library_name)
if os.path.exists(path):
return path
return library_name
def IsSystemLibrary(library_name):
# If the library doesn't exist in the libraries directory, assume that it is
# an Android system library.
return not os.path.exists(FullLibraryPath(library_name))
def CallReadElf(library_or_executable):
assert _readelf is not None
readelf_cmd = [_readelf,
'-d',
FullLibraryPath(library_or_executable)]
return build_utils.CheckOutput(readelf_cmd)
def GetDependencies(library_or_executable):
elf = CallReadElf(library_or_executable)
return set(_library_re.findall(elf))
def GetNonSystemDependencies(library_name):
all_deps = GetDependencies(FullLibraryPath(library_name))
return set((lib for lib in all_deps if not IsSystemLibrary(lib)))
def GetSortedTransitiveDependencies(libraries):
"""Returns all transitive library dependencies in dependency order."""
return build_utils.GetSortedTransitiveDependencies(
libraries, GetNonSystemDependencies)
def GetSortedTransitiveDependenciesForBinaries(binaries):
if binaries[0].endswith('.so'):
libraries = [os.path.basename(lib) for lib in binaries]
else:
assert len(binaries) == 1
all_deps = GetDependencies(binaries[0])
libraries = [lib for lib in all_deps if not IsSystemLibrary(lib)]
return GetSortedTransitiveDependencies(libraries)
def main():
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--input-libraries',
help='A list of top-level input libraries.')
parser.add_option('--libraries-dir',
help='The directory which contains shared libraries.')
parser.add_option('--readelf', help='Path to the readelf binary.')
parser.add_option('--output', help='Path to the generated .json file.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
SetReadelfPath(options.readelf)
SetLibraryDirs(options.libraries_dir.split(','))
libraries = build_utils.ParseGypList(options.input_libraries)
if len(libraries):
libraries = GetSortedTransitiveDependenciesForBinaries(libraries)
# Convert to "base" library names: e.g. libfoo.so -> foo
java_libraries_list = (
'{%s}' % ','.join(['"%s"' % s[3:-3] for s in libraries]))
build_utils.WriteJson(
{'libraries': libraries, 'java_libraries_list': java_libraries_list},
options.output,
only_if_changed=True)
if options.stamp:
build_utils.Touch(options.stamp)
if options.depfile:
print libraries
build_utils.WriteDepfile(
options.depfile,
libraries + build_utils.GetPythonDependencies())
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 793,590,433,413,980,300 | 27.821429 | 78 | 0.720694 | false | 3.698442 | false | false | false |
jfterpstra/bluebottle | bluebottle/geo/views.py | 1 | 1169 | from rest_framework import generics
from bluebottle.geo.models import Location
from bluebottle.geo.serializers import LocationSerializer
from bluebottle.projects.models import Project
from .serializers import CountrySerializer
from .models import Country
class CountryList(generics.ListAPIView):
serializer_class = CountrySerializer
queryset = Country.objects.all()
def get_queryset(self):
return self.queryset.filter(alpha2_code__isnull=False).order_by(
'name').all()
class CountryDetail(generics.RetrieveAPIView):
serializer_class = CountrySerializer
queryset = Country.objects.all()
def get_queryset(self):
qs = super(CountryDetail, self).get_queryset()
return qs
class UsedCountryList(CountryList):
def get_queryset(self):
qs = super(UsedCountryList, self).get_queryset()
project_country_ids = Project.objects.filter(
status__viewable=True).values_list('country', flat=True).distinct()
return qs.filter(id__in=project_country_ids)
class LocationList(generics.ListAPIView):
serializer_class = LocationSerializer
queryset = Location.objects.all()
| bsd-3-clause | 6,346,517,698,109,374,000 | 28.974359 | 79 | 0.730539 | false | 4.189964 | false | false | false |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/numpy/lib/function_base.py | 15 | 150516 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def _hist_bin_sqrt(x):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_doane(x):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x`. The Sturges estimator
is quite good for small (<1000) datasets and is the default in the R
language. This method gives good off the shelf behaviour.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
# There is no need to check for zero here. If ptp is zero, so is the IQR, and
# vice versa. Either both are zero or neither one is.
return min(_hist_bin_fd(x), _hist_bin_sturges(x))
# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
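# For example, np.histogram(data, bins='fd') looks up _hist_bin_fd here, converts the
# estimated bin width into a bin count over the requested range, and then proceeds as
# if that integer had been passed for `bins`.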
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all round performance
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the ``density``
keyword instead. If ``False``, the result will contain the
number of samples in each bin. If ``True``, the result is the
value of the probability *density* function at the bin,
normalized such that the *integral* over the range is 1. Note
that this latter behavior is known to be buggy with unequal bin
widths; use ``density`` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
.. versionadded:: 1.11.0
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1})}
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # plt.hist passes its arguments to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
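As a rough illustration of the plug-in rules above, the Sturges estimate
for ``n = 1000`` samples works out to:
>>> int(np.ceil(np.log2(1000) + 1))
11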
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
# Do not modify the original value of range so we can check for `None`
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
mn, mx = 0.0, 1.0
else:
mn, mx = a.min() + 0.0, a.max() + 0.0
else:
mn, mx = [mi + 0.0 for mi in range]
if mn > mx:
raise ValueError(
'max must be larger than min in range parameter.')
if not np.all(np.isfinite([mn, mx])):
raise ValueError(
'range parameter must be finite.')
if mn == mx:
mn -= 0.5
mx += 0.5
if isinstance(bins, basestring):
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bins not in _hist_bin_selectors:
raise ValueError("{0} not a valid estimator for bins".format(bins))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
# Make a reference to `a`
b = a
# Update the reference if the range needs truncation
if range is not None:
keep = (a >= mn)
keep &= (a <= mx)
if not np.logical_and.reduce(keep):
b = a[keep]
if b.size == 0:
bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bins](b)
if width:
bins = int(np.ceil((mx - mn) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
bins = 1
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# Compute the bin edges for potential correction.
bin_edges = linspace(mn, mx, bins + 1, endpoint=True)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a_data = tmp_a.astype(float)
tmp_a = tmp_a_data - mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a_data < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = (tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# Rename the bin edges for return.
bins = bin_edges
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise ValueError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
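Explicit bin edges can also be supplied per dimension; a small sketch
reusing the same edges for all three dimensions of ``r``:
>>> H, edges = np.histogramdd(r, bins=(np.linspace(-3, 3, 7),) * 3)
>>> H.shape, edges[0].size
((6, 6, 6), 7)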
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
if not np.all(np.isfinite(range)):
raise ValueError(
'range parameter must be finite.')
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
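# xy is a mixed-radix (row-major) index over the dimensions, taken in the
# order given by ni, so each sample maps to one cell of the flat histogram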
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
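With ``returned=True`` the sum of the weights is returned as well:
>>> np.average(data, axis=1, weights=[1./4, 3./4], returned=True)
(array([ 0.75,  2.75,  4.75]), array([ 1.,  1.,  1.]))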
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print('ValueError')
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., alpha=1)``, then each function is called as
``f(x, alpha=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
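If one extra function is given, it supplies the default for values where
no condition is True:
>>> np.piecewise(x, [x < 0], [lambda x: -x, lambda x: x])
array([ 2.5,  1.5,  0.5,  0.5,  1.5,  2.5])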
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
# Only able to stack vertically if the array is 1d or less
if x.ndim <= 1:
condlist = np.vstack([condlist, ~totlist])
else:
condlist = [asarray(c, dtype=bool) for c in condlist]
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
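The `default` value is used wherever no condition is satisfied:
>>> np.select(condlist, choicelist, default=-1)
array([ 0,  1,  2, -1, -1, -1, 36, 49, 64, 81])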
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:ndarray.copy are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sided (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : scalar or list of scalars, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
A single scalar specifies the sample distance for all dimensions.
If `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes of the input array.
axis may be negative, in which case it counts from the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
The axis keyword can be used to specify a subset of axes of which the gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
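Likewise, a tuple of axes returns one array per listed axis:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=(0, 1))
[array([[ 2.,  2., -1.],
[ 2.,  2., -1.]]), array([[ 1. ,  2.5,  4. ],
[ 1. ,  1. ,  1. ]])]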
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
if len(set(axes)) != len(axes):
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == len(axes):
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second-order accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
# Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len(axes) == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th discrete difference along given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
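# wrap one point from each end around so interpolation also works
# across the period boundary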
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
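# map each difference into (-pi, pi]; differences landing exactly on the
# boundary keep the sign of the original jump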
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : ndarray
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
if not isinstance(arr, np.ndarray):
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(arr).__name__))
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
Support for the new keyword-argument interface and for the `excluded`
argument further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
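# normalization with aweights: fact = v1 - ddof * v2 / v1 (see Notes)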
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= 1. / np.float64(fact)
return c.squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
Due to floating point rounding the resulting array may not be Hermitian,
the diagonal elements may not be 1, and the elements may not satisfy the
inequality abs(a) <= 1. The real and imaginary parts are clipped to the
interval [-1, 1] in an attempt to improve on that situation, but this does
not help much in the complex case.
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = sqrt(d.real)
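# normalize the covariance by the outer product of the standard deviations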
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
np.clip(c.real, -1, 1, out=c.real)
if np.iscomplexobj(c):
np.clip(c.imag, -1, 1, out=c.imag)
return c
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
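# Illustrative sketch, not part of the upstream implementation: tapering a
# signal with the Blackman window before an FFT, as the Notes above suggest.
# The helper name `_example_blackman_taper` is hypothetical.
def _example_blackman_taper(signal):
    w = blackman(len(signal))          # odd lengths peak at exactly 1.0
    return np.asarray(signal) * w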
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett window is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
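# Illustrative sketch, not part of the upstream implementation: the Bartlett
# window is zero at both end points and one at the centre for odd lengths.
# The helper name `_example_bartlett_properties` is hypothetical.
def _example_bartlett_properties(M=9):
    w = bartlett(M)
    assert w[0] == 0.0 and w[-1] == 0.0
    assert w[(M - 1) // 2] == 1.0
    return w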
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
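# Illustrative sketch, not part of the upstream implementation: the Hann
# window falls to zero at its end points while the Hamming window stops at
# 0.08, as follows from the two definitions above. The helper name
# `_example_hann_vs_hamming` is hypothetical.
def _example_hann_vs_hamming(M=11):
    assert hanning(M)[0] == 0.0
    assert abs(hamming(M)[0] - 0.08) < 1e-12
    return hanning(M), hamming(M)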
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
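# Illustrative sketch, not part of the upstream implementation: the two
# Chebyshev expansions above split the domain at x == 8, which is invisible
# to callers. The helper name `_example_i0_branches` is hypothetical.
def _example_i0_branches():
    small, large = i0(1.0), i0(20.0)   # exercises both branches
    assert small >= 1.0                # I_0(x) >= 1 for real x
    assert large > small               # I_0 grows for x > 0
    return small, large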
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
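# Illustrative sketch, not part of the upstream implementation: with beta == 0
# the Bessel ratio above is identically one, giving the rectangular window
# listed in the table. The helper name `_example_kaiser_rectangular` is
# hypothetical.
def _example_kaiser_rectangular(M=8):
    w = kaiser(M, 0.0)
    assert np.allclose(w, np.ones(M))
    return w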
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
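# Illustrative sketch, not part of the upstream implementation: `sinc`
# substitutes a tiny value for x == 0, so the limit value 1 is returned to
# within rounding. The helper name `_example_sinc_zeros` is hypothetical.
def _example_sinc_zeros():
    assert abs(sinc(0.0) - 1.0) < 1e-10   # limit value at the origin
    assert abs(sinc(1.0)) < 1e-10         # sin(pi)/pi vanishes up to rounding
    return sinc(np.linspace(-2, 2, 9))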
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
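# Illustrative sketch, not part of the upstream implementation: as stated in
# the Notes, msort(a) matches np.sort(a, axis=0). The helper name
# `_example_msort_equivalence` is hypothetical.
def _example_msort_equivalence():
    a = np.array([[3, 1], [2, 4]])
    assert np.array_equal(msort(a), np.sort(a, axis=0))
    return msort(a)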
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument, swapping the axes so that an
extended (tuple of int) axis argument can be used with functions that
don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
Reduction function capable of receiving an axis argument.
It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
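# Illustrative sketch, not part of the upstream implementation: `_ureduce` is
# what lets `median`/`percentile` accept a tuple of axes, and the returned
# `keepdim` shape reproduces keepdims=True behaviour. The helper name
# `_example_ureduce_keepdim` is hypothetical.
def _example_ureduce_keepdim():
    a = np.arange(24).reshape(2, 3, 4)
    r, keepdim = _ureduce(a, func=np.median, axis=(0, 2))
    assert r.shape == (3,)
    assert tuple(keepdim) == (1, 3, 1)
    return r.reshape(keepdim)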
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Notes
-----
Given a vector ``V`` of length ``N``, the median of ``V`` is the
middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
two middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
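# Illustrative sketch, not part of the upstream implementation: appending -1
# to `kth` above moves any NaN to the end of the partition, which is why the
# NaN check only needs to look at part[..., -1]. The helper name
# `_example_median_nan_propagation` is hypothetical.
def _example_median_nan_propagation():
    a = np.array([1.0, np.nan, 3.0])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        assert np.isnan(_median(a))
    return a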
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100 inclusive.
axis : {int, sequence of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array. A sequence of axes is supported since
version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a`
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to
preserve the contents of the input array. In this case you
should not make any assumptions about the contents of the input
`a` after this function completes -- treat it as undefined.
Default is False. If `a` is not already an array, this parameter
will have no effect as `a` will be converted to an array
internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, median, nanpercentile
Notes
-----
Given a vector ``V`` of length ``N``, the ``q``-th percentile of
``V`` is the value ``q/100`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> np.percentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=out)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a == b)
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
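# Illustrative sketch, not part of the upstream implementation: the five
# interpolation modes only differ when q falls between two data points.
# The helper name `_example_percentile_interpolation` is hypothetical.
def _example_percentile_interpolation():
    a = np.array([10.0, 20.0])
    lo = percentile(a, 25, interpolation='lower')      # 10.0
    hi = percentile(a, 25, interpolation='higher')     # 20.0
    lin = percentile(a, 25, interpolation='linear')    # 12.5
    mid = percentile(a, 25, interpolation='midpoint')  # 15.0
    return lo, hi, lin, mid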
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = 0.5 * (floor(indices) + ceil(indices))
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in percentile",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
The sample points corresponding to the `y` values. If `x` is None,
the sample points are assumed to be evenly spaced `dx` apart. The
default is None.
dx : scalar, optional
The spacing between sample points when `x` is None. The default is 1.
axis : int, optional
The axis along which to integrate.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
are taken from the `y` array; by default the x-axis distances between
points are 1.0, but they can alternatively be provided through the `x`
array or the `dx` scalar. The return value equals the combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
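# Illustrative sketch, not part of the upstream implementation: the composite
# rule sums 0.5*(y[i] + y[i+1])*dx over consecutive samples, so it is exact
# for linear integrands. The helper name `_example_trapz_linear` is
# hypothetical.
def _example_trapz_linear():
    x = np.linspace(0.0, 1.0, 11)
    assert abs(trapz(x, x) - 0.5) < 1e-12   # integral of x over [0, 1]
    return trapz(x, x)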
#always succeed
def add_newdoc(place, obj, doc):
"""
Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
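# Illustrative sketch, not part of the upstream implementation: 'xy' and 'ij'
# indexing differ only by a swap of the first two axes, as performed above.
# The helper name `_example_meshgrid_indexing` is hypothetical.
def _example_meshgrid_indexing():
    x, y = np.arange(3), np.arange(2)
    xv_xy, yv_xy = meshgrid(x, y, indexing='xy')
    xv_ij, yv_ij = meshgrid(x, y, indexing='ij')
    assert xv_xy.shape == (2, 3) and xv_ij.shape == (3, 2)
    assert np.array_equal(xv_xy, xv_ij.T)
    return xv_xy, yv_xy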
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arrorder)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
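# Illustrative sketch, not part of the upstream implementation: deleting by an
# index list is equivalent to keeping everything outside a boolean mask, as
# the docstring notes. The helper name `_example_delete_vs_mask` is
# hypothetical.
def _example_delete_vs_mask():
    arr = np.arange(6)
    mask = np.ones(arr.shape[0], dtype=bool)
    mask[[0, 2, 4]] = False
    assert np.array_equal(delete(arr, [0, 2, 4], axis=0), arr[mask])
    return arr[mask]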
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]` just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very differently from a[:,[0],:] = ...! This changes values so that
# it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arrorder)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
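# Illustrative sketch, not part of the upstream implementation: a scalar index
# broadcasts the inserted values while a one-element sequence inserts them
# column by column, as the Notes above point out. The helper name
# `_example_insert_scalar_vs_sequence` is hypothetical.
def _example_insert_scalar_vs_sequence():
    a = np.array([[1, 1], [2, 2], [3, 3]])
    by_scalar = insert(a, 1, [7, 8, 9], axis=1)
    by_sequence = insert(a, [1], [[7], [8], [9]], axis=1)
    assert np.array_equal(by_scalar, by_sequence)
    return by_scalar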
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
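# Illustrative sketch, not part of the upstream implementation: without an
# axis both inputs are flattened first, which is why shape mismatches only
# fail when axis is given. The helper name `_example_append_flattening` is
# hypothetical.
def _example_append_flattening():
    flat = append([[1, 2]], [3, 4])
    assert np.array_equal(flat, np.array([1, 2, 3, 4]))
    return flat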
| mit | 7,609,337,912,041,124,000 | 31.813604 | 99 | 0.572318 | false | 3.64181 | false | false | false |
lonnen/socorro | webapp-django/crashstats/crashstats/views.py | 1 | 13979 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import json
from django import http
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.core.cache import cache
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils.http import urlquote
from csp.decorators import csp_update
from crashstats.crashstats import forms, models, utils
from crashstats.crashstats.decorators import pass_default_context
from crashstats.supersearch.models import SuperSearchFields
from socorro.external.crashstorage_base import CrashIDNotFound
# To prevent running into a known Python bug
# (http://bugs.python.org/issue7980)
# we, here at "import time" (as opposed to run time) make use of time.strptime
# at least once
datetime.datetime.strptime('2013-07-15 10:00:00', '%Y-%m-%d %H:%M:%S')
def ratelimit_blocked(request, exception):
# http://tools.ietf.org/html/rfc6585#page-3
status = 429
# If the request is an AJAX one, we return a plain short string.
# Also, if the request is coming from something like curl, it will
# send the header `Accept: */*`. But if you take the same URL and open
# it in the browser it'll look something like:
# `Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8`
if (
request.is_ajax() or
'text/html' not in request.META.get('HTTP_ACCEPT', '')
):
# Return a super spartan message.
# We could also do something like `{"error": "Too Many Requests"}`
return http.HttpResponse(
'Too Many Requests',
status=status,
content_type='text/plain'
)
return render(request, 'crashstats/ratelimit_blocked.html', status=status)
def robots_txt(request):
return http.HttpResponse(
'User-agent: *\n'
'%s: /' % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
content_type='text/plain',
)
def build_id_to_date(build_id):
yyyymmdd = str(build_id)[:8]
return '{}-{}-{}'.format(
yyyymmdd[:4],
yyyymmdd[4:6],
yyyymmdd[6:8],
)
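# Illustrative sketch, not part of the original source: build IDs start with a
# YYYYMMDD timestamp, so the helper above just re-punctuates the first eight
# characters. The value below follows from that slicing and is an assumed
# example, not output captured from Socorro itself.
#
# >>> build_id_to_date(20190522000000)
# '2019-05-22'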
@csp_update(CONNECT_SRC='analysis-output.telemetry.mozilla.org')
@pass_default_context
def report_index(request, crash_id, default_context=None):
valid_crash_id = utils.find_crash_id(crash_id)
if not valid_crash_id:
return http.HttpResponseBadRequest('Invalid crash ID')
# Sometimes, in Socorro we use a prefix on the crash ID. Usually it's
# 'bp-' but this is configurable.
# If you try to use this to reach the perma link for a crash, it should
# redirect to the report index with the correct crash ID.
if valid_crash_id != crash_id:
return redirect(reverse('crashstats:report_index', args=(valid_crash_id,)))
context = default_context or {}
context['crash_id'] = crash_id
refresh_cache = request.GET.get('refresh') == 'cache'
raw_api = models.RawCrash()
try:
context['raw'] = raw_api.get(crash_id=crash_id, refresh_cache=refresh_cache)
except CrashIDNotFound:
# If the raw crash can't be found, we can't do much.
return render(request, 'crashstats/report_index_not_found.html', context, status=404)
utils.enhance_raw(context['raw'])
context['your_crash'] = (
request.user.is_active and
context['raw'].get('Email') == request.user.email
)
api = models.UnredactedCrash()
try:
context['report'] = api.get(crash_id=crash_id, refresh_cache=refresh_cache)
except CrashIDNotFound:
# The crash hasn't been processed yet, so submit a priority processing
# job for it -- if we haven't already done so.
cache_key = 'priority_job:{}'.format(crash_id)
if not cache.get(cache_key):
priority_api = models.PriorityJob()
priority_api.post(crash_ids=[crash_id])
cache.set(cache_key, True, 60)
return render(request, 'crashstats/report_index_pending.html', context)
if 'json_dump' in context['report']:
json_dump = context['report']['json_dump']
if 'sensitive' in json_dump and not request.user.has_perm('crashstats.view_pii'):
del json_dump['sensitive']
context['raw_stackwalker_output'] = json.dumps(
json_dump,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
utils.enhance_json_dump(json_dump, settings.VCS_MAPPINGS)
parsed_dump = json_dump
else:
context['raw_stackwalker_output'] = 'No dump available'
parsed_dump = {}
# NOTE(willkg): pull cpu count from parsed_dump if it's not in report;
# remove in July 2019
if 'cpu_count' not in context['report']:
context['report']['cpu_count'] = parsed_dump.get('system_info', {}).get('cpu_count')
# NOTE(willkg): "cpu_name" is deprecated, but populate "cpu_arch" if
# cpu_arch is empty; remove in July 2019.
if 'cpu_arch' not in context['report']:
context['report']['cpu_arch'] = context['report']['cpu_name']
context['crashing_thread'] = parsed_dump.get('crash_info', {}).get('crashing_thread')
if context['report']['signature'].startswith('shutdownhang'):
# For shutdownhang signatures, we want to use thread 0 as the
# crashing thread, because that's the thread that actually contains
# the useful data about what happened.
context['crashing_thread'] = 0
context['parsed_dump'] = parsed_dump
context['bug_product_map'] = settings.BUG_PRODUCT_MAP
context['bug_associations'] = list(
models.BugAssociation.objects
.filter(signature=context['report']['signature'])
.values('bug_id', 'signature')
.order_by('-bug_id')
)
context['raw_keys'] = []
if request.user.has_perm('crashstats.view_pii'):
# hold nothing back
context['raw_keys'] = context['raw'].keys()
else:
context['raw_keys'] = [
x for x in context['raw']
if x in models.RawCrash.API_ALLOWLIST()
]
# Sort keys case-insensitively
context['raw_keys'] = sorted(context['raw_keys'], key=lambda s: s.lower())
if request.user.has_perm('crashstats.view_rawdump'):
context['raw_dump_urls'] = [
reverse('crashstats:raw_data', args=(crash_id, 'dmp')),
reverse('crashstats:raw_data', args=(crash_id, 'json'))
]
if context['raw'].get('additional_minidumps'):
suffixes = [
x.strip()
for x in context['raw']['additional_minidumps'].split(',')
if x.strip()
]
for suffix in suffixes:
name = 'upload_file_minidump_%s' % (suffix,)
context['raw_dump_urls'].append(
reverse('crashstats:raw_data_named', args=(crash_id, name, 'dmp'))
)
if (
context['raw'].get('ContainsMemoryReport') and
context['report'].get('memory_report') and
not context['report'].get('memory_report_error')
):
context['raw_dump_urls'].append(
reverse('crashstats:raw_data_named', args=(crash_id, 'memory_report', 'json.gz'))
)
# Add descriptions to all fields.
all_fields = SuperSearchFields().get()
descriptions = {}
for field in all_fields.values():
key = '{}.{}'.format(field['namespace'], field['in_database_name'])
descriptions[key] = '{} Search: {}'.format(
field.get('description', '').strip() or 'No description for this field.',
field['is_exposed'] and field['name'] or 'N/A',
)
def make_raw_crash_key(key):
"""In the report_index.html template we need to create a key
that we can use to look up against the 'fields_desc' dict.
Because you can't do something like this in jinja::
{{ fields_desc.get(u'raw_crash.{}'.format(key), empty_desc) }}
we do it here in the function instead.
The trick is that the lookup key has to be a unicode object or
else you get UnicodeEncodeErrors in the template rendering.
"""
return u'raw_crash.{}'.format(key)
context['make_raw_crash_key'] = make_raw_crash_key
context['fields_desc'] = descriptions
context['empty_desc'] = 'No description for this field. Search: unknown'
context['BUG_PRODUCT_MAP'] = settings.BUG_PRODUCT_MAP
# report.addons used to be a list of lists.
# In https://bugzilla.mozilla.org/show_bug.cgi?id=1250132
# we changed it from a list of lists to a list of strings, using
# a ':' to split the name and version.
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1250132#c7
# Considering legacy, let's tackle both.
# In late 2017, this code is going to be useless and can be removed.
if (
context['report'].get('addons') and
isinstance(context['report']['addons'][0], (list, tuple))
):
# This is the old legacy format. This crash hasn't been processed
# the new way.
context['report']['addons'] = [
':'.join(x) for x in context['report']['addons']
]
return render(request, 'crashstats/report_index.html', context)
@pass_default_context
def login(request, default_context=None):
context = default_context or {}
return render(request, 'crashstats/login.html', context)
def quick_search(request):
query = request.GET.get('query', '').strip()
crash_id = utils.find_crash_id(query)
if crash_id:
url = reverse(
'crashstats:report_index',
kwargs=dict(crash_id=crash_id)
)
elif query:
url = '%s?signature=%s' % (
reverse('supersearch:search'),
urlquote('~%s' % query)
)
else:
url = reverse('supersearch:search')
return redirect(url)
@utils.json_view
def buginfo(request, signatures=None):
form = forms.BugInfoForm(request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
bug_ids = form.cleaned_data['bug_ids']
bzapi = models.BugzillaBugInfo()
result = bzapi.get(bug_ids)
return result
@permission_required('crashstats.view_rawdump')
def raw_data(request, crash_id, extension, name=None):
api = models.RawCrash()
if extension == 'json':
format = 'meta'
content_type = 'application/json'
elif extension == 'dmp':
format = 'raw'
content_type = 'application/octet-stream'
elif extension == 'json.gz' and name == 'memory_report':
        # Note: if the name is 'memory_report', this fetches the raw crash
        # data stored under that name; the files in the memory_report
        # bucket are already gzipped.
# This is important because it means we don't need to gzip
# the HttpResponse below.
format = 'raw'
content_type = 'application/octet-stream'
else:
raise NotImplementedError(extension)
data = api.get(crash_id=crash_id, format=format, name=name)
response = http.HttpResponse(content_type=content_type)
if extension == 'json':
response.write(json.dumps(data))
else:
response.write(data)
return response
@pass_default_context
def about_throttling(request, default_context=None):
"""Return a simple page that explains about how throttling works."""
context = default_context or {}
return render(request, 'crashstats/about_throttling.html', context)
@pass_default_context
def home(request, default_context=None):
context = default_context or {}
return render(request, 'crashstats/home.html', context)
@pass_default_context
def product_home(request, product, default_context=None):
context = default_context or {}
# Figure out versions
if product not in context['products']:
raise http.Http404('Not a recognized product')
if product in context['active_versions']:
context['versions'] = [
x['version']
for x in context['active_versions'][product]
if x['is_featured']
]
        # If there are no featured versions but there are active
        # versions, then fall back to using those instead.
if not context['versions'] and context['active_versions'][product]:
            # But when we do that, we have to make a manual cut-off of the
            # number of versions to return (settings.NUMBER_OF_FEATURED_VERSIONS).
context['versions'] = [
x['version']
for x in context['active_versions'][product]
][:settings.NUMBER_OF_FEATURED_VERSIONS]
else:
context['versions'] = []
return render(request, 'crashstats/product_home.html', context)
def handler500(request, template_name='500.html'):
if getattr(request, '_json_view', False):
# Every view with the `utils.json_view` decorator sets,
# on the request object, that it wants to eventually return
# a JSON output. Let's re-use that fact here.
return http.JsonResponse({
'error': 'Internal Server Error',
'path': request.path,
'query_string': request.META.get('QUERY_STRING'),
}, status=500)
context = {}
return render(request, '500.html', context, status=500)
def handler404(request, exception, template_name='404.html'):
if getattr(request, '_json_view', False):
# Every view with the `utils.json_view` decorator sets,
# on the request object, that it wants to eventually return
# a JSON output. Let's re-use that fact here.
return http.JsonResponse({
'error': 'Page not found',
'path': request.path,
'query_string': request.META.get('QUERY_STRING'),
}, status=404)
context = {}
return render(request, '404.html', context, status=404)
| mpl-2.0 | -4,250,730,929,189,438,500 | 35.883905 | 97 | 0.625367 | false | 3.800707 | false | false | false |
google/orbit | third_party/conan/recipes/libdisasm/conanfile.py | 2 | 1408 | from conans import ConanFile, CMake, tools
import shutil
class LibDisasmConan(ConanFile):
name = "libdisasm"
version = "0.23"
license = "Clarified Artistic License"
description = "An basic x86 disassembler in library form."
topics = ("libdisasm", "disasm")
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = {"shared": False}
generators = "cmake"
exports_sources = [ "CMakeLists.txt", "sizeofvoid.patch" ]
def source(self):
tools.download("https://sourceforge.net/projects/bastard/files/libdisasm/{0}/libdisasm-{0}.tar.gz/download".format(self.version),
"libdisasm-{}.tar.gz".format(self.version))
tools.untargz("libdisasm-{}.tar.gz".format(self.version))
tools.patch(patch_file="sizeofvoid.patch",
base_path="libdisasm-{}".format(self.version))
shutil.move("CMakeLists.txt", "libdisasm-{}/".format(self.version))
def get_env(self):
cmake = CMake(self)
cmake.configure(source_folder="libdisasm-{}".format(self.version))
return cmake
def build(self):
cmake = self.get_env()
cmake.build()
def package(self):
cmake = self.get_env()
cmake.install()
def package_info(self):
self.cpp_info.libs = ["disasm"]
self.cpp_info.includedirs = ["include"]
| bsd-2-clause | 712,558,446,428,111,600 | 33.341463 | 137 | 0.620028 | false | 3.528822 | false | false | false |
honmaple/maple-bbs | forums/count.py | 1 | 4325 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2017 jianglin
# File Name: count.py
# Author: jianglin
# Email: [email protected]
# Created: 2017-03-29 21:28:52 (CST)
# Last Update: Sunday 2017-4-2 15:24:37 (CST)
# By:
# Description: some statistics
# **************************************************************************
from flask import request
from .extension import redis_data
class Count(object):
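    # Layout note (added for clarity, inferred from the methods below): every
    # counter lives in a redis hash, e.g. 'count:board:<id>' with fields such
    # as 'topic' and 'post', 'count:user:<id>' with 'topic', 'replies',
    # 'message' and 'email', plus a global 'count:forums' hash.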
@classmethod
def board_topic_count(cls, boardId, value=None):
key = 'count:board:%s' % str(boardId)
if value is not None:
pipe = redis_data.pipeline()
pipe.hincrby(key, 'topic', value)
pipe.execute()
return redis_data.hget(key, 'topic') or 0
@classmethod
def board_post_count(cls, boardId, value=None):
key = 'count:board:%s' % str(boardId)
if value is not None:
pipe = redis_data.pipeline()
pipe.hincrby(key, 'post', value)
pipe.execute()
return redis_data.hget(key, 'post') or 0
@classmethod
def topic_reply_count(cls, topicId, value=None):
key = 'count:topic:%s' % str(topicId)
if value is not None:
pipe = redis_data.pipeline()
pipe.hincrby(key, 'replies', value)
pipe.execute()
return redis_data.hget(key, 'replies') or 0
@classmethod
def topic_read_count(cls, topicId, value=None):
key = 'count:topic:%s' % str(topicId)
expire_key = 'expire:topic:read:{}'.format(request.remote_addr)
if not redis_data.exists(expire_key):
            # Within three minutes, the read count is not incremented again
redis_data.set(expire_key, '1')
redis_data.expire(expire_key, 180)
if value is not None:
redis_data.hincrby(key, 'read', value)
return redis_data.hget(key, 'read') or 0
@classmethod
def reply_liker_count(cls, replyId, value=None):
key = 'count:reply:%s' % str(replyId)
if value is not None:
pipe = redis_data.pipeline()
pipe.hincrby(key, 'liker', value)
pipe.execute()
return redis_data.hget(key, 'liker') or 0
@classmethod
def user_topic_count(cls, userId, value=None):
key = 'count:user:%s' % str(userId)
if value is not None:
pipe = redis_data.pipeline()
pipe.hincrby(key, 'topic', value)
pipe.execute()
cls.forums_post_count(1)
cls.forums_topic_count(1)
return redis_data.hget(key, 'topic') or 0
@classmethod
def user_reply_count(cls, userId, value=None):
key = 'count:user:%s' % str(userId)
if value is not None:
pipe = redis_data.pipeline()
pipe.hincrby(key, 'replies', value)
pipe.execute()
cls.forums_post_count(1)
return redis_data.hget(key, 'replies') or 0
@classmethod
def user_message_count(cls, userId, value=None, clear=False):
key = 'count:user:%s' % str(userId)
if value is not None:
pipe = redis_data.pipeline()
pipe.hincrby(key, 'message', value)
pipe.execute()
if clear:
redis_data.hset(key, 'message', 0)
return redis_data.hget(key, 'message') or 0
@classmethod
def user_email_time(cls, userId, value=None):
key = 'count:user:%s' % str(userId)
if value is not None:
redis_data.hset(key, 'email', value)
return redis_data.hget(key, 'email') or '2015-1-1 1:1:1'
@classmethod
def forums_user_count(cls, value=None):
key = 'count:forums'
if value is not None:
redis_data.hincrby(key, 'user', value)
return redis_data.hget(key, 'user') or 0
@classmethod
def forums_topic_count(cls, value=None):
key = 'count:forums'
if value is not None:
redis_data.hincrby(key, 'topic', value)
return redis_data.hget(key, 'topic') or 0
@classmethod
def forums_post_count(cls, value=None):
key = 'count:forums'
if value is not None:
redis_data.hincrby(key, 'post', value)
return redis_data.hget(key, 'post') or 0
| gpl-3.0 | 3,023,951,892,401,284,000 | 33.5 | 76 | 0.55353 | false | 3.387173 | false | false | false |
uucidl/uu.xunitgen | xunitgen/step_recording.py | 1 | 1840 | import sys
import time
from contextlib import contextmanager
from .main import EventReceiver
class Recorder(object):
"""Use this class to record the result of running python code as a xunit xml
It allows you to record a series of steps into a single xunit.xml file.
"""
def __init__(self, xunit_destination, name, package_name=None):
self.name = name
self.package_name = package_name
self.destination = xunit_destination
self.event_receiver = None
def __enter__(self):
self.event_receiver = EventReceiver()
return self
def now_seconds(self):
return time.time()
def step(self, step_name):
"""Start a new step. returns a context manager which allows you to
report an error"""
@contextmanager
def step_context(step_name):
if self.event_receiver.current_case is not None:
raise Exception('cannot open a step within a step')
self.event_receiver.begin_case(step_name, self.now_seconds(), self.name)
try:
yield self.event_receiver
except:
etype, evalue, tb = sys.exc_info()
self.event_receiver.error('%r' % [etype, evalue, tb])
raise
finally:
self.event_receiver.end_case(step_name, self.now_seconds())
return step_context(step_name)
def __exit__(self, *exc_info):
results = self.event_receiver.results()
if not results:
already_throwing = exc_info and exc_info[0] is not None
if not already_throwing:
raise ValueError('your hook must at least perform one step!')
self.destination.write_reports(
self.name, self.name, results, package_name=self.package_name,
)
| mit | 2,230,151,689,008,873,700 | 28.677419 | 84 | 0.60163 | false | 4.259259 | false | false | false |
vdemeester/compose | tests/unit/network_test.py | 3 | 6287 | import pytest
from .. import mock
from .. import unittest
from compose.network import check_remote_network_config
from compose.network import Network
from compose.network import NetworkConfigChangedError
class NetworkTest(unittest.TestCase):
def test_check_remote_network_config_success(self):
options = {'com.docker.network.driver.foo': 'bar'}
ipam_config = {
'driver': 'default',
'config': [
{'subnet': '172.0.0.1/16', },
{
'subnet': '156.0.0.1/25',
'gateway': '156.0.0.1',
'aux_addresses': ['11.0.0.1', '24.25.26.27'],
'ip_range': '156.0.0.1-254'
}
],
'options': {
'iface': 'eth0',
}
}
labels = {
'com.project.tests.istest': 'true',
'com.project.sound.track': 'way out of here',
}
remote_labels = labels.copy()
remote_labels.update({
'com.docker.compose.project': 'compose_test',
'com.docker.compose.network': 'net1',
})
net = Network(
None, 'compose_test', 'net1', 'bridge',
options, enable_ipv6=True, ipam=ipam_config,
labels=labels
)
check_remote_network_config(
{
'Driver': 'bridge',
'Options': options,
'EnableIPv6': True,
'Internal': False,
'Attachable': True,
'IPAM': {
'Driver': 'default',
'Config': [{
'Subnet': '156.0.0.1/25',
'Gateway': '156.0.0.1',
'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
'IPRange': '156.0.0.1-254'
}, {
'Subnet': '172.0.0.1/16',
'Gateway': '172.0.0.1'
}],
'Options': {
'iface': 'eth0',
},
},
'Labels': remote_labels
},
net
)
def test_check_remote_network_config_whitelist(self):
options = {'com.docker.network.driver.foo': 'bar'}
remote_options = {
'com.docker.network.driver.overlay.vxlanid_list': '257',
'com.docker.network.driver.foo': 'bar',
'com.docker.network.windowsshim.hnsid': 'aac3fd4887daaec1e3b',
}
net = Network(
None, 'compose_test', 'net1', 'overlay',
options
)
check_remote_network_config(
{'Driver': 'overlay', 'Options': remote_options}, net
)
@mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_check_remote_network_config_driver_mismatch(self):
net = Network(None, 'compose_test', 'net1', 'overlay')
with pytest.raises(NetworkConfigChangedError) as e:
check_remote_network_config(
{'Driver': 'bridge', 'Options': {}}, net
)
assert 'driver has changed' in str(e.value)
@mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_check_remote_network_config_options_mismatch(self):
net = Network(None, 'compose_test', 'net1', 'overlay')
with pytest.raises(NetworkConfigChangedError) as e:
check_remote_network_config({'Driver': 'overlay', 'Options': {
'com.docker.network.driver.foo': 'baz'
}}, net)
assert 'option "com.docker.network.driver.foo" has changed' in str(e.value)
def test_check_remote_network_config_null_remote(self):
net = Network(None, 'compose_test', 'net1', 'overlay')
check_remote_network_config(
{'Driver': 'overlay', 'Options': None}, net
)
def test_check_remote_network_config_null_remote_ipam_options(self):
ipam_config = {
'driver': 'default',
'config': [
{'subnet': '172.0.0.1/16', },
{
'subnet': '156.0.0.1/25',
'gateway': '156.0.0.1',
'aux_addresses': ['11.0.0.1', '24.25.26.27'],
'ip_range': '156.0.0.1-254'
}
]
}
net = Network(
None, 'compose_test', 'net1', 'bridge', ipam=ipam_config,
)
check_remote_network_config(
{
'Driver': 'bridge',
'Attachable': True,
'IPAM': {
'Driver': 'default',
'Config': [{
'Subnet': '156.0.0.1/25',
'Gateway': '156.0.0.1',
'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
'IPRange': '156.0.0.1-254'
}, {
'Subnet': '172.0.0.1/16',
'Gateway': '172.0.0.1'
}],
'Options': None
},
},
net
)
@mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_check_remote_network_labels_mismatch(self):
net = Network(None, 'compose_test', 'net1', 'overlay', labels={
'com.project.touhou.character': 'sakuya.izayoi'
})
remote = {
'Driver': 'overlay',
'Options': None,
'Labels': {
'com.docker.compose.network': 'net1',
'com.docker.compose.project': 'compose_test',
'com.project.touhou.character': 'marisa.kirisame',
}
}
with mock.patch('compose.network.log') as mock_log:
check_remote_network_config(remote, net)
mock_log.warning.assert_called_once_with(mock.ANY)
_, args, kwargs = mock_log.warning.mock_calls[0]
assert 'label "com.project.touhou.character" has changed' in args[0]
def test_remote_config_labels_none(self):
remote = {'Labels': None}
local = Network(None, 'test_project', 'test_network')
check_remote_network_config(remote, local)
| apache-2.0 | 8,827,114,187,849,751,000 | 35.552326 | 83 | 0.472721 | false | 3.861794 | true | false | false |
BT-ojossen/sale-workflow | __unported__/stock_picking_reorder_lines/stock.py | 15 | 2645 | # -*- coding: utf-8 -*-
#
#
# Author: Alexandre Fayolle
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm, fields
class stock_move(orm.Model):
_inherit = 'stock.move'
_columns = {
'sequence': fields.integer('Sequence',
help="Gives the sequence of this line when "
"displaying the picking."),
}
_order = 'date_expected desc, sequence, id'
_defaults = {'sequence': 10,
}
class stock_picking(orm.Model):
_inherit = 'stock.picking'
def _prepare_invoice_line(
self, cr, uid, group, picking, move_line, invoice_id,
invoice_vals, context=None
):
res = super(stock_picking, self)._prepare_invoice_line(cr, uid,
group,
picking,
move_line,
invoice_id,
invoice_vals,
context)
res['sequence'] = move_line.sequence
return res
class sale_order(orm.Model):
_inherit = 'sale.order'
def _prepare_order_line_move(
self, cr, uid, order, line, picking_id, date_planned, context=None
):
res = super(sale_order, self)._prepare_order_line_move(cr, uid,
order,
line,
picking_id,
date_planned,
context)
res['sequence'] = line.sequence
return res
| agpl-3.0 | -6,019,221,204,846,369,000 | 37.897059 | 79 | 0.471078 | false | 5.106178 | false | false | false |
googlegenomics/gcp-variant-transforms | gcp_variant_transforms/beam_io/vcfio_test.py | 1 | 47862 | # This Python file uses the following encoding: utf-8
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vcfio module."""
import glob
import gzip
import logging
import os
import tempfile
import unittest
import apache_beam as beam
from apache_beam.io.filesystem import CompressionTypes
import apache_beam.io.source_test_utils as source_test_utils
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from gcp_variant_transforms.testing import asserts
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.beam_io.vcfio import _VcfSource as VcfSource
from gcp_variant_transforms.beam_io.vcfio import ReadAllFromVcf
from gcp_variant_transforms.beam_io.vcfio import ReadFromVcf
from gcp_variant_transforms.beam_io.vcfio import Variant
from gcp_variant_transforms.beam_io.vcfio import VariantCall
from gcp_variant_transforms.beam_io.vcfio import SampleNameEncoding
from gcp_variant_transforms.testing import testdata_util
from gcp_variant_transforms.testing.temp_dir import TempDir
# Note: mixing \n and \r\n to verify both behaviors.
_SAMPLE_HEADER_LINES = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
'##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype Quality">\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample1\tSample2\r'
'\n',
]
_SAMPLE_TEXT_LINES = [
'20\t14370\t.\tG\tA\t29\tPASS\tAF=0.5\tGT:GQ\t0|0:48\t1|0:48\n',
'20\t17330\t.\tT\tA\t3\tq10\tAF=0.017\tGT:GQ\t0|0:49\t0|1:3\n',
'20\t1110696\t.\tA\tG,T\t67\tPASS\tAF=0.3,0.7\tGT:GQ\t1|2:21\t2|1:2\n',
'20\t1230237\t.\tT\t.\t47\tPASS\t.\tGT:GQ\t0|0:54\t0|0:48\n',
'19\t1234567\t.\tGTCT\tG,GTACT\t50\tPASS\t.\tGT:GQ\t0/1:35\t0/2:17\n',
'20\t1234\trs123\tC\tA,T\t50\tPASS\tAF=0.5\tGT:GQ\t0/0:48\t1/0:20\n',
'19\t123\trs1234\tGTC\t.\t40\tq10;s50\tNS=2\tGT:GQ\t1|0:48\t0/1:.\n',
'19\t12\t.\tC\t<SYMBOLIC>\t49\tq10\tAF=0.5\tGT:GQ\t0|1:45\t.:.\n'
]
hash_name = testdata_util.hash_name
VCF_LINE_1 = ('20 1234 rs123;rs2 C A,T 50 '
'PASS AF=0.5,0.1;NS=1;SVTYPE=BÑD GT:GQ 0/0:48 1/0:20\n')
VCF_LINE_2 = '19 123 rs1234 GTC . 40 q10;s50 NS=2 GT:GQ .|0:48 0/.:.\n'
VCF_LINE_3 = (
'19 12 . C <SYMBOLIC> 49 q10 AF=0.5 GT:PS:GQ 0|1:1:45 .:.:.\n')
GVCF_LINE = '19 1234 . C <NON_REF> 50 . END=1236 GT:GQ 0/0:99\n'
def _get_hashing_function(file_name, use_hashing):
def _hash_name_method(sample_name):
return sample_name if not use_hashing else hash_name(sample_name, file_name)
return _hash_name_method
def _get_sample_variant_1(file_name='', use_1_based_coordinate=False,
use_hashing=True, move_hom_ref_calls=False):
"""Get first sample variant.
Features:
multiple alternates
not phased
multiple names
utf-8 encoded
"""
hash_name_method = _get_hashing_function(file_name, use_hashing)
variant = vcfio.Variant(
reference_name='20', start=1233 + use_1_based_coordinate, end=1234,
reference_bases='C', alternate_bases=['A', 'T'], names=['rs123', 'rs2'],
quality=50, filters=['PASS'],
hom_ref_calls=([('Sample1', hash_name_method('Sample1'))] if
move_hom_ref_calls else None),
info={'AF': [0.5, 0.1], 'NS': 1, 'SVTYPE': ['BÑD']})
if not move_hom_ref_calls:
variant.calls.append(
vcfio.VariantCall(sample_id=hash_name_method('Sample1'), name='Sample1',
genotype=[0, 0], info={'GQ': 48}))
variant.calls.append(
vcfio.VariantCall(sample_id=hash_name_method('Sample2'), name='Sample2',
genotype=[1, 0], info={'GQ': 20}))
return variant
def _get_sample_variant_2(file_name='', use_1_based_coordinate=False,
use_hashing=True, move_hom_ref_calls=False):
"""Get second sample variant.
Features:
multiple references
no alternate
phased
multiple filters
missing format field
"""
hash_name_method = _get_hashing_function(file_name, use_hashing)
variant = vcfio.Variant(
reference_name='19',
start=122 + use_1_based_coordinate, end=125, reference_bases='GTC',
alternate_bases=[], names=['rs1234'], quality=40,
filters=['q10', 's50'], hom_ref_calls=[] if move_hom_ref_calls else None,
info={'NS': 2})
variant.calls.append(
vcfio.VariantCall(sample_id=hash_name_method('Sample1'), name='Sample1',
genotype=[-1, 0], phaseset=vcfio.DEFAULT_PHASESET_VALUE,
info={'GQ': 48}))
variant.calls.append(
vcfio.VariantCall(sample_id=hash_name_method('Sample2'), name='Sample2',
genotype=[0, -1], info={'GQ': None}))
return variant
def _get_sample_variant_3(file_name='', use_1_based_coordinate=False,
use_hashing=True, move_hom_ref_calls=False):
"""Get third sample variant.
Features:
symbolic alternate
no calls for sample 2
alternate phaseset
"""
hash_name_method = _get_hashing_function(file_name, use_hashing)
variant = vcfio.Variant(
reference_name='19', start=11 + use_1_based_coordinate, end=12,
reference_bases='C', alternate_bases=['<SYMBOLIC>'], quality=49,
filters=['q10'], hom_ref_calls=[] if move_hom_ref_calls else None,
info={'AF': [0.5]})
variant.calls.append(
vcfio.VariantCall(sample_id=hash_name_method('Sample1'), name='Sample1',
genotype=[0, 1], phaseset='1', info={'GQ': 45}))
variant.calls.append(
vcfio.VariantCall(sample_id=hash_name_method('Sample2'), name='Sample2',
genotype=[vcfio.MISSING_GENOTYPE_VALUE],
info={'GQ': None}))
return variant
def _get_sample_non_variant(use_1_based_coordinate=False):
"""Get sample non variant."""
non_variant = vcfio.Variant(
reference_name='19', start=1233 + use_1_based_coordinate, end=1236,
reference_bases='C', alternate_bases=['<NON_REF>'], quality=50)
non_variant.calls.append(
vcfio.VariantCall(sample_id=hash_name('Sample1'), name='Sample1',
genotype=[0, 0], info={'GQ': 99}))
return non_variant
class VcfSourceTest(unittest.TestCase):
VCF_FILE_DIR_MISSING = not os.path.exists(testdata_util.get_full_dir())
def _create_temp_vcf_file(
self, lines, tempdir, compression_type=CompressionTypes.UNCOMPRESSED):
if compression_type in (CompressionTypes.UNCOMPRESSED,
CompressionTypes.AUTO):
suffix = '.vcf'
elif compression_type == CompressionTypes.GZIP:
suffix = '.vcf.gz'
elif compression_type == CompressionTypes.BZIP2:
suffix = '.vcf.bz2'
else:
raise ValueError('Unrecognized compression type {}'.format(
compression_type))
return tempdir.create_temp_file(
suffix=suffix, lines=lines, compression_type=compression_type)
def _read_records(self, file_or_pattern, representative_header_lines=None,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
**kwargs):
return source_test_utils.read_from_source(
VcfSource(file_or_pattern,
representative_header_lines=representative_header_lines,
sample_name_encoding=sample_name_encoding,
**kwargs))
def _create_temp_file_and_read_records(
self, lines, representative_header_lines=None,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH):
return self._create_temp_file_and_return_records_with_file_name(
lines, representative_header_lines, sample_name_encoding)[0]
def _create_temp_file_and_return_records_with_file_name(
self, lines, representative_header_lines=None,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH):
with TempDir() as tempdir:
file_name = tempdir.create_temp_file(suffix='.vcf', lines=lines)
return (self._read_records(file_name, representative_header_lines,
sample_name_encoding), file_name)
def _assert_variants_equal(self, actual, expected):
self.assertEqual(
sorted(expected),
sorted(actual))
def _get_invalid_file_contents(self):
"""Gets sample invalid files contents.
Returns:
A `tuple` where the first element is contents that are invalid because
of record errors and the second element is contents that are invalid
because of header errors.
"""
malformed_vcf_records = [
# POS should be an integer.
[
'##FILTER=<ID=PASS,Description="All filters passed">\n',
'##FILTER=<ID=q10,Description="Quality is less than 10.">\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample\n',
'19\tabc\trs12345\tT\tC\t9\tq10\tAF=0.2;NS=2\tGT:GQ\t1|0:48\n',
]
]
return malformed_vcf_records #, malformed_header_lines
def _assert_pipeline_read_files_record_count_equal(
self, input_pattern, expected_count, use_read_all=False):
"""Helper method for verifying total records read.
Args:
input_pattern (str): Input file pattern to read.
      expected_count (int): Expected number of records that were read.
use_read_all (bool): Whether to use the scalable ReadAllFromVcf transform
instead of ReadFromVcf.
"""
pipeline = TestPipeline()
if use_read_all:
pcoll = (pipeline
| 'Create' >> beam.Create([input_pattern])
| 'Read' >> ReadAllFromVcf())
else:
pcoll = pipeline | 'Read' >> ReadFromVcf(input_pattern)
assert_that(pcoll, asserts.count_equals_to(expected_count))
pipeline.run()
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_read_single_file_large(self):
    test_data_configs = [
{'file': 'valid-4.0.vcf', 'num_records': 5},
{'file': 'valid-4.0.vcf.gz', 'num_records': 5},
{'file': 'valid-4.0.vcf.bz2', 'num_records': 5},
{'file': 'valid-4.1-large.vcf', 'num_records': 9882},
{'file': 'valid-4.2.vcf', 'num_records': 13},
]
    for config in test_data_configs:
read_data = self._read_records(
testdata_util.get_full_file_path(config['file']))
self.assertEqual(config['num_records'], len(read_data))
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_read_file_pattern_large(self):
read_data = self._read_records(
os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf'))
self.assertEqual(9900, len(read_data))
read_data_gz = self._read_records(
os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf.gz'))
self.assertEqual(9900, len(read_data_gz))
def test_single_file_no_records(self):
for content in [[''], [' '], ['', ' ', '\n'], ['\n', '\r\n', '\n']]:
self.assertEqual([], self._create_temp_file_and_read_records(
content, _SAMPLE_HEADER_LINES))
def test_single_file_1_based_verify_details(self):
variant = _get_sample_variant_1(use_1_based_coordinate=True)
read_data = None
with TempDir() as tempdir:
file_name = tempdir.create_temp_file(
suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_1])
read_data = source_test_utils.read_from_source(
VcfSource(file_name,
representative_header_lines=None,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
use_1_based_coordinate=True))
self.assertEqual(1, len(read_data))
self.assertEqual(variant, read_data[0])
def test_file_pattern_move_hom_ref_calls_verify_details(self):
variant_1 = _get_sample_variant_1(move_hom_ref_calls=True)
variant_2 = _get_sample_variant_2(move_hom_ref_calls=True)
variant_3 = _get_sample_variant_3(move_hom_ref_calls=True)
with TempDir() as tempdir:
_ = tempdir.create_temp_file(
suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_1])
_ = tempdir.create_temp_file(
suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_2, VCF_LINE_3])
read_data = source_test_utils.read_from_source(
VcfSource(os.path.join(tempdir.get_path(), '*.vcf'),
representative_header_lines=None,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
move_hom_ref_calls=True))
self.assertEqual(3, len(read_data))
self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)
def test_file_pattern_1_based_verify_details(self):
variant_1 = _get_sample_variant_1(use_1_based_coordinate=True)
variant_2 = _get_sample_variant_2(use_1_based_coordinate=True)
variant_3 = _get_sample_variant_3(use_1_based_coordinate=True)
with TempDir() as tempdir:
_ = tempdir.create_temp_file(
suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_1])
_ = tempdir.create_temp_file(
suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_2, VCF_LINE_3])
read_data = source_test_utils.read_from_source(
VcfSource(os.path.join(tempdir.get_path(), '*.vcf'),
representative_header_lines=None,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
use_1_based_coordinate=True))
self.assertEqual(3, len(read_data))
self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)
def test_single_file_verify_details(self):
read_data = self._create_temp_file_and_read_records(
_SAMPLE_HEADER_LINES + [VCF_LINE_1])
variant_1 = _get_sample_variant_1()
read_data = self._create_temp_file_and_read_records(
_SAMPLE_HEADER_LINES + [VCF_LINE_1])
self.assertEqual(1, len(read_data))
self.assertEqual(variant_1, read_data[0])
variant_2 = _get_sample_variant_2()
variant_3 = _get_sample_variant_3()
read_data = self._create_temp_file_and_read_records(
_SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3])
self.assertEqual(3, len(read_data))
self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)
def test_file_pattern_verify_details(self):
variant_1 = _get_sample_variant_1()
variant_2 = _get_sample_variant_2()
variant_3 = _get_sample_variant_3()
with TempDir() as tempdir:
self._create_temp_vcf_file(_SAMPLE_HEADER_LINES + [VCF_LINE_1], tempdir)
self._create_temp_vcf_file((_SAMPLE_HEADER_LINES +
[VCF_LINE_2, VCF_LINE_3]),
tempdir)
read_data = self._read_records(os.path.join(tempdir.get_path(), '*.vcf'))
self.assertEqual(3, len(read_data))
self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)
def test_single_file_verify_details_encoded_sample_name_without_file(self):
variant_1 = _get_sample_variant_1()
read_data = self._create_temp_file_and_read_records(
_SAMPLE_HEADER_LINES + [VCF_LINE_1],
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH)
self.assertEqual(1, len(read_data))
self.assertEqual(variant_1, read_data[0])
variant_2 = _get_sample_variant_2()
variant_3 = _get_sample_variant_3()
read_data = self._create_temp_file_and_read_records(
_SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3],
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH)
self.assertEqual(3, len(read_data))
self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)
def test_single_file_verify_details_encoded_sample_name_with_file(self):
read_data, file_name = (
self._create_temp_file_and_return_records_with_file_name(
_SAMPLE_HEADER_LINES + [VCF_LINE_1],
sample_name_encoding=SampleNameEncoding.WITH_FILE_PATH))
variant_1 = _get_sample_variant_1(file_name)
self.assertEqual(1, len(read_data))
self.assertEqual(variant_1, read_data[0])
read_data, file_name = (
self._create_temp_file_and_return_records_with_file_name(
_SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3],
sample_name_encoding=SampleNameEncoding.WITH_FILE_PATH))
variant_1 = _get_sample_variant_1(file_name)
variant_2 = _get_sample_variant_2(file_name)
variant_3 = _get_sample_variant_3(file_name)
self.assertEqual(3, len(read_data))
self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)
def test_single_file_verify_details_without_encoding(self):
read_data, file_name = (
self._create_temp_file_and_return_records_with_file_name(
_SAMPLE_HEADER_LINES + [VCF_LINE_1],
sample_name_encoding=SampleNameEncoding.NONE))
variant_1 = _get_sample_variant_1(file_name='', use_hashing=False)
self.assertEqual(1, len(read_data))
self.assertEqual(variant_1, read_data[0])
read_data, file_name = (
self._create_temp_file_and_return_records_with_file_name(
_SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3],
sample_name_encoding=SampleNameEncoding.NONE))
variant_1 = _get_sample_variant_1(file_name='', use_hashing=False)
variant_2 = _get_sample_variant_2(file_name='Name1', use_hashing=False)
variant_3 = _get_sample_variant_3(file_name=file_name, use_hashing=False)
self.assertEqual(3, len(read_data))
self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_read_after_splitting(self):
file_name = testdata_util.get_full_file_path('valid-4.1-large.vcf')
source = VcfSource(file_name)
splits = list(p for p in source.split(desired_bundle_size=500))
self.assertGreater(len(splits), 1)
sources_info = ([
(split.source, split.start_position, split.stop_position) for
split in splits])
self.assertGreater(len(sources_info), 1)
split_records = []
for source_info in sources_info:
split_records.extend(source_test_utils.read_from_source(*source_info))
self.assertEqual(9882, len(split_records))
def test_invalid_file(self):
invalid_file_contents = self._get_invalid_file_contents()
for content in invalid_file_contents:
with TempDir() as tempdir, self.assertRaises(ValueError):
self._read_records(self._create_temp_vcf_file(content, tempdir))
self.fail('Invalid VCF file must throw an exception')
# Try with multiple files (any one of them will throw an exception).
with TempDir() as tempdir, self.assertRaises(ValueError):
for content in invalid_file_contents:
self._create_temp_vcf_file(content, tempdir)
self._read_records(os.path.join(tempdir.get_path(), '*.vcf'))
def test_allow_malformed_records(self):
invalid_records = self._get_invalid_file_contents()
# Invalid records should not raise errors
for content in invalid_records:
with TempDir() as tempdir:
self._read_records(self._create_temp_vcf_file(content, tempdir),
allow_malformed_records=True)
def test_no_samples(self):
header_line = '#CHROM POS ID REF ALT QUAL FILTER INFO\n'
record_line = '19 123 . G A . PASS AF=0.2'
expected_variant = Variant(
reference_name='19', start=122, end=123, reference_bases='G',
alternate_bases=['A'], filters=['PASS'], info={'AF': [0.2]})
read_data = self._create_temp_file_and_read_records(
_SAMPLE_HEADER_LINES[:-1] + [header_line, record_line])
self.assertEqual(1, len(read_data))
self.assertEqual(expected_variant, read_data[0])
def test_no_info(self):
record_line = 'chr19 123 . . . . . . GT . .'
expected_variant = Variant(reference_name='chr19', start=122, end=123)
expected_variant.calls.append(
VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[vcfio.MISSING_GENOTYPE_VALUE]))
expected_variant.calls.append(
VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[vcfio.MISSING_GENOTYPE_VALUE]))
read_data = self._create_temp_file_and_read_records(
_SAMPLE_HEADER_LINES + [record_line])
self.assertEqual(1, len(read_data))
self.assertEqual(expected_variant, read_data[0])
def test_info_numbers_and_types(self):
info_headers = [
'##INFO=<ID=HA,Number=A,Type=String,Description="StringInfo_A">\n',
'##INFO=<ID=HG,Number=G,Type=Integer,Description="IntInfo_G">\n',
'##INFO=<ID=HR,Number=R,Type=Character,Description="ChrInfo_R">\n',
'##INFO=<ID=HF,Number=0,Type=Flag,Description="FlagInfo">\n',
'##INFO=<ID=HU,Number=.,Type=Float,Description="FloatInfo_variable">\n']
record_lines = [
'19 2 . A T,C . . HA=a1,a2;HG=1,2,3;HR=a,b,c;HF;HU=0.1 GT 1/0 0/1\n',
'19 124 . A T . . HG=3,4,5;HR=d,e;HU=1.1,1.2 GT 0/0 0/1']
variant_1 = Variant(
reference_name='19', start=1, end=2, reference_bases='A',
alternate_bases=['T', 'C'],
info={'HA': ['a1', 'a2'], 'HG': [1, 2, 3], 'HR': ['a', 'b', 'c'],
'HF': True, 'HU': [0.1]})
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[1, 0]))
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[0, 1]))
variant_2 = Variant(
reference_name='19', start=123, end=124, reference_bases='A',
alternate_bases=['T'],
info={'HG': [3, 4, 5], 'HR': ['d', 'e'], 'HU': [1.1, 1.2]})
variant_2.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[0, 0]))
variant_2.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[0, 1]))
read_data = self._create_temp_file_and_read_records(
info_headers + _SAMPLE_HEADER_LINES[1:] + record_lines)
self.assertEqual(2, len(read_data))
self._assert_variants_equal([variant_1, variant_2], read_data)
def test_use_of_representative_header(self):
# Info field `HU` is defined as Float in file header while data is String.
# This results in parser failure. We test if parser completes successfully
# when a representative headers with String definition for field `HU` is
# given.
file_content = [
'##INFO=<ID=HU,Number=.,Type=Float,Description="Info">\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\r\n',
'19 2 . A T . . HU=a,b GT 0/0 0/1\n',]
representative_header_lines = [
'##INFO=<ID=HU,Number=.,Type=String,Description="Info">\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',]
variant = Variant(
reference_name='19', start=1, end=2, reference_bases='A',
alternate_bases=['T'], info={'HU': ['a', 'b']})
variant.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[0, 0]))
variant.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[0, 1]))
# `file_headers` is used.
read_data = self._create_temp_file_and_read_records(file_content)
# Pysam expects Float value for HU, and returns Nones when list is given.
self.assertEqual([None, None], read_data[0].info['HU'])
# `representative_header` is used.
read_data = self._create_temp_file_and_read_records(
file_content, representative_header_lines)
self.assertEqual(1, len(read_data))
self._assert_variants_equal([variant], read_data)
def test_use_of_representative_header_two_files(self):
# Info field `HU` is defined as Float in file header while data is String.
# This results in parser failure. We test if parser completes successfully
# when a representative headers with String definition for field `HU` is
# given.
file_content_1 = [
'##INFO=<ID=HU,Number=.,Type=Float,Descri\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample1\r\n',
'9\t2\t.\tA\tT\t.\t.\tHU=a,b\tGT\t0/0']
file_content_2 = [
'##INFO=<ID=HU,Number=.,Type=Float,Descri\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample2\r\n',
'19\t2\t.\tA\tT\t.\t.\tHU=a,b\tGT\t0/1\n',]
representative_header_lines = [
'##INFO=<ID=HU,Number=.,Type=String,Description="Info">\n',
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',]
variant_1 = Variant(
reference_name='9', start=1, end=2, reference_bases='A',
alternate_bases=['T'], info={'HU': ['a', 'b']})
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1', genotype=[0, 0]))
variant_2 = Variant(
reference_name='19', start=1, end=2, reference_bases='A',
alternate_bases=['T'], info={'HU': ['a', 'b']})
variant_2.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2', genotype=[0, 1]))
read_data_1 = self._create_temp_file_and_read_records(
file_content_1, representative_header_lines)
self.assertEqual(1, len(read_data_1))
self._assert_variants_equal([variant_1], read_data_1)
read_data_2 = self._create_temp_file_and_read_records(
file_content_2, representative_header_lines)
self.assertEqual(1, len(read_data_2))
self._assert_variants_equal([variant_2], read_data_2)
def test_end_info_key(self):
end_info_header_line = (
'##INFO=<ID=END,Number=1,Type=Integer,Description="End of record.">\n')
record_lines = ['19 123 . A T . . END=1111 GT 1/0 0/1\n',
'19 123 . A T . . . GT 0/1 1/1\n']
variant_1 = Variant(
reference_name='19', start=122, end=1111, reference_bases='A',
alternate_bases=['T'])
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[1, 0]))
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[0, 1]))
variant_2 = Variant(
reference_name='19', start=122, end=123, reference_bases='A',
alternate_bases=['T'])
variant_2.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[0, 1]))
variant_2.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[1, 1]))
read_data = self._create_temp_file_and_read_records(
[end_info_header_line] + _SAMPLE_HEADER_LINES[1:] + record_lines)
self.assertEqual(2, len(read_data))
self._assert_variants_equal([variant_1, variant_2], read_data)
def test_end_info_key_unknown_number(self):
end_info_header_line = (
'##INFO=<ID=END,Number=.,Type=Integer,Description="End of record.">\n')
record_lines = ['19 123 . A T . . END=1111 GT 1/0 0/1\n']
variant_1 = Variant(
reference_name='19', start=122, end=1111, reference_bases='A',
alternate_bases=['T'])
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[1, 0]))
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[0, 1]))
read_data = self._create_temp_file_and_read_records(
[end_info_header_line] + _SAMPLE_HEADER_LINES[1:] + record_lines)
self.assertEqual(1, len(read_data))
self._assert_variants_equal([variant_1], read_data)
def test_end_info_key_unknown_number_invalid(self):
end_info_header_line = (
'##INFO=<ID=END,Number=.,Type=Integer,Description="End of record.">\n')
# PySam should only take first END field.
variant = Variant(
reference_name='19', start=122, end=150, reference_bases='A',
alternate_bases=['T'])
variant.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[1, 0]))
variant.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[0, 1]))
read_data = self._create_temp_file_and_read_records(
[end_info_header_line] + _SAMPLE_HEADER_LINES[1:] +
['19 123 . A T . . END=150,160 GT 1/0 0/1\n'])
self.assertEqual(1, len(read_data))
self._assert_variants_equal([variant], read_data)
# END should be rounded down.
read_data = self._create_temp_file_and_read_records(
[end_info_header_line] + _SAMPLE_HEADER_LINES[1:] +
['19 123 . A T . . END=150.9 GT 1/0 0/1\n'])
self.assertEqual(1, len(read_data))
self._assert_variants_equal([variant], read_data)
# END should not be a string.
with self.assertRaises(ValueError):
self._create_temp_file_and_read_records(
[end_info_header_line] + _SAMPLE_HEADER_LINES[1:] +
['19 123 . A T . . END=text GT 1/0 0/1\n'])
def test_custom_phaseset(self):
phaseset_header_line = (
'##FORMAT=<ID=PS,Number=1,Type=Integer,Description="Phaseset">\n')
record_lines = ['19 123 . A T . . . GT:PS 1|0:1111 0/1:.\n',
'19 121 . A T . . . GT:PS 1|0:2222 0/1:2222\n']
variant_1 = Variant(
reference_name='19', start=122, end=123, reference_bases='A',
alternate_bases=['T'])
variant_1.calls.append(
VariantCall(sample_id=hash_name('Sample1'), name='Sample1',
genotype=[1, 0], phaseset='1111'))
variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2', genotype=[0, 1]))
variant_2 = Variant(
reference_name='19', start=120, end=121, reference_bases='A',
alternate_bases=['T'])
variant_2.calls.append(
VariantCall(sample_id=hash_name('Sample1'), name='Sample1',
genotype=[1, 0], phaseset='2222'))
variant_2.calls.append(
VariantCall(sample_id=hash_name('Sample2'), name='Sample2',
genotype=[0, 1], phaseset='2222'))
read_data = self._create_temp_file_and_read_records(
[phaseset_header_line] + _SAMPLE_HEADER_LINES[1:] + record_lines)
self.assertEqual(2, len(read_data))
self._assert_variants_equal([variant_1, variant_2], read_data)
def test_format_numbers(self):
format_headers = [
'##FORMAT=<ID=FU,Number=.,Type=String,Description="Format_variable">\n',
'##FORMAT=<ID=F1,Number=1,Type=Integer,Description="Format_1">\n',
'##FORMAT=<ID=F2,Number=2,Type=Character,Description="Format_2">\n',
'##FORMAT=<ID=AO,Number=A,Type=Integer,Description="Format_3">\n',
'##FORMAT=<ID=AD,Number=G,Type=Integer,Description="Format_4">\n',]
record_lines = [
('19 2 . A T,C . . . '
'GT:FU:F1:F2:AO:AD 1/0:a1:3:a,b:1:3,4 '
'0/1:a2,a3:4:b,c:1,2:3')]
expected_variant = Variant(
reference_name='19', start=1, end=2, reference_bases='A',
alternate_bases=['T', 'C'])
expected_variant.calls.append(VariantCall(
sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[1, 0],
info={'FU': ['a1'], 'F1': 3, 'F2': ['a', 'b'], 'AO': [1],
'AD': [3, 4]}))
expected_variant.calls.append(VariantCall(
sample_id=hash_name('Sample2'),
name='Sample2',
genotype=[0, 1],
info={'FU': ['a2', 'a3'], 'F1': 4, 'F2': ['b', 'c'], 'AO': [1, 2],
'AD':[3]}))
read_data = self._create_temp_file_and_read_records(
format_headers + _SAMPLE_HEADER_LINES[1:] + record_lines)
self.assertEqual(1, len(read_data))
self.assertEqual(expected_variant, read_data[0])
def test_pipeline_read_single_file(self):
with TempDir() as tempdir:
file_name = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
self._assert_pipeline_read_files_record_count_equal(
file_name, len(_SAMPLE_TEXT_LINES))
def test_pipeline_read_all_single_file(self):
with TempDir() as tempdir:
file_name = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
self._assert_pipeline_read_files_record_count_equal(
file_name, len(_SAMPLE_TEXT_LINES), use_read_all=True)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_pipeline_read_single_file_large(self):
self._assert_pipeline_read_files_record_count_equal(
testdata_util.get_full_file_path('valid-4.1-large.vcf'), 9882)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_pipeline_read_all_single_file_large(self):
self._assert_pipeline_read_files_record_count_equal(
testdata_util.get_full_file_path('valid-4.1-large.vcf'), 9882)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_pipeline_read_file_pattern_large(self):
self._assert_pipeline_read_files_record_count_equal(
os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf'), 9900)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_pipeline_read_all_file_pattern_large(self):
self._assert_pipeline_read_files_record_count_equal(
os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf'), 9900)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_pipeline_read_all_gzip_large(self):
self._assert_pipeline_read_files_record_count_equal(
os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf.gz'), 9900,
use_read_all=True)
@unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
def test_pipeline_read_all_multiple_files_large(self):
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> beam.Create(
[testdata_util.get_full_file_path('valid-4.0.vcf'),
testdata_util.get_full_file_path('valid-4.1-large.vcf'),
testdata_util.get_full_file_path('valid-4.2.vcf')])
| 'Read' >> ReadAllFromVcf())
assert_that(pcoll, asserts.count_equals_to(9900))
pipeline.run()
def test_pipeline_read_all_gzip(self):
with TempDir() as tempdir:
file_name_1 = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
compression_type=CompressionTypes.GZIP)
file_name_2 = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
compression_type=CompressionTypes.GZIP)
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> beam.Create([file_name_1, file_name_2])
| 'Read' >> ReadAllFromVcf())
assert_that(pcoll, asserts.count_equals_to(2 * len(_SAMPLE_TEXT_LINES)))
pipeline.run()
def test_pipeline_read_all_bzip2(self):
with TempDir() as tempdir:
file_name_1 = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
compression_type=CompressionTypes.BZIP2)
file_name_2 = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
compression_type=CompressionTypes.BZIP2)
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> beam.Create([file_name_1, file_name_2])
| 'Read' >> ReadAllFromVcf())
assert_that(pcoll, asserts.count_equals_to(2 * len(_SAMPLE_TEXT_LINES)))
pipeline.run()
def test_pipeline_read_all_multiple_files(self):
with TempDir() as tempdir:
file_name_1 = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
file_name_2 = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> beam.Create([file_name_1, file_name_2])
| 'Read' >> ReadAllFromVcf())
assert_that(pcoll, asserts.count_equals_to(2 * len(_SAMPLE_TEXT_LINES)))
pipeline.run()
def test_read_reentrant_without_splitting(self):
with TempDir() as tempdir:
file_name = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
source = VcfSource(file_name)
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
def test_read_reentrant_after_splitting(self):
with TempDir() as tempdir:
file_name = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
source = VcfSource(file_name)
splits = list(split for split in source.split(desired_bundle_size=100000))
assert len(splits) == 1
source_test_utils.assert_reentrant_reads_succeed(
(splits[0].source, splits[0].start_position, splits[0].stop_position))
def test_dynamic_work_rebalancing(self):
with TempDir() as tempdir:
file_name = self._create_temp_vcf_file(
_SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
source = VcfSource(file_name)
splits = list(split for split in source.split(desired_bundle_size=100000))
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(
splits[0].source, splits[0].start_position, splits[0].stop_position)
class VcfSinkTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.path = tempfile.NamedTemporaryFile(suffix='.vcf').name
self.variants, self.variant_lines = list(zip(
(_get_sample_variant_1(), VCF_LINE_1),
(_get_sample_variant_2(), VCF_LINE_2),
(_get_sample_variant_3(), VCF_LINE_3),
(_get_sample_non_variant(), GVCF_LINE)))
def _assert_variant_lines_equal(self, actual, expected):
actual_fields = actual.strip().split('\t')
expected_fields = expected.strip().split('\t')
self.assertEqual(len(actual_fields), len(expected_fields))
self.assertEqual(actual_fields[0], expected_fields[0])
self.assertEqual(actual_fields[1], expected_fields[1])
self.assertCountEqual(actual_fields[2].split(';'),
expected_fields[2].split(';'))
self.assertEqual(actual_fields[3], expected_fields[3])
self.assertCountEqual(actual_fields[4].split(','),
expected_fields[4].split(','))
self.assertEqual(actual_fields[5], actual_fields[5])
self.assertCountEqual(actual_fields[6].split(';'),
expected_fields[6].split(';'))
self.assertCountEqual(actual_fields[7].split(';'),
expected_fields[7].split(';'))
self.assertCountEqual(actual_fields[8].split(':'),
expected_fields[8].split(':'))
# Assert calls are the same
for call, expected_call in zip(actual_fields[9:], expected_fields[9:]):
actual_split = call.split(':')
expected_split = expected_call.split(':')
      # Compare the GT value (the first item) of each call
self.assertEqual(actual_split[0], expected_split[0])
# Compare the rest of the items ignoring order
self.assertCountEqual(actual_split[1:], expected_split[1:])
def _get_coder(self, bq_uses_1_based_coordinate=False):
return vcfio._ToVcfRecordCoder(bq_uses_1_based_coordinate)
def test_to_vcf_line_0_based(self):
coder = self._get_coder()
for variant, line in zip(self.variants, self.variant_lines):
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), line)
empty_variant = vcfio.Variant()
empty_line = '\t'.join(['.' for _ in range(9)])
self._assert_variant_lines_equal(
coder.encode(empty_variant).decode('utf-8'), empty_line)
def test_to_vcf_line_1_based(self):
coder = self._get_coder(bq_uses_1_based_coordinate=True)
variants = [
_get_sample_variant_1(use_1_based_coordinate=True),
_get_sample_variant_2(use_1_based_coordinate=True),
_get_sample_variant_3(use_1_based_coordinate=True),
_get_sample_non_variant(use_1_based_coordinate=True)]
for variant, line in zip(variants, self.variant_lines):
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), line)
empty_variant = vcfio.Variant()
empty_line = '\t'.join(['.' for _ in range(9)])
self._assert_variant_lines_equal(
coder.encode(empty_variant).decode('utf-8'), empty_line)
def test_missing_info_key(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[0, 1],
info={'GQ': 10, 'AF': 20}))
variant.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2', genotype=[0, 1],
info={'AF': 20}))
expected = ('. . . . . . . . GT:AF:GQ 0/1:20:10 '
'0/1:20:.\n')
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_info_list(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(VariantCall(sample_id=hash_name('Sample'),
name='Sample',
genotype=[0, 1],
info={'LI': [1, None, 3]}))
expected = '. . . . . . . . GT:LI 0/1:1,.,3\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_info_field_count(self):
coder = self._get_coder()
variant = Variant()
variant.info['NS'] = 3
variant.info['AF'] = [0.333, 0.667]
variant.info['DB'] = True
variant.info['CSQ'] = ['G|upstream_gene_variant||MODIFIER',
'T|||MODIFIER']
expected = ('. . . . . . . NS=3;AF=0.333,0.667;DB;'
'CSQ=G|upstream_gene_variant||MODIFIER,T|||MODIFIER .\n')
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_empty_sample_calls(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(
VariantCall(sample_id=hash_name('Sample2'), name='Sample2',
genotype=-1))
expected = '. . . . . . . . GT .\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_missing_genotype(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(
VariantCall(sample_id=hash_name('Sample'), name='Sample',
genotype=[1, vcfio.MISSING_GENOTYPE_VALUE]))
expected = '. . . . . . . . GT 1/.\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_triploid_genotype(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(VariantCall(
sample_id=hash_name('Sample'), name='Sample', genotype=[1, 0, 1]))
expected = '. . . . . . . . GT 1/0/1\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_write_dataflow_0_based(self):
pipeline = TestPipeline()
pcoll = pipeline | beam.Create(self.variants, reshuffle=False)
_ = pcoll | 'Write' >> vcfio.WriteToVcf(
self.path, bq_uses_1_based_coordinate=False)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with open(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
for actual, expected in zip(read_result, self.variant_lines):
self._assert_variant_lines_equal(actual, expected)
def test_write_dataflow_1_based(self):
variants = [
_get_sample_variant_1(use_1_based_coordinate=True),
_get_sample_variant_2(use_1_based_coordinate=True),
_get_sample_variant_3(use_1_based_coordinate=True),
_get_sample_non_variant(use_1_based_coordinate=True)]
pipeline = TestPipeline()
pcoll = pipeline | beam.Create(variants, reshuffle=False)
_ = pcoll | 'Write' >> vcfio.WriteToVcf(self.path)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with open(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
for actual, expected in zip(read_result, self.variant_lines):
self._assert_variant_lines_equal(actual, expected)
def test_write_dataflow_auto_compression(self):
pipeline = TestPipeline()
pcoll = pipeline | beam.Create(self.variants, reshuffle=False)
_ = pcoll | 'Write' >> vcfio.WriteToVcf(
self.path + '.gz',
compression_type=CompressionTypes.AUTO,
bq_uses_1_based_coordinate=False)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with gzip.GzipFile(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
for actual, expected in zip(read_result, self.variant_lines):
self._assert_variant_lines_equal(actual.decode('utf-8'), expected)
def test_write_dataflow_header(self):
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> beam.Create(self.variants, reshuffle=False)
headers = ['foo\n']
_ = pcoll | 'Write' >> vcfio.WriteToVcf(
self.path + '.gz',
compression_type=CompressionTypes.AUTO,
headers=headers,
bq_uses_1_based_coordinate=False)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with gzip.GzipFile(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
self.assertEqual(read_result[0].decode('utf-8'), 'foo')
for actual, expected in zip(read_result[1:], self.variant_lines):
self._assert_variant_lines_equal(actual.decode('utf-8'), expected)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 | -4,279,753,839,158,893,600 | 43.52093 | 80 | 0.618053 | false | 3.220727 | true | false | false |
i-rabot/tractogithub | tracformatter/trac/wiki/web_ui.py | 1 | 33389 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <[email protected]>
# Copyright (C) 2004-2005 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <[email protected]>
# Christopher Lenz <[email protected]>
from __future__ import with_statement
import pkg_resources
import re
from genshi.builder import tag
from trac.attachment import AttachmentModule
from trac.config import IntOption
from trac.core import *
from trac.mimeview.api import IContentConverter, Mimeview
from trac.perm import IPermissionRequestor
from trac.resource import *
from trac.search import ISearchSource, search_to_sql, shorten_result
from trac.timeline.api import ITimelineEventProvider
from trac.util import get_reporter_id
from trac.util.datefmt import from_utimestamp, to_utimestamp
from trac.util.text import shorten_line
from trac.util.translation import _, tag_
from trac.versioncontrol.diff import get_diff_options, diff_blocks
from trac.web.api import IRequestHandler
from trac.web.chrome import (Chrome, INavigationContributor, ITemplateProvider,
add_ctxtnav, add_link, add_notice, add_script,
add_stylesheet, add_warning, prevnext_nav,
web_context)
from trac.wiki.api import IWikiPageManipulator, WikiSystem, validate_page_name
from trac.wiki.formatter import format_to, OneLinerFormatter
from trac.wiki.model import WikiPage
class InvalidWikiPage(TracError):
"""Exception raised when a Wiki page fails validation.
:deprecated: Not used anymore since 0.11
"""
class WikiModule(Component):
implements(IContentConverter, INavigationContributor, IPermissionRequestor,
IRequestHandler, ITimelineEventProvider, ISearchSource,
ITemplateProvider)
page_manipulators = ExtensionPoint(IWikiPageManipulator)
max_size = IntOption('wiki', 'max_size', 262144,
"""Maximum allowed wiki page size in bytes. (''since 0.11.2'')""")
PAGE_TEMPLATES_PREFIX = 'PageTemplates/'
DEFAULT_PAGE_TEMPLATE = 'DefaultPage'
# IContentConverter methods
def get_supported_conversions(self):
yield ('txt', _('Plain Text'), 'txt', 'text/x-trac-wiki', 'text/plain',
9)
def convert_content(self, req, mimetype, content, key):
return (content, 'text/plain;charset=utf-8')
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'wiki'
def get_navigation_items(self, req):
if 'WIKI_VIEW' in req.perm('wiki'):
yield ('mainnav', 'wiki',
tag.a(_('Wiki'), href=req.href.wiki(), accesskey=1))
yield ('metanav', 'help',
tag.a(_('Help/Guide'), href=req.href.wiki('TracGuide'),
accesskey=6))
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['WIKI_CREATE', 'WIKI_DELETE', 'WIKI_MODIFY', 'WIKI_RENAME',
'WIKI_VIEW']
return actions + [('WIKI_ADMIN', actions)]
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/wiki(?:/(.+))?$', req.path_info)
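        # e.g. a path_info of '/wiki/SandBox/SubPage' captures 'SandBox/SubPage' as the
        # page name, while a bare '/wiki' still matches with an empty group (the page
        # name then defaults to 'WikiStart' in process_request)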
if match:
if match.group(1):
req.args['page'] = match.group(1)
return 1
def process_request(self, req):
action = req.args.get('action', 'view')
pagename = req.args.get('page', 'WikiStart')
version = req.args.get('version')
old_version = req.args.get('old_version')
if pagename.startswith('/') or pagename.endswith('/') or \
'//' in pagename:
pagename = re.sub(r'/{2,}', '/', pagename.strip('/'))
req.redirect(req.href.wiki(pagename))
if not validate_page_name(pagename):
raise TracError(_("Invalid Wiki page name '%(name)s'",
name=pagename))
page = WikiPage(self.env, pagename)
versioned_page = WikiPage(self.env, pagename, version=version)
req.perm(page.resource).require('WIKI_VIEW')
req.perm(versioned_page.resource).require('WIKI_VIEW')
if version and versioned_page.version != int(version):
raise ResourceNotFound(
_('No version "%(num)s" for Wiki page "%(name)s"',
num=version, name=page.name))
add_stylesheet(req, 'common/css/wiki.css')
if req.method == 'POST':
if action == 'edit':
if 'cancel' in req.args:
req.redirect(req.href.wiki(page.name))
has_collision = int(version) != page.version
for a in ('preview', 'diff', 'merge'):
if a in req.args:
action = a
break
versioned_page.text = req.args.get('text')
valid = self._validate(req, versioned_page)
if action == 'edit' and not has_collision and valid:
return self._do_save(req, versioned_page)
else:
return self._render_editor(req, page, action, has_collision)
elif action == 'delete':
self._do_delete(req, versioned_page)
elif action == 'rename':
return self._do_rename(req, page)
elif action == 'diff':
style, options, diff_data = get_diff_options(req)
contextall = diff_data['options']['contextall']
req.redirect(req.href.wiki(versioned_page.name, action='diff',
old_version=old_version,
version=version,
contextall=contextall or None))
elif action == 'delete':
return self._render_confirm_delete(req, page)
elif action == 'rename':
return self._render_confirm_rename(req, page)
elif action == 'edit':
return self._render_editor(req, page)
elif action == 'diff':
return self._render_diff(req, versioned_page)
elif action == 'history':
return self._render_history(req, versioned_page)
else:
format = req.args.get('format')
if format:
Mimeview(self.env).send_converted(req, 'text/x-trac-wiki',
versioned_page.text,
format, versioned_page.name)
return self._render_view(req, versioned_page)
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('trac.wiki', 'templates')]
# Internal methods
def _validate(self, req, page):
valid = True
# Validate page size
if len(req.args.get('text', '')) > self.max_size:
add_warning(req, _('The wiki page is too long (must be less '
'than %(num)s characters)',
num=self.max_size))
valid = False
# Give the manipulators a pass at post-processing the page
for manipulator in self.page_manipulators:
for field, message in manipulator.validate_wiki_page(req, page):
valid = False
if field:
add_warning(req, _("The Wiki page field '%(field)s' is "
"invalid: %(message)s",
field=field, message=message))
else:
add_warning(req, _("Invalid Wiki page: %(message)s",
message=message))
return valid
def _page_data(self, req, page, action=''):
title = get_resource_summary(self.env, page.resource)
if action:
title += ' (%s)' % action
return {'page': page, 'action': action, 'title': title}
def _prepare_diff(self, req, page, old_text, new_text,
old_version, new_version):
diff_style, diff_options, diff_data = get_diff_options(req)
diff_context = 3
for option in diff_options:
if option.startswith('-U'):
diff_context = int(option[2:])
break
if diff_context < 0:
diff_context = None
diffs = diff_blocks(old_text, new_text, context=diff_context,
ignore_blank_lines='-B' in diff_options,
ignore_case='-i' in diff_options,
ignore_space_changes='-b' in diff_options)
def version_info(v, last=0):
return {'path': get_resource_name(self.env, page.resource),
# TRANSLATOR: wiki page
'rev': v or _('currently edited'),
'shortrev': v or last + 1,
'href': req.href.wiki(page.name, version=v) if v else None}
changes = [{'diffs': diffs, 'props': [],
'new': version_info(new_version, old_version),
'old': version_info(old_version)}]
add_stylesheet(req, 'common/css/diff.css')
add_script(req, 'common/js/diff.js')
return diff_data, changes
def _do_delete(self, req, page):
if page.readonly:
req.perm(page.resource).require('WIKI_ADMIN')
else:
req.perm(page.resource).require('WIKI_DELETE')
if 'cancel' in req.args:
req.redirect(get_resource_url(self.env, page.resource, req.href))
version = int(req.args.get('version', 0)) or None
old_version = int(req.args.get('old_version', 0)) or version
with self.env.db_transaction as db:
if version and old_version and version > old_version:
# delete from `old_version` exclusive to `version` inclusive:
for v in range(old_version, version):
page.delete(v + 1, db)
else:
# only delete that `version`, or the whole page if `None`
page.delete(version, db)
if not page.exists:
add_notice(req, _("The page %(name)s has been deleted.",
name=page.name))
req.redirect(req.href.wiki())
else:
if version and old_version and version > old_version + 1:
add_notice(req, _('The versions %(from_)d to %(to)d of the '
'page %(name)s have been deleted.',
from_=old_version + 1, to=version, name=page.name))
else:
add_notice(req, _('The version %(version)d of the page '
'%(name)s has been deleted.',
version=version, name=page.name))
req.redirect(req.href.wiki(page.name))
def _do_rename(self, req, page):
if page.readonly:
req.perm(page.resource).require('WIKI_ADMIN')
else:
req.perm(page.resource).require('WIKI_RENAME')
if 'cancel' in req.args:
req.redirect(get_resource_url(self.env, page.resource, req.href))
old_name, old_version = page.name, page.version
new_name = req.args.get('new_name', '')
new_name = re.sub(r'/{2,}', '/', new_name.strip('/'))
redirect = req.args.get('redirect')
# verify input parameters
warn = None
if not new_name:
warn = _("A new name is mandatory for a rename.")
elif not validate_page_name(new_name):
warn = _("The new name is invalid (a name which is separated "
"with slashes cannot be '.' or '..').")
elif new_name == old_name:
warn = _("The new name must be different from the old name.")
elif WikiPage(self.env, new_name).exists:
warn = _("The page %(name)s already exists.", name=new_name)
if warn:
add_warning(req, warn)
return self._render_confirm_rename(req, page, new_name)
with self.env.db_transaction as db:
page.rename(new_name)
if redirect:
redirection = WikiPage(self.env, old_name, db=db)
redirection.text = _('See [wiki:"%(name)s"].', name=new_name)
author = get_reporter_id(req)
comment = u'[wiki:"%s@%d" %s] \u2192 [wiki:"%s"].' % (
new_name, old_version, old_name, new_name)
redirection.save(author, comment, req.remote_addr)
req.redirect(req.href.wiki(old_name if redirect else new_name))
def _do_save(self, req, page):
if page.readonly:
req.perm(page.resource).require('WIKI_ADMIN')
elif not page.exists:
req.perm(page.resource).require('WIKI_CREATE')
else:
req.perm(page.resource).require('WIKI_MODIFY')
if 'WIKI_ADMIN' in req.perm(page.resource):
# Modify the read-only flag if it has been changed and the user is
# WIKI_ADMIN
page.readonly = int('readonly' in req.args)
try:
page.save(get_reporter_id(req, 'author'), req.args.get('comment'),
req.remote_addr)
add_notice(req, _("Your changes have been saved in version "
"%(version)s.", version=page.version))
req.redirect(get_resource_url(self.env, page.resource, req.href,
version=None))
except TracError:
add_warning(req, _("Page not modified, showing latest version."))
return self._render_view(req, page)
def _render_confirm_delete(self, req, page):
if page.readonly:
req.perm(page.resource).require('WIKI_ADMIN')
else:
req.perm(page.resource).require('WIKI_DELETE')
version = None
if 'delete_version' in req.args:
version = int(req.args.get('version', 0))
old_version = int(req.args.get('old_version') or 0) or version
what = 'multiple' if version and old_version \
and version - old_version > 1 \
else 'single' if version else 'page'
num_versions = 0
new_date = None
old_date = None
for v, t, author, comment, ipnr in page.get_history():
if (v <= version or what == 'page') and new_date is None:
new_date = t
if (v <= old_version and what == 'multiple' or
num_versions > 1 and what == 'single'):
break
num_versions += 1
old_date = t
data = self._page_data(req, page, 'delete')
data.update({'what': what, 'new_version': None, 'old_version': None,
'num_versions': num_versions, 'new_date': new_date,
'old_date': old_date})
if version is not None:
data.update({'new_version': version, 'old_version': old_version})
self._wiki_ctxtnav(req, page)
return 'wiki_delete.html', data, None
def _render_confirm_rename(self, req, page, new_name=None):
if page.readonly:
req.perm(page.resource).require('WIKI_ADMIN')
else:
req.perm(page.resource).require('WIKI_RENAME')
data = self._page_data(req, page, 'rename')
data['new_name'] = new_name if new_name is not None else page.name
self._wiki_ctxtnav(req, page)
return 'wiki_rename.html', data, None
def _render_diff(self, req, page):
if not page.exists:
raise TracError(_('Version %(num)s of page "%(name)s" does not '
'exist',
num=req.args.get('version'), name=page.name))
old_version = req.args.get('old_version')
if old_version:
old_version = int(old_version)
if old_version == page.version:
old_version = None
elif old_version > page.version:
# FIXME: what about reverse diffs?
old_version = page.resource.version
page = WikiPage(self.env, page.name, version=old_version)
req.perm(page.resource).require('WIKI_VIEW')
latest_page = WikiPage(self.env, page.name, version=None)
req.perm(latest_page.resource).require('WIKI_VIEW')
new_version = int(page.version)
date = author = comment = ipnr = None
num_changes = 0
prev_version = next_version = None
for version, t, a, c, i in latest_page.get_history():
if version == new_version:
date = t
author = a or 'anonymous'
comment = c or '--'
ipnr = i or ''
else:
if version < new_version:
num_changes += 1
if not prev_version:
prev_version = version
if old_version is None or version == old_version:
old_version = version
break
else:
next_version = version
if not old_version:
old_version = 0
old_page = WikiPage(self.env, page.name, old_version)
req.perm(old_page.resource).require('WIKI_VIEW')
# -- text diffs
old_text = old_page.text.splitlines()
new_text = page.text.splitlines()
diff_data, changes = self._prepare_diff(req, page, old_text, new_text,
old_version, new_version)
# -- prev/up/next links
if prev_version:
add_link(req, 'prev', req.href.wiki(page.name, action='diff',
version=prev_version),
_('Version %(num)s', num=prev_version))
add_link(req, 'up', req.href.wiki(page.name, action='history'),
_('Page history'))
if next_version:
add_link(req, 'next', req.href.wiki(page.name, action='diff',
version=next_version),
_('Version %(num)s', num=next_version))
data = self._page_data(req, page, 'diff')
data.update({
'change': {'date': date, 'author': author, 'ipnr': ipnr,
'comment': comment},
'new_version': new_version, 'old_version': old_version,
'latest_version': latest_page.version,
'num_changes': num_changes,
'longcol': 'Version', 'shortcol': 'v',
'changes': changes,
'diff': diff_data,
})
prevnext_nav(req, _('Previous Change'), _('Next Change'),
_('Wiki History'))
return 'wiki_diff.html', data, None
def _render_editor(self, req, page, action='edit', has_collision=False):
if has_collision:
if action == 'merge':
page = WikiPage(self.env, page.name, version=None)
req.perm(page.resource).require('WIKI_VIEW')
else:
action = 'collision'
if page.readonly:
req.perm(page.resource).require('WIKI_ADMIN')
else:
req.perm(page.resource).require('WIKI_MODIFY')
original_text = page.text
comment = req.args.get('comment', '')
if 'text' in req.args:
page.text = req.args.get('text')
elif 'template' in req.args:
template = self.PAGE_TEMPLATES_PREFIX + req.args.get('template')
template_page = WikiPage(self.env, template)
if template_page and template_page.exists and \
'WIKI_VIEW' in req.perm(template_page.resource):
page.text = template_page.text
elif 'version' in req.args:
old_page = WikiPage(self.env, page.name,
version=int(req.args['version']))
req.perm(page.resource).require('WIKI_VIEW')
page.text = old_page.text
comment = _("Reverted to version %(version)s.",
version=req.args['version'])
if action in ('preview', 'diff'):
page.readonly = 'readonly' in req.args
author = get_reporter_id(req, 'author')
defaults = {'editrows': 20}
prefs = dict((key, req.session.get('wiki_%s' % key, defaults.get(key)))
for key in ('editrows', 'sidebyside'))
if 'from_editor' in req.args:
sidebyside = req.args.get('sidebyside') or None
if sidebyside != prefs['sidebyside']:
req.session.set('wiki_sidebyside', int(bool(sidebyside)), 0)
else:
sidebyside = prefs['sidebyside']
if sidebyside:
editrows = max(int(prefs['editrows']),
len(page.text.splitlines()) + 1)
else:
editrows = req.args.get('editrows')
if editrows:
if editrows != prefs['editrows']:
req.session.set('wiki_editrows', editrows,
defaults['editrows'])
else:
editrows = prefs['editrows']
data = self._page_data(req, page, action)
context = web_context(req, page.resource)
data.update({
'author': author,
'comment': comment,
'edit_rows': editrows, 'sidebyside': sidebyside,
'scroll_bar_pos': req.args.get('scroll_bar_pos', ''),
'diff': None,
'attachments': AttachmentModule(self.env).attachment_data(context),
})
if action in ('diff', 'merge'):
old_text = original_text.splitlines() if original_text else []
new_text = page.text.splitlines() if page.text else []
diff_data, changes = self._prepare_diff(
req, page, old_text, new_text, page.version, '')
data.update({'diff': diff_data, 'changes': changes,
'action': 'preview', 'merge': action == 'merge',
'longcol': 'Version', 'shortcol': 'v'})
elif sidebyside and action != 'collision':
data['action'] = 'preview'
self._wiki_ctxtnav(req, page)
Chrome(self.env).add_wiki_toolbars(req)
Chrome(self.env).add_auto_preview(req)
add_script(req, 'common/js/folding.js')
return 'wiki_edit.html', data, None
def _render_history(self, req, page):
"""Extract the complete history for a given page.
This information is used to present a changelog/history for a given
page.
"""
if not page.exists:
raise TracError(_("Page %(name)s does not exist", name=page.name))
data = self._page_data(req, page, 'history')
history = []
for version, date, author, comment, ipnr in page.get_history():
history.append({
'version': version,
'date': date,
'author': author,
'comment': comment,
'ipnr': ipnr
})
data.update({'history': history, 'resource': page.resource})
add_ctxtnav(req, _("Back to %(wikipage)s", wikipage=page.name),
req.href.wiki(page.name))
return 'history_view.html', data, None
def _render_view(self, req, page):
version = page.resource.version
# Add registered converters
if page.exists:
for conversion in Mimeview(self.env).get_supported_conversions(
'text/x-trac-wiki'):
conversion_href = req.href.wiki(page.name, version=version,
format=conversion[0])
# or...
conversion_href = get_resource_url(self.env, page.resource,
req.href, format=conversion[0])
add_link(req, 'alternate', conversion_href, conversion[1],
conversion[3])
data = self._page_data(req, page)
if page.name == 'WikiStart':
data['title'] = ''
ws = WikiSystem(self.env)
context = web_context(req, page.resource)
higher, related = [], []
if not page.exists:
if 'WIKI_CREATE' not in req.perm(page.resource):
raise ResourceNotFound(_('Page %(name)s not found',
name=page.name))
formatter = OneLinerFormatter(self.env, context)
if '/' in page.name:
parts = page.name.split('/')
for i in range(len(parts) - 2, -1, -1):
name = '/'.join(parts[:i] + [parts[-1]])
if not ws.has_page(name):
higher.append(ws._format_link(formatter, 'wiki',
'/' + name, name, False))
else:
name = page.name
name = name.lower()
related = [each for each in ws.pages
if name in each.lower()
and 'WIKI_VIEW' in req.perm('wiki', each)]
related.sort()
related = [ws._format_link(formatter, 'wiki', '/' + each, each,
False)
for each in related]
latest_page = WikiPage(self.env, page.name, version=None)
req.perm(latest_page.resource).require('WIKI_VIEW')
prev_version = next_version = None
if version:
try:
version = int(version)
for hist in latest_page.get_history():
v = hist[0]
if v != version:
if v < version:
if not prev_version:
prev_version = v
break
else:
next_version = v
except ValueError:
version = None
prefix = self.PAGE_TEMPLATES_PREFIX
templates = [template[len(prefix):]
for template in ws.get_pages(prefix)
if 'WIKI_VIEW' in req.perm('wiki', template)]
# -- prev/up/next links
if prev_version:
add_link(req, 'prev',
req.href.wiki(page.name, version=prev_version),
_('Version %(num)s', num=prev_version))
parent = None
if version:
add_link(req, 'up', req.href.wiki(page.name, version=None),
_('View latest version'))
elif '/' in page.name:
parent = page.name[:page.name.rindex('/')]
add_link(req, 'up', req.href.wiki(parent, version=None),
_("View parent page"))
if next_version:
add_link(req, 'next',
req.href.wiki(page.name, version=next_version),
_('Version %(num)s', num=next_version))
# Add ctxtnav entries
if version:
prevnext_nav(req, _('Previous Version'), _('Next Version'),
_('View Latest Version'))
else:
if parent:
add_ctxtnav(req, _('Up'), req.href.wiki(parent))
self._wiki_ctxtnav(req, page)
# Plugin content validation
fields = {'text': page.text}
for manipulator in self.page_manipulators:
manipulator.prepare_wiki_page(req, page, fields)
text = fields.get('text', '')
data.update({
'context': context,
'text': text,
'latest_version': latest_page.version,
'attachments': AttachmentModule(self.env).attachment_data(context),
'default_template': self.DEFAULT_PAGE_TEMPLATE,
'templates': templates,
'version': version,
'higher': higher, 'related': related,
'resourcepath_template': 'wiki_page_path.html',
})
add_script(req, 'common/js/folding.js')
return 'wiki_view.html', data, None
def _wiki_ctxtnav(self, req, page):
"""Add the normal wiki ctxtnav entries."""
add_ctxtnav(req, _('Start Page'), req.href.wiki('WikiStart'))
add_ctxtnav(req, _('Index'), req.href.wiki('TitleIndex'))
if page.exists:
add_ctxtnav(req, _('History'), req.href.wiki(page.name,
action='history'))
# ITimelineEventProvider methods
def get_timeline_filters(self, req):
if 'WIKI_VIEW' in req.perm:
yield ('wiki', _('Wiki changes'))
def get_timeline_events(self, req, start, stop, filters):
if 'wiki' in filters:
wiki_realm = Resource('wiki')
for ts, name, comment, author, version in self.env.db_query("""
SELECT time, name, comment, author, version FROM wiki
WHERE time>=%s AND time<=%s
""", (to_utimestamp(start), to_utimestamp(stop))):
wiki_page = wiki_realm(id=name, version=version)
if 'WIKI_VIEW' not in req.perm(wiki_page):
continue
yield ('wiki', from_utimestamp(ts), author,
(wiki_page, comment))
# Attachments
for event in AttachmentModule(self.env).get_timeline_events(
req, wiki_realm, start, stop):
yield event
def render_timeline_event(self, context, field, event):
wiki_page, comment = event[3]
if field == 'url':
return context.href.wiki(wiki_page.id, version=wiki_page.version)
elif field == 'title':
name = tag.em(get_resource_name(self.env, wiki_page))
if wiki_page.version > 1:
return tag_('%(page)s edited', page=name)
else:
return tag_('%(page)s created', page=name)
elif field == 'description':
markup = format_to(self.env, None,
context.child(resource=wiki_page), comment)
if wiki_page.version > 1:
diff_href = context.href.wiki(
wiki_page.id, version=wiki_page.version, action='diff')
markup = tag(markup,
' (', tag.a(_('diff'), href=diff_href), ')')
return markup
# ISearchSource methods
def get_search_filters(self, req):
if 'WIKI_VIEW' in req.perm:
yield ('wiki', _('Wiki'))
def get_search_results(self, req, terms, filters):
if not 'wiki' in filters:
return
with self.env.db_query as db:
sql_query, args = search_to_sql(db, ['w1.name', 'w1.author',
'w1.text'], terms)
wiki_realm = Resource('wiki')
for name, ts, author, text in db("""
SELECT w1.name, w1.time, w1.author, w1.text
FROM wiki w1,(SELECT name, max(version) AS ver
FROM wiki GROUP BY name) w2
WHERE w1.version = w2.ver AND w1.name = w2.name
AND """ + sql_query, args):
page = wiki_realm(id=name)
if 'WIKI_VIEW' in req.perm(page):
yield (get_resource_url(self.env, page, req.href),
'%s: %s' % (name, shorten_line(text)),
from_utimestamp(ts), author,
shorten_result(text, terms))
# Attachments
for result in AttachmentModule(self.env).get_search_results(
req, wiki_realm, terms):
yield result
| bsd-3-clause | -3,490,118,129,807,611,400 | 40.858793 | 80 | 0.509989 | false | 4.180168 | true | false | false |
tephyr/sqlalchemy_tzaware | demo.py | 1 | 3767 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""Demonstration of TZAwareDateTime composite column for sqlalchemy"""
__author__ = 'Andrew Ittner <[email protected]>'
__copyright__ = "Public Domain (CC0) <http://creativecommons.org/publicdomain/zero/1.0/>"
# stdlib
from datetime import datetime
# sqlalchemy
from sqlalchemy import MetaData, Table, Column, DateTime, Unicode, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import mapper, relation, composite, create_session
# timezone-aware composite column
from tzaware_datetime import TZAwareDateTime
# 3rd-party: dateutil <http://labix.org/python-dateutil>
from dateutil import tz
# demonstration parent table
class InfoMatic(object):
"""sqlalchemy main demonstration table: contains basic info, plus a composite TZAwareDateTime column"""
def __init__(self, info=None, tzawaredate=None, expectedoffset=None):
self.info = info
self.tzawaredate = tzawaredate
self.expectedoffset = expectedoffset
def __repr__(self):
return "<InfoMatic('%s', %s, %s)" % (self.info, self.tzawaredate, self.expectedoffset)
def prep_database():
global myengine
# create engine
myengine = create_engine('sqlite:///:memory:', echo=False)
# setup table metadata
metadata = MetaData()
table_infomatic = Table('infomatic', metadata,
Column('id', Integer, primary_key=True),
Column('info', Unicode(255)),
Column('expectedoffset', Integer),
Column('utcdate', DateTime), # for TZAwareDateTime
Column('tzname', Unicode), # for TZAwareDateTime
Column('tzoffset', Integer)) # for TZAwareDateTime
# setup mappings
mapper(InfoMatic, table_infomatic, properties={
'info': table_infomatic.c.info,
'expectedoffset': table_infomatic.c.expectedoffset,
'tzawaredate': composite(TZAwareDateTime,
table_infomatic.c.utcdate,
table_infomatic.c.tzname,
table_infomatic.c.tzoffset)
})
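    # Editor's note: composite() folds the three physical columns (utcdate, tzname,
    # tzoffset) into the single InfoMatic.tzawaredate attribute; TZAwareDateTime is
    # expected to expose them again through a __composite_values__() method.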
# create all tables
metadata.create_all(myengine)
def run_demo():
"""prep the database, create a session, run some example code"""
global myengine
prep_database()
# create session
session = create_session(bind=myengine, autocommit=True, autoflush=True) #autoflush=True: key!
# create & save info objects
lots_of_dates = [InfoMatic(u"first date", TZAwareDateTime(realdate=datetime.now(tz.tzutc())), 0)]
lots_of_dates.append(InfoMatic(u"null date", TZAwareDateTime(), None))
lots_of_dates.append(InfoMatic(u"PST date",
TZAwareDateTime(realdate=datetime.now(tz.gettz("PST"))),
28800))
lots_of_dates.append(InfoMatic(u"New Zealand date",
TZAwareDateTime(realdate=datetime.now(tz.gettz("Pacific/Auckland"),
))))
session.add_all(lots_of_dates)
# print all objects
info_count = session.query(InfoMatic).count()
print '\tAll infomatic objects (%s)' % info_count
for infomatic in session.query(InfoMatic):
assert isinstance(infomatic, InfoMatic)
if infomatic.tzawaredate is not None:
assert isinstance(infomatic.tzawaredate, TZAwareDateTime)
print infomatic
print '\t', infomatic.info
print '\ttzawaredate.realdate', infomatic.tzawaredate.realdate
print '\ttzawaredate.utcdt', infomatic.tzawaredate.utcdt
session.close()
if __name__ == '__main__':
run_demo()
| cc0-1.0 | 1,341,147,860,588,096,000 | 38.25 | 107 | 0.624635 | false | 3.887513 | false | false | false |
OmniLayer/omnicore | test/functional/test_framework/wallet_util.py | 10 | 4035 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful util functions for testing the wallet"""
from collections import namedtuple
from test_framework.address import (
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.script import (
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DUP,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
hash160,
sha256,
)
from test_framework.util import hex_str_to_bytes
Key = namedtuple('Key', ['privkey',
'pubkey',
'p2pkh_script',
'p2pkh_addr',
'p2wpkh_script',
'p2wpkh_addr',
'p2sh_p2wpkh_script',
'p2sh_p2wpkh_redeem_script',
'p2sh_p2wpkh_addr'])
Multisig = namedtuple('Multisig', ['privkeys',
'pubkeys',
'p2sh_script',
'p2sh_addr',
'redeem_script',
'p2wsh_script',
'p2wsh_addr',
'p2sh_p2wsh_script',
'p2sh_p2wsh_addr'])
def get_key(node):
"""Generate a fresh key on node
Returns a named tuple of privkey, pubkey and all address and scripts."""
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
pkh = hash160(hex_str_to_bytes(pubkey))
return Key(privkey=node.dumpprivkey(addr),
pubkey=pubkey,
p2pkh_script=CScript([OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=CScript([OP_0, pkh]).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=CScript([OP_HASH160, hash160(CScript([OP_0, pkh])), OP_EQUAL]).hex(),
p2sh_p2wpkh_redeem_script=CScript([OP_0, pkh]).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_multisig(node):
"""Generate a fresh 2-of-3 multisig on node
Returns a named tuple of privkeys, pubkeys and all address and scripts."""
addrs = []
pubkeys = []
for _ in range(3):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
witness_script = CScript([OP_0, sha256(script_code)])
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
pubkeys=pubkeys,
p2sh_script=CScript([OP_HASH160, hash160(script_code), OP_EQUAL]).hex(),
p2sh_addr=script_to_p2sh(script_code),
redeem_script=script_code.hex(),
p2wsh_script=witness_script.hex(),
p2wsh_addr=script_to_p2wsh(script_code),
p2sh_p2wsh_script=CScript([OP_HASH160, witness_script, OP_EQUAL]).hex(),
p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
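# Editor's illustrative sketch (not part of the original helper module): one way a
# functional test might combine the helpers above. 'node' is assumed to be a running
# TestNode with a funded wallet; the amount below is an arbitrary placeholder.
def example_fund_key_and_multisig(node, amount=1.0):
    """Hedged example only: fund a fresh single key and a fresh 2-of-3 multisig."""
    key = get_key(node)
    multisig = get_multisig(node)
    txid_key = node.sendtoaddress(key.p2wpkh_addr, amount)          # pay the P2WPKH address
    txid_multisig = node.sendtoaddress(multisig.p2sh_addr, amount)  # pay the P2SH multisig address
    return key, multisig, txid_key, txid_multisig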
def test_address(node, address, **kwargs):
"""Get address info for `address` and test whether the returned values are as expected."""
addr_info = node.getaddressinfo(address)
for key, value in kwargs.items():
if value is None:
if key in addr_info.keys():
raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key))
elif addr_info[key] != value:
raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value))
| mit | -4,905,352,963,345,390,000 | 39.757576 | 118 | 0.558116 | false | 3.457584 | true | false | false |
amenonsen/ansible | lib/ansible/modules/cloud/google/gcp_compute_route_info.py | 5 | 7403 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_route_info
description:
- Gather info for GCP Route
- This module was called C(gcp_compute_route_facts) before Ansible 2.9. The usage
has not changed.
short_description: Gather info for GCP Route
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
type: list
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: get info on a route
gcp_compute_route_info:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
destRange:
description:
- The destination range of outgoing packets that this route applies to.
- Only IPv4 is supported.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
network:
description:
- The network that this route applies to.
returned: success
type: dict
priority:
description:
- The priority of this route. Priority is used to break ties in cases where
there is more than one matching route of equal prefix length.
- In the case of two routes with equal prefix length, the one with the lowest-numbered
priority value wins.
- Default value is 1000. Valid range is 0 through 65535.
returned: success
type: int
tags:
description:
- A list of instance tags to which this route applies.
returned: success
type: list
nextHopGateway:
description:
- URL to a gateway that should handle matching packets.
- 'Currently, you can only specify the internet gateway, using a full or partial
valid URL: * U(https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway)
* projects/project/global/gateways/default-internet-gateway * global/gateways/default-internet-gateway
.'
returned: success
type: str
nextHopInstance:
description:
- URL to an instance that should handle matching packets.
- 'You can specify this as a full or partial URL. For example: * U(https://www.googleapis.com/compute/v1/projects/project/zones/zone/)
instances/instance * projects/project/zones/zone/instances/instance * zones/zone/instances/instance
.'
returned: success
type: dict
nextHopIp:
description:
- Network IP address of an instance that should handle matching packets.
returned: success
type: str
nextHopVpnTunnel:
description:
- URL to a VpnTunnel that should handle matching packets.
returned: success
type: dict
nextHopNetwork:
description:
- URL to a Network that should handle matching packets.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))
if module._name == 'gcp_compute_route_facts':
module.deprecate("The 'gcp_compute_route_facts' module has been renamed to 'gcp_compute_route_info'", version='2.13')
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/routes".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
return auth.list(link, return_if_object, array_name='items', params={'filter': query})
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
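# Editor's note (illustrative, not part of the generated module): a single filter is
# returned unchanged, while multiple filters are each wrapped in parentheses and joined
# with spaces, e.g.
#   query_options(['name = test', 'network = default']) -> '(name = test) (network = default)'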
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | 7,359,065,495,107,359,000 | 32.958716 | 140 | 0.597596 | false | 4.377883 | false | false | false |
guillermo-carrasco/bcbio-nextgen | bcbio/upload/filesystem.py | 6 | 2308 | """Extract files from processing run into output directory, organized by sample.
"""
import os
import shutil
from bcbio import utils
from bcbio.log import logger
from bcbio.upload import shared
def copy_finfo(finfo, storage_dir, pass_uptodate=False):
"""Copy a file into the output storage directory.
"""
if "sample" in finfo:
out_file = os.path.join(storage_dir, "%s-%s%s%s" % (finfo["sample"], finfo["ext"],
"-" if (".txt" in finfo["type"]) else ".",
finfo["type"]))
else:
out_file = os.path.join(storage_dir, os.path.basename(finfo["path"]))
out_file = os.path.abspath(out_file)
if not shared.up_to_date(out_file, finfo):
logger.info("Storing in local filesystem: %s" % out_file)
shutil.copy(finfo["path"], out_file)
return out_file
if pass_uptodate:
return out_file
def copy_finfo_directory(finfo, storage_dir):
"""Copy a directory into the final output directory.
"""
out_dir = os.path.abspath(os.path.join(storage_dir, finfo["ext"]))
if not shared.up_to_date(out_dir, finfo):
logger.info("Storing directory in local filesystem: %s" % out_dir)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.copytree(finfo["path"], out_dir)
for tmpdir in ["tx", "tmp"]:
if os.path.exists(os.path.join(out_dir, tmpdir)):
shutil.rmtree(os.path.join(out_dir, tmpdir))
os.utime(out_dir, None)
return out_dir
def update_file(finfo, sample_info, config, pass_uptodate=False):
"""Update the file in local filesystem storage.
"""
# skip if we have no directory to upload to
if "dir" not in config:
return
if "sample" in finfo:
storage_dir = utils.safe_makedir(os.path.join(config["dir"], finfo["sample"]))
elif "run" in finfo:
storage_dir = utils.safe_makedir(os.path.join(config["dir"], finfo["run"]))
else:
raise ValueError("Unexpected input file information: %s" % finfo)
if finfo.get("type") == "directory":
return copy_finfo_directory(finfo, storage_dir)
else:
return copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate)
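# Editor's hedged illustration (not part of the original module): the finfo dict consumed
# above is expected to carry at least 'path' and 'type', plus either 'sample' or 'run';
# a hypothetical call might look like:
#   update_file({"path": "/work/s1-ready.bam", "type": "bam", "ext": "ready", "sample": "s1"},
#               sample_info, {"dir": "/final/project"})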
| mit | 977,965,094,330,897,300 | 39.491228 | 102 | 0.603986 | false | 3.539877 | false | false | false |
lowitty/selenium | com/ericsson/xn/x/pm/PmCommons/PmCommon.py | 1 | 29289 | # -*- coding: utf-8 -*-
import time
from datetime import datetime, timedelta
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from com.ericsson.xn.commons.funcutils import find_single_widget, find_all_widgets, \
ne_category_by_ne_type, get_widget_ignore_refrence_error
from com.ericsson.xn.commons import test_logger as test
from com.ericsson.xn.commons.selfutils import compare_lists
def to_pm_management_page(driver):
'''
This function has been abandoned.
:param driver:
:return:
'''
test.info('To the PmManagement page...')
identifier = (By.XPATH, "//div[@class='ebLayout-Navigation']/div/div[1]/span")
find_single_widget(driver, 10, identifier).click()
identifier = (By.XPATH, "//div[@class='ebBreadcrumbs-list']/ul/li[3]/a")
find_single_widget(driver, 10, identifier).click()
def check_in_correct_pm_page(driver):
'''
    This function worked on early editions of the GUI and has been abandoned.
    :param driver: the selenium webdriver instance
    :return: True if the current page is the expected PM page, otherwise False
'''
id_search_btn = (By.ID, "idBtn-search")
b_validate = False
try:
find_single_widget(driver, 10, id_search_btn)
b_validate = True
except TimeoutException as e:
# page not loaded
return False
if b_validate:
# check if in the correct page
# id_navi = identifier = (By.XPATH, "//div[@class='ebLayout-Navigation']/div")
# navi = find_single_widget(driver, 10, id_navi)
id_divs = identifier = (By.XPATH, "//div[@class='ebLayout-Navigation']/div/div")
children_divs = find_all_widgets(driver, 20, id_divs)
str_last_navi = find_single_widget(children_divs[-1], 10, (By.XPATH, ".//a")).get_attribute('innerHTML').\
encode('utf-8').strip()
# logger.info(children_divs[-2].get_attribute('innerHTML').encode('utf-8'))
lis = find_all_widgets(children_divs[-2], 10, (By.XPATH, ".//div/ul/li"))
for li in lis:
str_a_li = find_single_widget(li, 10, (By.XPATH, ".//a")).get_attribute('innerHTML').encode('utf-8').strip()
if str_last_navi == str_a_li:
return True
# current page not in parent navigation
return False
def to_pm_management_page_by_url(driver, ne_type, server_info, to_url_pre='#network-overview/pm-management/'):
test.info('Will Navigate to the PMManagement page...')
base_url = 'http://' + server_info.getProperty('host') + ':' + str(server_info.getProperty('port')) + \
server_info.getProperty('preurl')
test.info('Base URL is: ' + base_url)
to_url = base_url + (to_url_pre + 'pm-' + ne_category_by_ne_type(ne_type) + '/' + 'pm-' + ne_type).lower()
test.info('To URL: ' + to_url)
driver.get(to_url)
make_sure_in_pm_page(driver)
def make_sure_in_pm_page(driver):
# btn id: ebBtnSearch
id_btn_interface = (By.ID, 'ebBtnSearch')
try:
find_single_widget(driver, 5, id_btn_interface)
        test.error('Page redirected to the interface management page, critical error!')
except TimeoutException:
id_query_btn = (By.ID, "idBtn-search")
try:
pm_query_btn = find_single_widget(driver, 10, id_query_btn)
if pm_query_btn:
test.passed('Found the query button of PM Management page, check passed.')
except TimeoutException:
test.failed('Cannot find the query button of PM Management page.')
def make_in_correct_tab(driver, prefix, postfix):
id_tabs = (By.XPATH, "//div[@class='ebTabs']/div[1]/div[2]/div")
tabs = find_all_widgets(driver, 10, id_tabs)
for tab in tabs:
if prefix + postfix == tab.get_attribute('innerHTML').encode('utf-8').strip():
if not tab.get_attribute('class').encode('utf-8').find('ebTabs-tabItem_selected_true') > -1:
tab.click()
wait_noti_widget_show(driver)
test.info('Now in TAB: ' + prefix + postfix)
def wait_noti_widget_show(driver, wait_time=10):
id_div = (By.XPATH, "//div[@class='noti']/div")
try:
find_single_widget(driver, wait_time, id_div)
        test.info('Query result notification showed up.')
    except TimeoutException:
        test.warning('Query result notification did not show up; the case may or may not fail later.')
def to_tab_by_ne_type(driver, ne_type, logger):
ne_type = ne_type.strip().upper()
tab_index = 1
if 'PGW' == ne_type:
tab_index = 1
elif 'SGW' == ne_type:
tab_index = 2
elif 'SGSN' == ne_type:
tab_index = 3
elif 'MME' == ne_type:
tab_index = 4
elif 'SBC' == ne_type:
tab_index = 5
elif 'OCGAS' == ne_type:
tab_index = 6
identifier = (By.XPATH, "//div[@class='ebTabs-tabArea']/div[" + str(tab_index) + "]")
find_single_widget(driver, 10, identifier).click()
# wait for the notification, maximum 10 seconds
try:
identifier = (By.XPATH, "//div[@class='noti']/div")
WebDriverWait(driver, 10).until(EC.presence_of_element_located(identifier))
except TimeoutException:
pass
def init_and_search(driver, ne_name, end_time=None, start_time=None):
# select the given nename
select_given_ne_name(driver, ne_name)
# select the correct time
if end_time is not None:
test.info('Query end time point set to: ' + end_time.strftime('%H%M%S'))
id_end_time = (By.XPATH, "//div[@class='endtime']/div/span/input")
find_single_widget(driver, 10, id_end_time).click()
set_time_for_query(driver, end_time)
if start_time is not None:
test.info('Query start time point set to: ' + start_time.strftime('%H%M%S'))
id_start_time = (By.XPATH, "//div[@class='starttime']/div/span/input")
find_single_widget(driver, 10, id_start_time).click()
set_time_for_query(driver, start_time)
# click the query button
id_query_btn = (By.ID, "idBtn-search")
find_single_widget(driver, 10, id_query_btn).click()
# wait for the notification, maximum 20 seconds
id_body_date = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/tbody")
find_single_widget(driver, 20, id_body_date)
def wait_until_pm_date_show_up(driver, ne_name, wait_time=720):
select_given_ne_name(driver, ne_name)
end_time = datetime.now() + timedelta(seconds=wait_time)
while datetime.now() < end_time:
id_query_btn = (By.ID, "idBtn-search")
find_single_widget(driver, 10, id_query_btn).click()
id_body_date = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/tbody")
try:
find_single_widget(driver, 10, id_body_date)
test.passed('Successfully found the counters data.')
return
except TimeoutException:
pass
    test.error('Waited for ' + str(wait_time) + ' seconds but could not find any PM data.')
def select_given_ne_name(driver, ne_name):
identifier = (By.XPATH, "//div[@class='pmcommonarea']/div/div[2]/div[1]/div[2]/input")
input_ne_name = find_single_widget(driver, 10, identifier)
if not '' == input_ne_name.get_attribute('value').strip():
input_ne_name.click()
find_single_widget(driver, 10, (By.ID, "btnAllLeft")).click()
else:
input_ne_name.click()
id_table_candidate = (By.XPATH, "//div[@class='ebLayout-candidateEnbs']/div[2]/div/div[3]/div/div/div/table")
table_candidate = find_single_widget(driver, 20, id_table_candidate)
id_input_search = (By.XPATH, ".//thead/tr[2]/th[2]/input")
candi_input = find_single_widget(table_candidate, 10, id_input_search)
candi_input.clear()
candi_input.send_keys(ne_name.strip())
time.sleep(1.0)
id_checkbox = (By.XPATH, ".//tbody/tr[1]/td[1]/div/div/input")
left_checkbox = find_single_widget(table_candidate, 10, id_checkbox)
if not left_checkbox.is_selected():
left_checkbox.click()
# select to right
id_arrow_to_right = (By.ID, "btnRight")
find_single_widget(driver, 10, id_arrow_to_right).click()
# time.sleep(5.0)
# close select ne dialog
id_btn_choose_ne = (By.CLASS_NAME, "choose")
find_single_widget(driver, 10, id_btn_choose_ne).click()
# WebDriverWait(driver, 10).until(EC.element_to_be_clickable(id_btn_choose_ne))
def wait_until_rounds_ok(driver, rows, rows_of_page, rows_each_period):
'''
    Keep re-running the query until the number of result rows needed for the PM check is available.
    :param driver: the selenium webdriver instance
    :param rows: total number of rows that must be present before the check can start
    :param rows_of_page: number of rows shown on each page of the GUI
    :param rows_each_period: number of rows produced in each 5-minute reporting period
    :return: None
'''
id_tbdoy_trs = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/tbody/tr")
# if dict_additional.has_key('check_rows'):
# rows = dict_additional['check_rows']
# if not 0 == rows % dict_additional['number_of_lic']:
# test.error('Number of checked rows should be integer multiples of number of LICs.')
t_start = datetime.now()
# Note that most of the PM need T2-T1, for Node like SBC, we may wait 5 minutes more since SBC don't need T2-T1
# t_end = t_start + timedelta(minutes=5 * (rows // dict_additional['number_of_lic'] + 1) + 2)
t_end = t_start + timedelta(minutes=5 * (rows // rows_each_period + 1) + 2)
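    # Worked example (editor's note): with rows=24 and rows_each_period=12 the loop below
    # waits at most 5 * (24 // 12 + 1) + 2 = 17 minutes before giving up.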
while datetime.now() < t_end:
# click the query button
id_query_btn = (By.ID, "idBtn-search")
find_single_widget(driver, 10, id_query_btn).click()
time.sleep(.1)
try:
i_page = rows / rows_of_page
tgt_page_number = i_page if 0 == rows % rows_of_page else i_page + 1
id_tgt_pager = (By.XPATH, ("//div[@class='page']/ul/li[2]/ul/li[" + str(tgt_page_number) + "]"))
time.sleep(.1)
tgt_pager = get_widget_ignore_refrence_error(driver, id_tgt_pager)
if not tgt_pager.get_attribute('class').find('ebPagination-entryAnchor_current') > -1:
tgt_pager.click()
trs = find_all_widgets(driver, 20, id_tbdoy_trs)
if rows % rows_of_page <= len(trs):
test.passed('All the data that we need are ready now.')
return
except TimeoutException:
pass
time.sleep(.5)
    test.failed('It seems that the data we need has not been collected as expected; the case may fail in later steps.')
def check_pm_rows_updated(driver, ne_type, dict_counters, rows_of_page, dict_additional):
'''
    The main function that checks the PM data is correct: it first checks the counter values of each row,
    then checks that the minutes of the GUI time are a multiple of 5,
    and finally checks the LICs if the node has more than one LIC.
    :param driver: the selenium webdriver instance
    :param ne_type: the NE's type
    :param dict_counters: the base counter values as a dictionary
    :param rows_of_page: how many rows each page has on the GUI, default is 10
    :param dict_additional: additional information used for special nodes:
        (check_rounds: how many reporting rounds will be checked),
        (number_of_lic: how many LICs the node has; the total number of checked rows is
        check_rounds * number_of_lic)
    :return: None
'''
check_rounds = dict_additional['check_rounds']
number_of_rows_be_checked = check_rounds * dict_additional['number_of_lic']
wait_until_rounds_ok(driver, number_of_rows_be_checked, 10, dict_additional['number_of_lic'])
is_m_lics = True if dict_additional['number_of_lic'] > 1 else False
list_returns = []
id_table = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table")
id_header_trs = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/thead/tr/th")
ths = find_all_widgets(driver, 20, id_header_trs)
list_headers = []
for th in ths:
list_headers.append(th.get_attribute('innerHTML').encode('utf-8').strip())
# if not 0 == number_of_rows_be_checked % dict_additional['number_of_lic']:
# test.error('Number of checked rows should be integer multiples of number of LICs.')
for row_index in range(1, number_of_rows_be_checked + 1):
        # check_pm_by_row returns [gui_datetime, lic_name] as a list
list_returns.append(check_pm_by_row(driver, id_table, row_index, ne_type, dict_counters, rows_of_page,
list_headers, is_m_lics))
# check GUI time and lic_name
lic_from_gui = []
if number_of_rows_be_checked != len(list_returns):
        test.failed('Number of rows to be checked does not match the number we expected.')
else:
number_of_lic = dict_additional['number_of_lic']
for i in range(0, len(list_returns), number_of_lic):
for j in range(number_of_lic):
lic_from_gui.append(list_returns[i + j][1])
gui_time_and_lic_name = list_returns[i + j]
if gui_time_and_lic_name[0] is not None and 0 == gui_time_and_lic_name[0].minute % 5:
test.passed('Row ' + str(i + j) + ' GUI time is correct, is: ' +
gui_time_and_lic_name[0].strftime('%Y-%m-%d %H:%M'))
else:
test.failed('Row ' + str(i + j) + ' GUI time is not multiple of 5, is: ' +
gui_time_and_lic_name[0].strftime('%Y-%m-%d %H:%M'))
if is_m_lics:
msg = 'Node has more than one LIC, '
if list_returns[i][0] == list_returns[i + j][0]:
msg += ' different LICs have the same report time.'
test.passed(msg)
else:
msg += ' different LICs don\'t have the same report time.'
test.failed(msg)
if i + number_of_lic < len(list_returns):
# the pre-condition of this check point is: GUI list data decent by datetime
if 300 == (list_returns[i][0] - list_returns[i + number_of_lic][0]).seconds:
test.passed('Report delta time is 5 minutes.')
else:
test.failed('Report delta time is not 5 minutes.')
# if checked 1 hour PM and node has many LICs, will check the LIC
if 12 == int(number_of_rows_be_checked / dict_additional['number_of_lic']):
if is_m_lics:
expected_lic = [t.split('-', 1)[1].strip() for t in sorted(dict_counters)]
if compare_lists(expected_lic, lic_from_gui):
test.passed('Lic check passed.')
else:
test.failed('Lic check failed, E: ' + str(expected_lic) + ', G: ' + str(lic_from_gui))
# else will check single LIC node, will not cover this edition
def check_me_counters(driver, ne_type, counters_expected, rows_of_page, dict_me_add, me_types):
'''
    This function checks the ME counters; the first edition assumes that there is only one record every 5 minutes.
    :param ne_type: the type of the node
    :param counters_expected: node ME counters that will be checked against the counters on the GUI
    :param dict_me_add: additional information, (check_rounds: how many rounds will be checked), (
    rows_each_period: how many rows each period has, default is 1, this parameter is for extending later.)
    :return: None: the function is for automation testing, critical errors will cause the program to exit immediately
'''
checked_rounds = dict_me_add['check_rounds']
number_of_rows_be_checked = checked_rounds * dict_me_add['rows_each_period']
wait_until_rounds_ok(driver, number_of_rows_be_checked, 10, dict_me_add['rows_each_period'])
list_returns = []
id_table = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table")
id_header_trs = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/thead/tr/th")
ths = find_all_widgets(driver, 20, id_header_trs)
list_headers = []
for th in ths:
list_headers.append(th.get_attribute('innerHTML').encode('utf-8').strip())
# number_of_rows_be_checked = len(counters_expected)
# if dict_me_add.has_key('check_rounds'):
# if not 0 == number_of_rows_be_checked % dict_me_add['number_of_lic']:
# test.error('Number of checked rows should be integer multiples of number of LICs.')
for row_index in range(1, number_of_rows_be_checked + 1):
# check_pm_by_row returns [gui_datettime, lic_name] in List
time_of_gui = check_me_single_row(driver, id_table, row_index, ne_type, counters_expected,
rows_of_page, list_headers, me_types)
list_returns.append(time_of_gui)
if number_of_rows_be_checked != len(list_returns):
        test.failed('Number of rows that have been checked does not match the number we expected.')
else:
for i in range(len(list_returns)):
if list_returns[i] is not None and 0 == list_returns[i].minute % 5:
test.passed('Row ' + str(i) + ' GUI time is correct, is: ' +
list_returns[i].strftime('%Y-%m-%d %H:%M'))
else:
                test.failed('Row ' + str(i) + ' GUI time is not a multiple of 5, is: ' +
                            list_returns[i].strftime('%Y-%m-%d %H:%M'))
if i + 1 < len(list_returns):
if 300 == (list_returns[i] - list_returns[i + 1]).seconds:
test.passed('Report delta time is 5 minutes.')
else:
test.failed('Report delta time is not 5 minutes.')
def check_pm_rows(driver, logger, ne_type, dict_counters, rows_of_page, dict_additional):
bool_overall = True
list_time = []
id_table = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table")
id_header_trs = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/thead/tr/th")
ths = find_all_widgets(driver, 20, id_header_trs)
list_headers = []
for th in ths:
list_headers.append(th.get_attribute('innerHTML').encode('utf-8').strip())
# table = find_single_widget(driver, 10, id_table)
rounds = len(dict_counters)
if 'SBC' == ne_type:
if dict_additional.has_key('rounds'):
rounds = dict_additional['rounds']
for i in range(1, rounds + 1):
bool_row, gui_time = check_pm_by_row(driver, id_table, logger, i, ne_type, dict_counters, rows_of_page, list_headers)
list_time.append(gui_time)
if not bool_row:
bool_overall = False
logger.error('Row ' + str(i) + " check FAILED. Check the log for detailed information.")
else:
logger.info('Row ' + str(i) + " check PASSED.")
if bool_overall:
if len(list_time) < 1:
bool_overall = False
logger.error('Failed: 0 rounds of PM checked, this does not make any sense.')
elif len(list_time) < 2:
if 'OCGAS' == ne_type:
bool_overall = False
logger.error('Failed: Node OCGAS is supposed to have two LICs, there is only one record of PM Data.')
elif list_time[0] is None:
bool_overall = False
logger.error('Failed: Fail to get the PM data time.')
else:
if 0 != list_time[0].minute % 5:
bool_overall = False
logger.error('Failed: PM Data time is not multiples of 5.')
else:
if ne_type in ['SGW', 'PGW', 'SGSN', 'MME', 'SBC']:
for i in range(0, len(list_time) - 1):
if list_time[i] is None or list_time[i + 1] is None:
bool_overall = False
logger.error('Failed: Fail to get the PM data time.')
break
else:
if 0 != list_time[i].minute % 5 or 0 != list_time[i + 1].minute % 5:
bool_overall = False
logger.error('Failed: PM Data time is not multiples of 5.')
break
if 300 != abs((list_time[i] - list_time[i + 1]).seconds):
bool_overall = False
logger.error('Failed: PM period is not 5 minutes.')
break
elif 'OCGAS' == ne_type:
for i in range(0, len(list_time), 2):
if i != len(list_time) - 2:
if list_time[i] is None or list_time[i + 1] is None or list_time[i + 2] is None:
bool_overall = False
logger.error('Failed: Fail to get the PM data time.')
break
else:
if list_time[i] != list_time[i + 1]:
bool_overall = False
logger.error('Failed: Two LICs of Node OCGAS should be the same.')
break
else:
if 0 != list_time[i].minute % 5 or 0 != list_time[i + 2].minute % 5:
bool_overall = False
logger.error('Failed: PM Data time is not multiples of 5.')
break
elif 300 != abs((list_time[i] - list_time[i + 2]).seconds):
bool_overall = False
logger.error('Failed: PM period is not 5 minutes. ' + str(list_time[i]) + ' '
+ str(list_time[i + 2]))
break
logger.info('GUI times: ' + ', '.join([str(t) for t in list_time]))
if bool_overall:
logger.info("Overall PASSED.")
else:
logger.error("Overall FAILED.")
def check_pm_by_row(driver, id_table, index_row, ne_type, dict_counters, rows_of_page, list_headers, is_m_lics):
test.info('Start to check row: ' + str(index_row))
make_sure_is_correct_page(driver, index_row, rows_of_page)
try:
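        # Map the absolute row index onto the current page: an exact multiple of rows_of_page is the
        # last row of its page, otherwise the remainder gives the 1-based row index on that page.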
gui_index_row = rows_of_page if 0 == index_row % rows_of_page else index_row % rows_of_page
id_tr = (By.XPATH, ".//tbody/tr[" + str(gui_index_row) + "]")
table = find_single_widget(driver, 10, id_table)
time.sleep(.5)
tr = find_single_widget(table, 10, id_tr)
gui_str_time = find_single_widget(tr, 10, (By.XPATH, ".//td[2]")).get_attribute('innerHTML').encode('utf-8')
gui_time = datetime.strptime(gui_str_time.strip(), "%Y-%m-%d %H:%M")
except_counter_id = str(gui_time.minute)
id_lic_name = (By.XPATH, ".//td[3]")
lic_name = find_single_widget(tr, 5, id_lic_name).get_attribute('innerHTML').encode('utf-8')
if is_m_lics:
except_counter_id = str(gui_time.minute) + '-' + lic_name
list_row = dict_counters[except_counter_id].split(',')
for i in range(len(list_row)):
try:
id_counter = (By.XPATH, ".//td[" + str(i + 4) + "]")
gui_counter = find_single_widget(tr, 5, id_counter).get_attribute('innerHTML').encode('utf-8')
i_gui_counter = int(gui_counter)
except Exception as e:
i_gui_counter = None
if int(list_row[i].strip()) == i_gui_counter:
msg = list_headers[1] + ": " + gui_str_time.strip() + ",\t" + list_headers[2] + ": " + lic_name + "; " \
+ list_headers[i + 3] + ", GUI is " + str(i_gui_counter) + ",\tExpected is " + str(list_row[i]) \
+ "."
test.passed(msg)
else:
msg = list_headers[1] + ": " + gui_str_time.strip() + ",\t" + list_headers[2] + ": " + lic_name + "; " \
+ list_headers[i + 3] + ", GUI is " + str(i_gui_counter) + ",\tExpected is " + str(list_row[i]) \
+ "."
test.failed(msg)
return [gui_time, lic_name]
except Exception as e:
test.error("Test failed, ERROR: " + str(e))
def check_me_single_row(driver, id_table, index_row, ne_type, dict_counters, rows_of_page, list_headers, me_types):
test.info('Start to check ME row: ' + str(index_row))
make_sure_is_correct_page(driver, index_row, rows_of_page)
try:
gui_index_row = rows_of_page if 0 == index_row % rows_of_page else index_row % rows_of_page
id_tr = (By.XPATH, ".//tbody/tr[" + str(gui_index_row) + "]")
table = find_single_widget(driver, 10, id_table)
time.sleep(.5)
tr = find_single_widget(table, 10, id_tr)
gui_str_time = find_single_widget(tr, 10, (By.XPATH, ".//td[2]")).get_attribute('innerHTML').encode('utf-8')
gui_time = datetime.strptime(gui_str_time.strip(), "%Y-%m-%d %H:%M")
except_counter_id = str(gui_time.minute)
# id_lic_name = (By.XPATH, ".//td[3]")
# lic_name = find_single_widget(tr, 5, id_lic_name).get_attribute('innerHTML').encode('utf-8')
list_row = dict_counters[except_counter_id].split(',')
list_types = me_types['counter_types'].split(',')
for i in range(len(list_row)):
try:
id_counter = (By.XPATH, ".//td[" + str(i + 3) + "]")
gui_counter = find_single_widget(tr, 5, id_counter).get_attribute('innerHTML').encode('utf-8')
# i_gui_counter = int(gui_counter) if 'int' == list_types[i].lower().strip()
if 'int' == list_types[i].lower().strip():
i_gui_counter = int(gui_counter)
i_expected = int(list_row[i].strip())
elif 'float' == list_types[i].lower().strip():
i_gui_counter = float(gui_counter)
i_expected = float(list_row[i].strip())
else:
test.error('Unknown counter type of me counters.')
except Exception as e:
i_gui_counter = None
if i_expected == i_gui_counter:
msg = list_headers[1] + ": " + gui_str_time.strip() + ",\t" + "; " + list_headers[i + 2] + ", GUI is " \
+ str(i_gui_counter) + ",\tExpected is " + str(list_row[i]) + "."
test.passed(msg)
else:
msg = list_headers[1] + ": " + gui_str_time.strip() + ",\t" + "; " + list_headers[i + 2] + ", GUI is " \
+ str(i_gui_counter) + ",\tExpected is " + str(list_row[i]) + "."
test.failed(msg)
return gui_time
except Exception as e:
test.error("Test failed, ERROR: " + str(e))
def to_second_page(driver, logger):
id_next_page = (By.XPATH, "//div[@class='page']/ul/li[3]")
pager = find_single_widget(driver, 10, id_next_page)
# driver.execute_script("arguments[0].scrollIntoView(true);", tds[12])
driver.execute_script("arguments[0].scrollIntoView(true);", pager)
pager.click()
# wait for the notification, maximum 10 seconds
id_body_date = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/tbody")
find_single_widget(driver, 10, id_body_date)
# time.sleep(2.0)
def make_sure_is_correct_page(driver, row_index, rows_of_page):
"""
    This function handles the situation where we need to paginate to a different page to check the PM data.
:param rows_of_page: how many rows each page has
:param driver: selenium instance
:param row_index: row index of the GUI
:return: None
"""
i_page = row_index / rows_of_page
tgt_page_number = i_page if 0 == row_index % rows_of_page else i_page + 1
id_tgt_pager = (By.XPATH, ("//div[@class='page']/ul/li[2]/ul/li[" + str(tgt_page_number) + "]"))
tgt_pager = find_single_widget(driver, 10, id_tgt_pager)
if not tgt_pager.get_attribute('class').find('ebPagination-entryAnchor_current') > -1:
tgt_pager.click()
# wait for the notification, maximum 10 seconds
id_body_date = (By.XPATH, "//div[@class='ebTabs']/div[2]/div/div/div/div/table/tbody")
find_single_widget(driver, 10, id_body_date)
test.info('Now in page ' + str(tgt_page_number) + '.')
def set_time_for_query(driver, date_time):
# first edition will only set the time part
id_time_holder = (By.XPATH, "//div[@data-namespace='ebTimePicker']")
time_holder = find_single_widget(driver, 10, id_time_holder)
id_hour = (By.XPATH, ".//table[1]/tbody/tr/td[2]/div[2]/input")
hour_input = find_single_widget(time_holder, 10, id_hour)
hour_input.clear()
hour_input.send_keys(date_time.hour)
id_minute = (By.XPATH, ".//table[2]/tbody/tr/td[2]/div[2]/input")
minute_input = find_single_widget(time_holder, 10, id_minute)
minute_input.clear()
minute_input.send_keys(date_time.minute)
id_second = (By.XPATH, ".//table[3]/tbody/tr/td[2]/div[2]/input")
second_input = find_single_widget(time_holder, 10, id_second)
second_input.clear()
# second_input.send_keys(date_time.second)
second_input.send_keys(0)
id_ok_btn = (By.XPATH, "//div[@class='ebDialogBox-actionBlock']/button[1]")
find_single_widget(driver, 10, id_ok_btn).click()
| mit | 1,719,917,007,543,155,700 | 47.252059 | 125 | 0.57776 | false | 3.390323 | true | false | false |
makuto/redditLikedSavedImageDownloader | LikedSavedDownloaderServer.py | 1 | 37101 | #!/usr/bin/env python
import json
import multiprocessing
import os
import random
import threading
import webbrowser
# third-party imports
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.httpclient
import tornado.gen
# local imports
import settings
import LikedSavedDatabase
from downloaders import redditUserImageScraper
from utils import utilities
# Require a username and password in order to use the web interface. See ReadMe.org for details.
#enable_authentication = False
enable_authentication = True
useSSL = True
if enable_authentication:
import PasswordManager
# List of valid user ids (used to compare user cookie)
authenticated_users = []
# If "next" isn't specified from login, redirect here after login instead
landingPage = "/"
class SessionData:
def __init__(self):
# Just in case, because tornado is multithreaded
self.lock = threading.Lock()
self.randomHistory = []
self.randomHistoryIndex = -1
self.favorites = []
self.favoritesIndex = 0
self.currentImage = None
self.randomImageFilter = ''
self.filteredImagesCache = []
self.currentDirectoryPath = ''
self.currentDirectoryCache = []
self.directoryFilter = ''
def acquire(self):
self.lock.acquire()
def release(self):
self.lock.release()
# user id : session data
userSessionData = {}
videoExtensions = ('.mp4', '.webm')
supportedExtensions = ('.gif', '.jpg', '.jpeg', '.png', '.mp4', '.webm', '.riff')
savedImagesCache = []
def generateSavedImagesCache(outputDir):
global savedImagesCache
# Clear cache in case already created
savedImagesCache = []
print('Creating content cache...', flush=True)
for root, dirs, files in os.walk(outputDir):
for file in files:
if file.endswith(supportedExtensions):
savedImagesCache.append(os.path.join(root, file))
print('Finished creating content cache ({} images/videos)'.format(len(savedImagesCache)))
def getRandomImage(filteredImagesCache=None, randomImageFilter=''):
if not savedImagesCache:
generateSavedImagesCache(settings.settings['Output_dir'])
if filteredImagesCache:
randomImage = random.choice(filteredImagesCache)
else:
randomImage = random.choice(savedImagesCache)
print('\tgetRandomImage(): Chose random image {} (filter {})'.format(randomImage, randomImageFilter))
serverPath = utilities.outputPathToServerPath(randomImage)
return randomImage, serverPath
#
# Tornado handlers
#
# See https://github.com/tornadoweb/tornado/blob/stable/demos/blog/blog.py
# https://www.tornadoweb.org/en/stable/guide/security.html
def login_get_current_user(handler):
if enable_authentication:
cookie = handler.get_secure_cookie("user")
if cookie in authenticated_users:
return cookie
else:
print("Bad/expired cookie received")
return None
else:
return "authentication_disabled"
class AuthHandler(tornado.web.RequestHandler):
def get_current_user(self):
return login_get_current_user(self)
class LoginHandler(AuthHandler):
def get(self):
if not enable_authentication:
self.redirect("/")
else:
if PasswordManager.havePasswordsBeenSet():
self.render("templates/Login.html",
next=self.get_argument("next", landingPage),
xsrf_form_html=self.xsrf_form_html())
else:
# New password setup
self.render("templates/LoginCreate.html",
next=self.get_argument("next", landingPage),
xsrf_form_html=self.xsrf_form_html())
def post(self):
global authenticated_users
# Test password
print("Attempting to authorize user {}...".format(self.get_argument("name")))
if enable_authentication and PasswordManager.verify(self.get_argument("password")):
# Generate new authenticated user session
randomGenerator = random.SystemRandom()
cookieSecret = str(randomGenerator.getrandbits(128))
authenticated_user = self.get_argument("name") + "_" + cookieSecret
authenticated_user = authenticated_user.encode()
authenticated_users.append(authenticated_user)
# Set the cookie on the user's side
self.set_secure_cookie("user", authenticated_user)
print("Authenticated user {}".format(self.get_argument("name")))
# Let them in
self.redirect(self.get_argument("next", landingPage))
else:
print("Refused user {} (password doesn't match any in database)".format(self.get_argument("name")))
self.redirect("/login")
class LogoutHandler(AuthHandler):
@tornado.web.authenticated
def get(self):
global authenticated_users
if enable_authentication:
print("User {} logging out".format(self.current_user))
if self.current_user in authenticated_users:
authenticated_users.remove(self.current_user)
self.redirect("/login")
else:
self.redirect("/")
class SetPasswordHandler(AuthHandler):
def get(self):
pass
def post(self):
if not enable_authentication:
self.redirect("/")
else:
print("Attempting to set password")
if PasswordManager.havePasswordsBeenSet():
print("Rejected: Password has already been set!")
elif self.get_argument("password") != self.get_argument("password_verify"):
print("Rejected: password doesn't match verify field!")
else:
PasswordManager.createPassword(self.get_argument("password"))
print("Success: Set password")
self.redirect("/login")
class AuthedStaticHandler(tornado.web.StaticFileHandler):
def get_current_user(self):
return login_get_current_user(self)
@tornado.web.authenticated
def prepare(self):
pass
class HomeHandler(AuthHandler):
@tornado.web.authenticated
def get(self):
self.render('webInterface/index.html')
def settingsToHtmlForm():
settingsInputs = []
for sectionSettingsPair in settings.settingsStructure:
settingsInputs.append('<h2>{}</h2>'.format(sectionSettingsPair[0]))
for sectionOption in sectionSettingsPair[1]:
option = None
optionComment = ''
if type(sectionOption) == tuple:
option = sectionOption[0]
optionComment = '<p class="optionComment">{}</p>'.format(sectionOption[1])
else:
option = sectionOption
if type(settings.settings[option]) == bool:
settingsInputs.append('''<input type="checkbox" id="{option}" name="{option}" value="{optionValue}" {checkedState} />
<label for="{option}">{optionName}</label>{comment}
<br />'''
.format(option=option, optionName=option.replace('_', ' '),
comment=optionComment,
checkedState=('checked' if settings.settings[option] else ''),
optionValue=('1' if settings.settings[option] else '0')))
elif type(settings.settings[option]) == int:
settingsInputs.append('''<label for="{option}">{optionName}</label>
<input type="number" id="{option}" name="{option}" value="{optionValue}" />{comment}
<br />'''
.format(option=option, optionName=option.replace('_', ' '), comment=optionComment,
optionValue=settings.settings[option]))
elif type(settings.settings[option]) == str:
settingsInputs.append('''<label for="{option}">{optionName}</label>
<input type="{type}" id="{option}" name="{option}" value="{optionValue}" />{comment}
<br />'''
.format(option=option, optionName=option.replace('_', ' '),
comment=optionComment, optionValue=settings.settings[option],
type=('password' if 'secret' in option.lower() or 'password' in option.lower() else 'text')))
return ''.join(settingsInputs)
unsupportedSubmissionShownColumns = ['title',
'bodyUrl',
'reasonForFailure']
unsupportedSubmissionColumnLabels = ['Retry', 'Source', 'Title',
'Content URL',
'Reason for Failure']
class UnsupportedSubmissionsHandler(AuthHandler):
def unsupportedSubmissionToTableColumns(self, unsupportedSubmission):
rowHtml = ''
rowHtml += '\t<td><input type="checkbox" name="shouldRetry" value="{}"/></td>\n'.format(unsupportedSubmission['id'])
# Special case source cell
rowHtml += '\t<td><a href="{}">{}</a></td>\n'.format(
'https://reddit.com{}'.format(unsupportedSubmission['postUrl']) if unsupportedSubmission['source'] == 'reddit'
else unsupportedSubmission['postUrl'],
unsupportedSubmission['source'])
for columnName in unsupportedSubmissionShownColumns:
if 'url' in columnName[-3:].lower():
rowHtml += '\t<td><a href="{}">Content</a></td>\n'.format(unsupportedSubmission['bodyUrl'])
else:
rowHtml += '\t<td>{}</td>\n'.format(unsupportedSubmission[columnName])
return rowHtml
def createTableHeader(self):
tableHeaderHtml = '<thead>\n<tr class="header">\n'
for columnName in unsupportedSubmissionColumnLabels:
tableHeaderHtml +='<th>{}</th>'.format(columnName)
tableHeaderHtml += '</tr>\n</thead>\n<tbody>\n'
return tableHeaderHtml
def getPendingFixups(self):
fixupHtml = ''
missingPixivSubmissions = LikedSavedDatabase.db.getMissingPixivSubmissionIds()
if len(missingPixivSubmissions):
if not fixupHtml:
fixupHtml += "<h2>Download missing content</h2>"
fixupHtml += '<p>There was an error which caused {} Pixiv submissions to not be downloaded.</p>'.format(len(missingPixivSubmissions))
fixupHtml += '<button id="FixupPixiv" onclick="fixupPixiv()">Download missing Pixiv submissions</button>'
fixupHtml += '<p>You should only need to do this once. The code error has been fixed.</p>'
return fixupHtml
@tornado.web.authenticated
def get(self):
unsupportedSubmissionsListHtml = self.createTableHeader()
unsupportedSubmissions = LikedSavedDatabase.db.getAllUnsupportedSubmissions()
i = 0
for unsupportedSubmission in reversed(unsupportedSubmissions):
unsupportedSubmissionsListHtml += ('<tr class="{}">{}</tr>\n'
.format('even' if i % 2 == 0 else 'odd',
self.unsupportedSubmissionToTableColumns(unsupportedSubmission)))
i += 1
unsupportedSubmissionsListHtml += '</tbody>\n'
self.render("templates/UnsupportedSubmissions.html",
unsupported_submissions_html=unsupportedSubmissionsListHtml,
length_unsupported_submissions=len(unsupportedSubmissions),
fixup_html=self.getPendingFixups())
class SettingsHandler(AuthHandler):
def doSettings(self, afterSubmit):
htmlSettingsForm = settingsToHtmlForm()
settingsFilename = settings.getSettingsFilename()
self.render("templates/Settings.html",
status_html=('<p><b>Settings updated</b></p>' if afterSubmit else ''),
settings_filename=settingsFilename,
settings_form_html=htmlSettingsForm,
xsrf_form_html=self.xsrf_form_html())
@tornado.web.authenticated
def get(self):
self.doSettings(False)
@tornado.web.authenticated
def post(self):
currentOutputDir = settings.settings['Output_dir']
print('Received new settings')
for option in settings.settings:
newValue = self.get_argument(option, None)
if not newValue:
# It's okay if it's a boolean because POST doesn't send unchecked checkboxes
# This means the user set the value to false
if type(settings.settings[option]) == bool:
settings.settings[option] = False
else:
print('Warning: Option {} unset! The settingsStructure might be out of sync.'
'\n\tIgnore this if the field is intentionally empty'.format(option))
else:
# All false bools are handed in the above if block, so we know they're true here
if type(settings.settings[option]) == bool:
newValue = True
elif type(settings.settings[option]) == int:
newValue = int(newValue)
settings.settings[option] = newValue
# print('\tSet {} = {}'.format(option, newValue))
# Write out the new settings
settings.writeServerSettings()
# Respond with a settings page saying we've updated the settings
self.doSettings(True)
# Refresh the cache in case the output directory changed
if currentOutputDir != settings.settings['Output_dir']:
generateSavedImagesCache(settings.settings['Output_dir'])
class RandomImageBrowserWebSocket(tornado.websocket.WebSocketHandler):
connections = set()
def cacheFilteredImages(self):
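        """Rebuild this session's list of images whose paths contain the
        current random image filter (case-insensitive substring match)."""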
# Clear the cache
self.sessionData.filteredImagesCache = []
if not self.sessionData.randomImageFilter:
return
randomImageFilterLower = self.sessionData.randomImageFilter.lower()
for imagePath in savedImagesCache:
if randomImageFilterLower in imagePath.lower():
self.sessionData.filteredImagesCache.append(imagePath)
print('\tFiltered images with "{}"; {} images matching filter'
.format(self.sessionData.randomImageFilter,
len(self.sessionData.filteredImagesCache)))
def changeCurrentDirectory(self, newDirectory):
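        """List newDirectory, drop the .json metadata files, apply the
        directory filter if one is set, and cache the sorted result."""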
self.sessionData.currentDirectoryPath = newDirectory
dirList = os.listdir(self.sessionData.currentDirectoryPath)
filteredDirList = []
for fileOrDir in dirList:
# The script spits out a lot of .json files the user probably doesn't want to see
if (not fileOrDir.endswith('.json')
and (not self.sessionData.directoryFilter
or self.sessionData.directoryFilter.lower() in fileOrDir.lower())):
filteredDirList.append(fileOrDir)
self.sessionData.currentDirectoryCache = sorted(filteredDirList)
def open(self):
global userSessionData
currentUser = login_get_current_user(self)
if not currentUser:
# Failed authorization
return None
self.connections.add(self)
if currentUser not in userSessionData:
newSessionData = SessionData()
userSessionData[currentUser] = newSessionData
self.sessionData = userSessionData[currentUser]
self.sessionData.acquire()
# Set up the directory cache with the top-level output
self.changeCurrentDirectory(settings.settings['Output_dir'])
self.sessionData.release()
def on_message(self, message):
currentUser = login_get_current_user(self)
if not currentUser:
# Failed authorization
return None
print('RandomImageBrowserWebSocket: Received message ', message)
parsedMessage = json.loads(message)
command = parsedMessage['command']
print('RandomImageBrowserWebSocket: Command ', command)
action = ''
self.sessionData.acquire()
"""
Random Image Browser
"""
if command == 'imageAddToFavorites':
if self.sessionData.currentImage:
self.sessionData.favorites.append(self.sessionData.currentImage)
self.sessionData.favoritesIndex = len(self.sessionData.favorites) - 1
LikedSavedDatabase.db.addFileToCollection(self.sessionData.currentImage[1], "Favorites")
if command == 'nextFavorite':
self.sessionData.favoritesIndex += 1
if self.sessionData.favoritesIndex >= 0 and self.sessionData.favoritesIndex < len(self.sessionData.favorites):
action = 'setImage'
fullImagePath, serverImagePath = self.sessionData.favorites[self.sessionData.favoritesIndex]
else:
self.sessionData.favoritesIndex = len(self.sessionData.favorites) - 1
if len(self.sessionData.favorites):
action = 'setImage'
fullImagePath, serverImagePath = self.sessionData.favorites[self.sessionData.favoritesIndex]
if command == 'previousFavorite' and len(self.sessionData.favorites):
action = 'setImage'
if self.sessionData.favoritesIndex > 0:
self.sessionData.favoritesIndex -= 1
fullImagePath, serverImagePath = self.sessionData.favorites[self.sessionData.favoritesIndex]
if command == 'nextImage':
action = 'setImage'
if self.sessionData.randomHistoryIndex == -1 or self.sessionData.randomHistoryIndex >= len(self.sessionData.randomHistory) - 1:
fullImagePath, serverImagePath = getRandomImage(self.sessionData.filteredImagesCache, self.sessionData.randomImageFilter)
self.sessionData.randomHistory.append((fullImagePath, serverImagePath))
self.sessionData.randomHistoryIndex = len(self.sessionData.randomHistory) - 1
else:
self.sessionData.randomHistoryIndex += 1
fullImagePath, serverImagePath = self.sessionData.randomHistory[self.sessionData.randomHistoryIndex]
if command == 'previousImage':
action = 'setImage'
if self.sessionData.randomHistoryIndex > 0:
self.sessionData.randomHistoryIndex -= 1
fullImagePath, serverImagePath = self.sessionData.randomHistory[self.sessionData.randomHistoryIndex]
if command in ['nextImageInFolder', 'previousImageInFolder'] and len(self.sessionData.randomHistory):
fullImagePath, serverImagePath = self.sessionData.currentImage
folder = fullImagePath[:fullImagePath.rfind('/')]
imagesInFolder = []
for root, dirs, files in os.walk(folder):
for file in files:
if file.endswith(supportedExtensions):
imagesInFolder.append(os.path.join(root, file))
utilities.sort_naturally(imagesInFolder)
currentImageIndex = imagesInFolder.index(fullImagePath)
if currentImageIndex >= 0:
action = 'setImage'
nextImageIndex = currentImageIndex + (1 if command == 'nextImageInFolder' else -1)
if nextImageIndex == len(imagesInFolder):
nextImageIndex = 0
if nextImageIndex < 0:
nextImageIndex = len(imagesInFolder) - 1
fullImagePath = imagesInFolder[nextImageIndex]
serverImagePath = utilities.outputPathToServerPath(fullImagePath)
if command == 'setFilter':
newFilter = parsedMessage['filter']
if newFilter != self.sessionData.randomImageFilter:
self.sessionData.randomImageFilter = newFilter
self.cacheFilteredImages()
"""
Directory browser
"""
if command == 'setDirectoryFilter':
newFilter = parsedMessage['filter']
if newFilter != self.sessionData.directoryFilter:
self.sessionData.directoryFilter = newFilter
# Refresh cache with new filter
self.changeCurrentDirectory(self.sessionData.currentDirectoryPath)
action = 'sendDirectory'
if command == 'listCurrentDirectory':
action = 'sendDirectory'
if command == 'changeDirectory':
            # Reset the filter (chances are the user only wanted to filter at one level)
            self.sessionData.directoryFilter = ''
            self.changeCurrentDirectory('{}/{}'.format(self.sessionData.currentDirectoryPath, parsedMessage['path']))
action = 'sendDirectory'
if command == 'directoryUp':
# Don't allow going higher than output dir
if self.sessionData.currentDirectoryPath != settings.settings['Output_dir']:
upDirectory = (settings.settings['Output_dir'] +
self.sessionData.currentDirectoryPath[len(settings.settings['Output_dir'])
: self.sessionData.currentDirectoryPath.rfind('/')])
                # Reset the filter (chances are the user only wanted to filter at one level)
self.sessionData.directoryFilter = ''
self.changeCurrentDirectory(upDirectory)
action = 'sendDirectory'
if command == 'directoryRoot':
            # Reset the filter (chances are the user only wanted to filter at one level)
self.sessionData.directoryFilter = ''
self.changeCurrentDirectory(settings.settings['Output_dir'])
action = 'sendDirectory'
"""
Actions
"""
# Only send a response if needed
if action == 'setImage':
# Stupid hack
if serverImagePath.endswith(videoExtensions):
action = 'setVideo'
self.sessionData.currentImage = (fullImagePath, serverImagePath)
responseMessage = ('{{"responseToCommand":"{}", "action":"{}", "fullImagePath":"{}", "serverImagePath":"{}"}}'
.format(command, action, fullImagePath, serverImagePath))
self.write_message(responseMessage)
if action == 'sendDirectory':
directoryList = ''
for path in self.sessionData.currentDirectoryCache:
isSupportedFile = path.endswith(supportedExtensions)
isFile = '.' in path
if path.endswith(videoExtensions):
fileType = 'video'
elif isSupportedFile:
fileType = 'image'
elif isFile:
fileType = 'file'
else:
fileType = 'dir'
serverPath = 'output' + self.sessionData.currentDirectoryPath[len(settings.settings['Output_dir']):] + '/' + path
directoryList += '{{"path":"{}", "type":"{}", "serverPath":"{}"}},'.format(path, fileType, serverPath)
# Do directoryList[:-1] (yuck) to trim the final trailing comma because JSON doesn't like it
responseMessage = ('{{"responseToCommand":"{}", "action":"{}", "directoryList":[{}]}}'
.format(command, action, directoryList[:-1]))
self.write_message(responseMessage)
self.sessionData.release()
def on_close(self):
self.connections.remove(self)
scriptPipeConnection = None
scriptProcess = None
def startScript(functionToRun, args=None):
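    """Launch functionToRun in a child process connected through a
    multiprocessing Pipe; does nothing if a script is already running."""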
global scriptPipeConnection, scriptProcess
# Script already running
if scriptProcess and scriptProcess.is_alive():
return
scriptPipeConnection, childConnection = multiprocessing.Pipe()
if not args:
scriptProcess = multiprocessing.Process(target=functionToRun,
args=(childConnection,))
else:
scriptProcess = multiprocessing.Process(target=functionToRun,
args=(childConnection, args,))
scriptProcess.start()
runScriptWebSocketConnections = set()
class RunScriptWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
if not login_get_current_user(self):
return None
global runScriptWebSocketConnections
runScriptWebSocketConnections.add(self)
def on_message(self, message):
if not login_get_current_user(self):
return None
print('RunScriptWebSocket: Received message ', message)
parsedMessage = json.loads(message)
command = parsedMessage['command']
print('RunScriptWebSocket: Command ', command)
if scriptProcess and scriptProcess.is_alive():
print('RunScriptWebSocket: Script already running')
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format('A download process is already running. Please wait until it completes.\\n',
'printMessage'))
self.write_message(responseMessage)
if command == 'runScript':
print('RunScriptWebSocket: Starting script')
startScript(redditUserImageScraper.runLikedSavedDownloader)
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format('Running downloader.\\n', 'printMessage'))
self.write_message(responseMessage)
elif command == 'retrySubmissions':
print('RunScriptWebSocket: Starting script')
if parsedMessage['submissionsToRetry']:
submissionIds = []
for submissionId in parsedMessage['submissionsToRetry']:
submissionIds.append(int(submissionId))
startScript(redditUserImageScraper.saveRequestedSubmissions,
submissionIds)
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format('Running downloader.\\n', 'printMessage'))
self.write_message(responseMessage)
else:
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format('No content selected.\\n', 'printMessage'))
self.write_message(responseMessage)
# Fix the non-unique filenames error
elif command == 'fixupPixivSubmissions':
print('RunScriptWebSocket: Starting pixiv fixup')
missingPixivSubmissions = LikedSavedDatabase.db.getMissingPixivSubmissionIds()
missingPixivSubmissionIds = []
for missingPixivSubmission in missingPixivSubmissions:
missingPixivSubmissionIds.append(int(missingPixivSubmission['id']))
# print(missingPixivSubmissionIds)
startScript(redditUserImageScraper.saveRequestedSubmissions, missingPixivSubmissionIds)
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format('Running downloader to download {} missing pixiv submissions.\\n'
.format(len(missingPixivSubmissions)),
'printMessage'))
elif command == 'explicitDownloadUrls':
print('RunScriptWebSocket: Starting script')
if parsedMessage['urls']:
urls = []
urlLines = parsedMessage['urls'].split('\n')
for line in urlLines:
# TODO: It would be a good idea to do some validation here, and maybe even regex extract URLs
urls.append(line)
print(urls)
startScript(redditUserImageScraper.saveRequestedUrls, urls)
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format('Running downloader.\\n', 'printMessage'))
self.write_message(responseMessage)
else:
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format('No URLs provided.\\n', 'printMessage'))
self.write_message(responseMessage)
else:
print('RunScriptWebSocket: Error: Received command not understood')
def on_close(self):
global runScriptWebSocketConnections
runScriptWebSocketConnections.remove(self)
def updateScriptStatus():
global scriptPipeConnection
# If no pipe or no data to receive from pipe, we're done
# Poll() is non-blocking whereas recv is blocking
try:
if (not runScriptWebSocketConnections
or not scriptPipeConnection
or not scriptPipeConnection.poll()):
return
except OSError:
scriptPipeConnection = None
return
try:
pipeOutput = scriptPipeConnection.recv()
if pipeOutput:
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format(pipeOutput.replace('\n', '\\n').replace('\t', ''),
'printMessage'))
for client in runScriptWebSocketConnections:
client.write_message(responseMessage)
if redditUserImageScraper.scriptFinishedSentinel in pipeOutput:
# Script finished; refresh image cache
print('Refreshing cache due to script finishing')
generateSavedImagesCache(settings.settings['Output_dir'])
responseMessage = ('{{"action":"{}"}}'
.format('scriptFinished'))
for client in runScriptWebSocketConnections:
client.write_message(responseMessage)
scriptPipeConnection.close()
except EOFError:
scriptPipeConnection = None
print("Lost connection to subprocess!")
responseMessage = ('{{"message":"{}", "action":"{}"}}'
.format("Downloader encountered a problem. Check your server output.",
'printMessage'))
for client in runScriptWebSocketConnections:
client.write_message(responseMessage)
#
# Startup
#
def make_app():
# Each time the server starts up, invalidate all cookies
randomGenerator = random.SystemRandom()
cookieSecret = str(randomGenerator.getrandbits(128))
return tornado.web.Application([
# Home page
(r'/', HomeHandler),
# Login
(r'/login', LoginHandler),
(r'/logout', LogoutHandler),
(r'/setPassword', SetPasswordHandler),
# Configure the script
(r'/settings', SettingsHandler),
# Handles messages for run script
(r'/runScriptWebSocket', RunScriptWebSocket),
# Handles messages for randomImageBrowser
(r'/randomImageBrowserWebSocket', RandomImageBrowserWebSocket),
(r'/unsupportedSubmissions', UnsupportedSubmissionsHandler),
#
# Static files
#
(r'/webInterface/(.*)', AuthedStaticHandler, {'path' : 'webInterface'}),
# Don't change this "output" here without changing the other places as well
(r'/output/(.*)', AuthedStaticHandler, {'path' : settings.settings['Output_dir']}),
# Files served regardless of whether the user is authenticated. Only login page resources
# should be in this folder, because anyone can see them
(r'/webInterfaceNoAuth/(.*)', tornado.web.StaticFileHandler, {'path' : 'webInterfaceNoAuth'}),
],
xsrf_cookies=True,
cookie_secret=cookieSecret,
login_url="/login")
if __name__ == '__main__':
print('Loading settings...')
settings.getSettings()
print('Content output directory: ' + settings.settings['Output_dir'])
if not settings.settings['Output_dir']:
print('WARNING: No output directory specified! This will probably break things')
if not savedImagesCache:
generateSavedImagesCache(settings.settings['Output_dir'])
LikedSavedDatabase.initializeFromSettings(settings.settings)
# Backwards compatibility: Read the old .json files into the database. This can be slow for old
# repositories, so only do it once
if not settings.settings['Database_Has_Imported_All_Submissions']:
# Also scan output_dir because Metadata_output_dir was a late addition
LikedSavedDatabase.importFromAllJsonInDir(settings.settings['Output_dir'])
LikedSavedDatabase.importFromAllJsonInDir(settings.settings['Metadata_output_dir'])
settings.settings['Database_Has_Imported_All_Submissions'] = True
settings.writeServerSettings()
print('Successfully imported "All" Submissions into database')
if not settings.settings['Database_Has_Imported_Unsupported_Submissions']:
LikedSavedDatabase.importUnsupportedSubmissionsFromAllJsonInDir(settings.settings['Output_dir'])
LikedSavedDatabase.importUnsupportedSubmissionsFromAllJsonInDir(settings.settings['Metadata_output_dir'])
print('Removing Unsupported Submissions which have file associations')
LikedSavedDatabase.db.removeUnsupportedSubmissionsWithFileAssociations()
settings.settings['Database_Has_Imported_Unsupported_Submissions'] = True
settings.writeServerSettings()
print('Successfully imported Unsupported Submissions into database')
# TODO
# if not settings.settings['Database_Has_Imported_Comments']:
# LikedSavedDatabase.importFromAllJsonInDir(settings.settings['Output_dir'])
# settings.settings['Database_Has_Imported_Comments'] = True
# This isn't pretty, but it'll get the job done
webSocketSettings = open('webInterface/webSocketSettings.js', 'w')
webSocketSettings.write('useSSL = {};'.format('true' if useSSL else 'false'))
webSocketSettings.close()
port = settings.settings['Port'] if settings.settings['Port'] else 8888
print('\nStarting Content Collector Server on port {}...'.format(port))
app = make_app()
# Generating a self-signing certificate:
# openssl req -x509 -nodes -days 365 -newkey rsa:1024 -keyout certificates/server_jupyter_based.crt.key -out certificates/server_jupyter_based.crt.pem
# (from https://jupyter-notebook.readthedocs.io/en/latest/public_server.html)
# I then had to tell Firefox to trust this certificate even though it is self-signing (because
# I want a free certificate for this non-serious project)
if useSSL:
if os.path.exists("certificates/liked_saved_server.crt.pem"):
app.listen(port, ssl_options={"certfile":"certificates/liked_saved_server.crt.pem",
"keyfile":"certificates/liked_saved_server.crt.key"})
# For backwards compatibility
elif os.path.exists("certificates/server_jupyter_based.crt.pem"):
app.listen(port, ssl_options={"certfile":"certificates/server_jupyter_based.crt.pem",
"keyfile":"certificates/server_jupyter_based.crt.key"})
else:
print('\n\tERROR: Certificates non-existent! Run ./Generate_Certificates.sh to create them')
else:
# Show the warning only if SSL is not enabled
print('\n\tWARNING: Do NOT run this server on the internet (e.g. port-forwarded)'
' nor when\n\t connected to an insecure LAN! It is not protected against malicious use.\n')
app.listen(port)
if settings.settings['Launch_Browser_On_Startup']:
browseUrl ="{}://localhost:{}".format('https' if useSSL else 'http', port)
print("Attempting to launch user's default browser to {}".format(browseUrl))
webbrowser.open(browseUrl)
ioLoop = tornado.ioloop.IOLoop.current()
updateStatusCallback = tornado.ioloop.PeriodicCallback(updateScriptStatus, 100)
updateStatusCallback.start()
ioLoop.start()
| mit | 1,483,246,735,341,435,400 | 42.291715 | 154 | 0.60303 | false | 4.74013 | false | false | false |
DomBennett/pG-lt | setup.py | 1 | 1466 | #! /usr/bin/env python
# D.J. Bennett
# 26/05/2014
"""
setup.py for pglt
"""
import os
import pglt
from setuptools import setup, find_packages
# PACAKAGE INFO
PACKAGES = find_packages()
PACKAGE_DIRS = [p.replace(".", os.path.sep) for p in PACKAGES]
# SETUP
setup(
name="pglt",
version=pglt.__version__,
author="Dominic John Bennett",
author_email="[email protected]",
description=("pG-lt: An automated pipeline for phylogeney generation."),
license="LICENSE.txt",
keywords="phylogenetics ecology evolution conservation",
url="https://github.com/DomBennett/MassPhylogenyEstimation",
packages=PACKAGES,
package_dir=dict(zip(PACKAGES, PACKAGE_DIRS)),
package_data={'pglt': ['parameters.csv', 'gene_parameters.csv',
'dependencies.p']},
scripts=['run_pglt.py', 'pglt_set_dependencies.py'],
test_suite='tests',
long_description=pglt.__doc__,
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
install_requires=['setuptools', 'taxon_names_resolver', 'biopython',
'dendropy', 'numpy', 'tabulate'],
)
print('''
Congratulations -- you've installed pglt!
Now use `pglt_set_dependencies.py` to add external programs in order to run
the pipeline.
''')
| gpl-2.0 | 6,158,413,141,660,144,000 | 31.577778 | 76 | 0.653479 | false | 3.347032 | false | false | false |
cwfinn/igmtools | igmtools/data/spectral.py | 1 | 25454 | """
Spectral data representation.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import Data
from .atomic import get_atomdat
from astropy.units import (erg, km, cm, s, angstrom, spectral,
spectral_density, Quantity, UnitsError)
from astropy.constants import m_e, c, e
from astropy.table import Table, Column
from math import pi, sqrt, exp, log10
from warnings import warn
import numpy as np
__all__ = ['Spectrum2D', 'Spectrum1D', 'Absorber', 'EmissionLine']
e2_me_c = (e.esu ** 2 / (m_e.cgs * c.cgs)).to(cm ** 2 / s)
c_kms = c.to(km / s)
atomdat = None
def find_bin_edges(bin_centres):
"""
Find the bin edges given the bin centres.
Parameters
----------
bin_centres : array, shape (N,)
The bin centres.
Returns
-------
bins : array, shape (N + 1,)
The bin edges.
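    Notes
    -----
    For example, bin centres ``[1.0, 2.0, 3.0]`` give bin edges
    ``[0.5, 1.5, 2.5, 3.5]``.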
"""
if not isinstance(bin_centres, np.ndarray):
bin_centres = np.asarray(bin_centres)
edges = bin_centres[:-1] + 0.5 * (bin_centres[1:] - bin_centres[:-1])
bins = np.concatenate(([2 * bin_centres[0] - edges[0]], edges,
[2 * bin_centres[-1] - edges[-1]]))
return bins
class Spectrum2D(object):
"""
A 2D spectrum.
Parameters
----------
dispersion : `astropy.units.Quantity` or array, shape (N,)
Spectral dispersion axis.
data : array, shape (N, M)
The spectral data.
unit : `astropy.units.UnitBase` or str, optional
Unit for the dispersion axis.
"""
def __init__(self, dispersion, data, unit=None):
self.dispersion = Quantity(dispersion, unit=unit)
if unit is not None:
self.wavelength = self.dispersion.to(angstrom)
else:
self.wavelength = self.dispersion
self.data = data
class Spectrum1D(object):
"""
A 1D spectrum. Assumes wavelength units unless otherwise specified.
Parameters
----------
dispersion : `astropy.units.Quantity` or array
Spectral dispersion axis.
flux : `igmtools.data.Data`, `astropy.units.Quantity` or array
Spectral flux. Should have the same length as `dispersion`.
error : `astropy.units.Quantity` or array, optional
Error on each flux value.
continuum : `astropy.units.Quantity` or array, optional
An estimate of the continuum flux.
mask : array, optional
Mask for the spectrum. The values must be False where valid and True
where not.
unit : `astropy.units.UnitBase` or str, optional
Spectral unit.
dispersion_unit : `astropy.units.UnitBase` or str, optional
Unit for the dispersion axis.
meta : dict, optional
Meta data for the spectrum.
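    Examples
    --------
    A minimal usage sketch (the values here are purely illustrative)::
        wa = np.linspace(4000., 5000., 1000)   # wavelength in Angstrom
        fl = np.ones_like(wa) * 1e-16           # flux values
        er = fl * 0.1                           # 10 per cent errors
        spec = Spectrum1D(wa, fl, error=er, dispersion_unit='angstrom')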
"""
def __init__(self, dispersion, flux, error=None, continuum=None,
mask=None, unit=None, dispersion_unit=None, meta=None):
_unit = flux.unit if unit is None and hasattr(flux, 'unit') else unit
if isinstance(error, (Quantity, Data)):
if error.unit != _unit:
raise UnitsError('The error unit must be the same as the '
'flux unit.')
error = error.value
elif isinstance(error, Column):
if error.unit != _unit:
raise UnitsError('The error unit must be the same as the '
'flux unit.')
error = error.data
# Set zero error elements to NaN:
if error is not None:
zero = error == 0
error[zero] = np.nan
# Mask these elements:
if mask is not None:
self.mask = mask | np.isnan(error)
else:
self.mask = np.isnan(error)
# If dispersion is a `Quantity`, `Data`, or `Column` instance with the
# unit attribute set, that unit is preserved if `dispersion_unit` is
        # None, but overridden otherwise
self.dispersion = Quantity(dispersion, unit=dispersion_unit)
if dispersion_unit is not None:
self.wavelength = self.dispersion.to(angstrom)
else:
# Assume wavelength units:
self.wavelength = self.dispersion
self.flux = Data(flux, error, unit)
if continuum is not None:
self.continuum = Quantity(continuum, unit=unit)
else:
self.continuum = None
self.meta = meta
@classmethod
def from_table(cls, table, dispersion_column, flux_column,
error_column=None, continuum_column=None, unit=None,
dispersion_unit=None):
"""
Initialises a `Spectrum1D` object from an `astropy.table.Table`
instance.
Parameters
----------
table : `astropy.table.Table`
Contains information used to construct the spectrum. Must have
columns for the dispersion axis and the spectral flux.
dispersion_column : str
Name for the dispersion column.
flux_column : str
Name for the flux column.
error_column : str, optional
Name for the error column.
continuum_column : str, optional
Name for the continuum column.
unit : `astropy.units.UnitBase` or str, optional
Spectral unit.
dispersion_unit : `astropy.units.UnitBase` or str, optional
Unit for the dispersion axis.
"""
dispersion = Quantity(table[dispersion_column])
flux = Quantity(table[flux_column])
if error_column is not None:
error = Quantity(table[error_column])
else:
error = None
if continuum_column is not None:
continuum = Quantity(table[continuum_column])
else:
continuum = None
meta = table.meta
mask = table.mask
return cls(dispersion, flux, error, continuum, mask, unit,
dispersion_unit, meta)
def write(self, *args, **kwargs):
"""
Write the spectrum to a file. Accepts the same arguments as
`astropy.table.Table.write`
"""
if self.dispersion.unit is None:
label_string = 'WAVELENGTH'
else:
if self.dispersion.unit.physical_type == 'length':
label_string = 'WAVELENGTH'
elif self.dispersion.unit.physical_type == 'frequency':
label_string = 'FREQUENCY'
elif self.dispersion.unit.physical_type == 'energy':
label_string = 'ENERGY'
else:
raise ValueError('unrecognised unit type')
t = Table([self.dispersion, self.flux, self.flux.uncertainty.value],
names=[label_string, 'FLUX', 'ERROR'])
t['ERROR'].unit = t['FLUX'].unit
if self.continuum is not None:
t['CONTINUUM'] = self.continuum
t.write(*args, **kwargs)
def plot(self, **kwargs):
"""
Plot the spectrum. Accepts the same arguments as
`igmtools.plot.Plot`.
"""
from ..plot import Plot
p = Plot(1, 1, 1, **kwargs)
p.axes[0].plot(self.dispersion.value, self.flux.value,
drawstyle='steps-mid')
if self.flux.uncertainty is not None:
p.axes[0].plot(self.dispersion.value, self.flux.uncertainty.value,
drawstyle='steps-mid')
p.tidy()
p.display()
def normalise_to_magnitude(self, magnitude, band):
"""
Normalises the spectrum to match the flux equivalent to the
given AB magnitude in the given passband.
Parameters
----------
magnitude : float
AB magnitude.
band : `igmtools.photometry.Passband`
The passband.
"""
from ..photometry import mag2flux
mag_flux = mag2flux(magnitude, band)
spec_flux = self.calculate_flux(band)
norm = mag_flux / spec_flux
self.flux *= norm
def calculate_flux(self, band):
"""
        Calculate the mean flux in the given passband, weighted by the
        response and wavelength.
Parameters
----------
band : `igmtools.photometry.Passband`
The passband.
Returns
-------
flux : `astropy.units.Quantity`
The mean flux in erg / s / cm^2 / Angstrom.
Notes
-----
This function does not calculate an uncertainty.
"""
if (self.wavelength[0] > band.wavelength[0] or
self.wavelength[-1] < band.wavelength[-1]):
warn('Spectrum does not cover the whole bandpass, '
'extrapolating...')
dw = np.median(np.diff(self.wavelength.value))
spec_wavelength = np.arange(
band.wavelength.value[0],
band.wavelength.value[-1] + dw, dw) * angstrom
spec_flux = np.interp(spec_wavelength, self.wavelength,
self.flux.value)
else:
spec_wavelength = self.wavelength
spec_flux = self.flux.value
i, j = spec_wavelength.searchsorted(
Quantity([band.wavelength[0], band.wavelength[-1]]))
wavelength = spec_wavelength[i:j]
flux = spec_flux[i:j]
dw_band = np.median(np.diff(band.wavelength))
dw_spec = np.median(np.diff(wavelength))
if dw_spec.value > dw_band.value > 20:
            warn('Spectrum wavelength sampling interval {0:.2f}, but bandpass '
                 'sampling interval {1:.2f}'.format(dw_spec, dw_band))
# Interpolate the spectrum to the passband wavelengths:
flux = np.interp(band.wavelength, wavelength, flux)
band_transmission = band.transmission
wavelength = band.wavelength
else:
# Interpolate the band transmission to the spectrum wavelengths:
band_transmission = np.interp(
wavelength, band.wavelength, band.transmission)
# Weight by the response and wavelength, appropriate when we're
# counting the number of photons within the band:
flux = (np.trapz(band_transmission * flux * wavelength, wavelength) /
np.trapz(band_transmission * wavelength, wavelength))
flux *= erg / s / cm ** 2 / angstrom
return flux
def calculate_magnitude(self, band, system='AB'):
"""
Calculates the magnitude in a given passband.
band : `igmtools.photometry.Passband`
The passband.
system : {`AB`, `Vega`}
Magnitude system.
Returns
-------
magnitude : float
Magnitude in the given system.
"""
if system not in ('AB', 'Vega'):
raise ValueError('`system` must be one of `AB` or `Vega`')
f1 = self.calculate_flux(band)
if f1 > 0:
magnitude = -2.5 * log10(f1 / band.flux[system])
if system == 'Vega':
# Add 0.026 because Vega has V = 0.026:
magnitude += 0.026
else:
magnitude = np.inf
return magnitude
def apply_extinction(self, EBmV):
"""
Apply Milky Way extinction.
Parameters
----------
EBmV : float
Colour excess.
"""
from astro.extinction import MWCardelli89
tau = MWCardelli89(self.wavelength, EBmV=EBmV).tau
self.flux *= np.exp(-tau)
if self.continuum is not None:
self.continuum *= np.exp(-tau)
def rebin(self, dispersion):
"""
Rebin the spectrum onto a new dispersion axis.
Parameters
----------
dispersion : float, `astropy.units.Quantity` or array
The dispersion for the rebinned spectrum. If a float, assumes a
linear scale with that bin size.
"""
if isinstance(dispersion, float):
dispersion = np.arange(
self.dispersion.value[0], self.dispersion.value[-1],
dispersion)
old_bins = find_bin_edges(self.dispersion.value)
new_bins = find_bin_edges(dispersion)
widths = np.diff(old_bins)
old_length = len(self.dispersion)
new_length = len(dispersion)
i = 0 # index of old array
j = 0 # index of new array
# Variables used for rebinning:
df = 0.0
de2 = 0.0
nbins = 0.0
flux = np.zeros_like(dispersion)
error = np.zeros_like(dispersion)
# Sanity check:
if old_bins[-1] < new_bins[0] or new_bins[-1] < old_bins[0]:
raise ValueError('Dispersion scales do not overlap!')
# Find the first contributing old pixel to the rebinned spectrum:
if old_bins[i + 1] < new_bins[0]:
# Old dispersion scale extends lower than the new one. Find the
# first old bin that overlaps with the new scale:
while old_bins[i + 1] < new_bins[0]:
i += 1
i -= 1
elif old_bins[0] > new_bins[j + 1]:
# New dispersion scale extends lower than the old one. Find the
# first new bin that overlaps with the old scale:
while old_bins[0] > new_bins[j + 1]:
                flux[j] = np.nan
                error[j] = np.nan
j += 1
j -= 1
l0 = old_bins[i] # lower edge of contributing old bin
while True:
h0 = old_bins[i + 1] # upper edge of contributing old bin
h1 = new_bins[j + 1] # upper edge of jth new bin
if h0 < h1:
# Count up the decimal number of old bins that contribute to
# the new one and start adding up fractional flux values:
if self.flux.uncertainty.value[i] > 0:
bin_fraction = (h0 - l0) / widths[i]
nbins += bin_fraction
# We don't let `Data` handle the error propagation here
# because a sum of squares will not give us what we
# want, i.e. 0.25**2 + 0.75**2 != 0.5**2 + 0.5**2 != 1**2
df += self.flux.value[i] * bin_fraction
de2 += self.flux.uncertainty.value[i] ** 2 * bin_fraction
l0 = h0
i += 1
if i == old_length:
break
else:
# We have all but one of the old bins that contribute to the
# new one, so now just add the remaining fraction of the new
# bin to the decimal bin count and add the remaining
# fractional flux value to the sum:
if self.flux.uncertainty.value[i] > 0:
bin_fraction = (h1 - l0) / widths[i]
nbins += bin_fraction
df += self.flux.value[i] * bin_fraction
de2 += self.flux.uncertainty.value[i] ** 2 * bin_fraction
if nbins > 0:
# Divide by the decimal bin count to conserve flux density:
flux[j] = df / nbins
error[j] = sqrt(de2) / nbins
else:
flux[j] = 0.0
error[j] = 0.0
df = 0.0
de2 = 0.0
nbins = 0.0
l0 = h1
j += 1
if j == new_length:
break
if hasattr(self.dispersion, 'unit'):
dispersion = Quantity(dispersion, self.dispersion.unit)
if hasattr(self.flux, 'unit'):
flux = Data(flux, error, self.flux.unit)
# Linearly interpolate the continuum onto the new dispersion scale:
if self.continuum is not None:
continuum = np.interp(dispersion, self.dispersion, self.continuum)
else:
continuum = None
return self.__class__(dispersion, flux, continuum=continuum)
class Absorber(object):
"""
Class representation of an absorber.
Parameters
----------
identifier : str
Name of the ion, molecule or isotope, e.g. `HI`.
redshift : float, optional
Redshift of the absorber.
logn : float, optional
Log10 column density (cm^-2).
b : float, optional
Doppler broadening parameter (km/s).
covering_fraction : float, optional
Covering fraction.
atom : `igmtools.data.AtomDat`, optional
Atomic data.
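    Examples
    --------
    ``Absorber('HI', redshift=2.5, logn=14.0, b=25)`` describes an HI
    absorber at z = 2.5 with log10(N / cm^-2) = 14 and b = 25 km/s.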
"""
def __init__(self, identifier, redshift=None, logn=None, b=None,
covering_fraction=1, atom=None):
if atom is None:
atom = get_atomdat()
self.identifier = identifier
self.transitions = atom[identifier]
self.redshift = redshift
self.logn = logn
self.b = b
self.covering_fraction = covering_fraction
def __repr__(self):
return 'Absorber({0}, z={1:.2f}, logN={2:.2f}, b={3})'.format(
self.identifier, self.redshift, self.logn, int(self.b))
@classmethod
def from_tau_peak(cls, transition, tau, b):
"""
Initialise an absorber from the optical depth at line centre and
Doppler broadining parameter of a given transition.
Parameters
----------
transition : str
            Name of the transition, e.g. `HI 1215`.
tau : float
Optical depth at the line centre.
b : float
Doppler broadening parameter (km/s).
"""
atom = get_atomdat()
transition = atom.get_transition(transition)
if isinstance(b, Quantity):
b = b.to(cm / s)
else:
b = (b * km / s).to(cm / s)
wavelength = transition.wavelength.to(cm)
osc = transition.osc
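        # Invert the line-centre optical depth relation
        # tau_0 = sqrt(pi) * (e^2 / m_e c) * f * lambda * N / b
        # to recover the column density N: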
column = tau * b / (sqrt(pi) * e2_me_c * osc * wavelength)
logn = log10(column.value)
return cls(identifier=transition.parent, logn=logn, b=b)
def optical_depth(self, dispersion):
"""
Calculates the optical depth profile for a given spectral
dispersion array.
Parameters
----------
dispersion : array
Spectral dispersion.
Returns
-------
tau : array
The optical depth profile.
"""
from ..calculations import optical_depth, tau_peak
if isinstance(dispersion, Quantity):
dispersion = dispersion.to(angstrom)
elif hasattr(dispersion, 'unit'):
if dispersion.unit is not None:
dispersion = dispersion.to(angstrom)
else:
dispersion = Quantity(dispersion, unit=angstrom)
velocity_range = ([-20000, 20000] * km / s if self.logn > 18
else [-1000, 1000] * km / s)
# Select only transitions with redshifted central wavelengths inside
# `dispersion` +/- 500 km/s:
rest_wavelengths = Quantity([t.wavelength for t in self.transitions])
observed_wavelengths = rest_wavelengths * (1 + self.redshift)
wmin = dispersion[0] * (1 - 500 * km / s / c_kms)
        wmax = dispersion[-1] * (1 + 500 * km / s / c_kms)
in_range = ((observed_wavelengths >= wmin) &
(observed_wavelengths <= wmax))
transitions = np.array(self.transitions)[in_range]
tau = np.zeros_like(dispersion.value)
for i, transition in enumerate(transitions):
tau_max = tau_peak(transition, self.logn, self.b)
if 1 - exp(-tau_max) < 1e-3:
continue
observed_wavelength = transition.wavelength * (1 + self.redshift)
dv = ((dispersion - observed_wavelength) /
observed_wavelength * c_kms)
i0, i1 = dv.searchsorted(velocity_range)
tau0 = optical_depth(dv[i0:i1], transition, self.logn, self.b)
tau[i0:i1] += tau0
return tau
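# Usage sketch for the class above (the numbers are hypothetical, and it
# assumes get_atomdat() can find the atomic data shipped with igmtools):
def _absorber_usage_sketch():
    absorber = Absorber('HI', redshift=0.1, logn=14.0, b=25.0)
    wavelengths = np.linspace(1330, 1344, 2000) * angstrom
    tau = absorber.optical_depth(wavelengths)
    # transmission expected from this single absorber on that grid
    return np.exp(-tau)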
class EmissionLine(object):
"""
Class representation of an emission line and its properties.
Parameters
----------
wavelength : float
Rest frame wavelength of the line in Angstrom.
redshift : float
Redshift of the emission line.
flux : `igmtools.data.Data`, optional
Integrated line flux.
cont : `igmtools.data.Data`, optional
Continuum flux at the line centre.
eqw : `igmtools.data.Data`, optional
Equivalent width of the line.
"""
def __init__(self, wavelength, redshift, flux=None, cont=None, eqw=None):
from ..calculations import comoving_distance
if flux and not isinstance(flux, Data):
raise ValueError('flux must be an instance of a Data object')
if cont and not isinstance(cont, Data):
raise ValueError('cont must be an instance of a Data object')
if eqw and not isinstance(eqw, Data):
raise ValueError('eqw must be an instance of a Data object')
if isinstance(wavelength, Quantity):
self.wavelength = wavelength.to(angstrom)
else:
self.wavelength = wavelength * angstrom
self.redshift = redshift
self.wavelength_observed = self.wavelength * (1 + self.redshift)
if flux:
self._flux = flux.to(erg / cm ** 2 / s, equivalencies=spectral())
self.rflux = self._flux * (1 + redshift) ** 2
distance = comoving_distance(self.redshift).cgs
self.luminosity = 4 * pi * distance ** 2 * self._flux
if cont and eqw:
self._cont = cont.to(
erg / cm ** 2 / s / angstrom,
equivalencies=spectral_density(self.wavelength_observed))
self.rcont = self._cont * (1 + redshift) ** 3
self._eqw = eqw.to(angstrom)
self.reqw = self._eqw / (1 + redshift)
elif cont and not eqw:
self._cont = cont.to(
erg / cm ** 2 / s / angstrom,
equivalencies=spectral_density(self.wavelength_observed))
self.rcont = self._cont * (1 + redshift) ** 3
self._eqw = self._flux / self._cont
self.reqw = self._eqw / (1 + redshift)
elif eqw and not cont:
self._eqw = eqw.to(angstrom)
self.reqw = self._eqw / (1 + redshift)
self._cont = self._flux / self._eqw
self.rcont = self._cont * (1 + redshift) ** 3
else:
self._eqw = eqw
self.reqw = None
self._cont = cont
self.rcont = None
elif cont:
self._cont = cont.to(
erg / cm ** 2 / s / angstrom,
equivalencies=spectral_density(self.wavelength_observed))
self.rcont = self._cont * (1 + redshift) ** 3
if eqw:
self._eqw = eqw.to(angstrom)
self.reqw = self._eqw / (1 + redshift)
self._flux = self._cont * self._eqw
self.rflux = self._flux * (1 + redshift) ** 2
distance = comoving_distance(self.redshift).cgs
self.luminosity = 4 * pi * distance ** 2 * self._flux
else:
self._eqw = eqw
self.reqw = None
self._flux = flux
self.rflux = None
self.luminosity = None
elif eqw:
self._eqw = eqw.to(angstrom)
self.reqw = self._eqw / (1 + redshift)
self._flux = flux
self.rflux = None
self._cont = cont
self.rcont = None
self.luminosity = None
else:
self._flux = flux
self.rflux = None
self._cont = cont
self.rcont = None
self._eqw = eqw
self.reqw = None
self.luminosity = None
@property
def flux(self):
return self._flux
@flux.setter
def flux(self, value):
from ..calculations import comoving_distance
if not isinstance(value, Data):
raise ValueError('flux must be an instance of a Data object')
self._flux = value.to(erg / cm ** 2 / s, equivalencies=spectral())
self.rflux = self._flux * (1 + self.redshift) ** 2
distance = comoving_distance(self.redshift).cgs
self.luminosity = 4 * pi * distance ** 2 * self._flux
@property
def cont(self):
return self._cont
@cont.setter
def cont(self, value):
if not isinstance(value, Data):
raise ValueError('cont must be an instance of a Data object')
self._cont = value.to(
erg / cm ** 2 / s / angstrom,
equivalencies=spectral_density(self.wavelength_observed))
self.rcont = self._cont * (1 + self.redshift) ** 3
@property
def eqw(self):
return self._eqw
@eqw.setter
def eqw(self, value):
if not isinstance(value, Data):
raise ValueError('eqw must be an instance of a Data object')
self._eqw = value.to(angstrom)
self.reqw = self._eqw / (1 + self.redshift)
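# Usage sketch (hypothetical values; not part of the original module). Only
# the integrated flux is known at construction time here, so the continuum
# and equivalent width stay None until they are assigned through the setters.
def _emission_line_usage_sketch():
    line = EmissionLine(3727.0, redshift=0.5,
                        flux=Data(2e-16, 1e-17, erg / cm ** 2 / s))
    line.eqw = Data(30.0, 3.0, angstrom)  # stores eqw and its rest-frame value
    return line.luminosity, line.reqw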
| bsd-3-clause | -3,835,039,443,086,312,400 | 28.15693 | 79 | 0.542626 | false | 3.959863 | false | false | false |
yunstanford/GraphiteSetup | carbon_cache.py | 1 | 1349 | import subprocess
import sys
import string
import os
def start_carbon_cache_instance(name):
path = os.path.realpath(__file__)
subprocess.call(["python", "{0}/carbon-cache.py".format(os.path.dirname(path)), "--instance={0}".format(name), "start"])
def stop_carbon_cache_instance(name):
path = os.path.realpath(__file__)
subprocess.call(["python", "{0}/carbon-cache.py".format(os.path.dirname(path)), "--instance={0}".format(name), "stop"])
def usage():
print("carbon_cache [start/stop] [instance name type: letter or number] [number of instances]")
print("instance names should be continuous")
print("For example: 1, 2, 3,... or a, b, c,...")
print("Usage: python carbon_cache start n 5")
def main():
if len(sys.argv) < 4:
print("Too few arguments")
usage()
return
if len(sys.argv) > 4:
print("Too many arguments")
usage()
return
if sys.argv[1] not in ['start', 'stop']:
print("Wrong operation! start or stop only!")
return;
if sys.argv[2] not in ['n', 'l']:
print("Wrong Type! l or n only!")
return
num = int(sys.argv[3])
if sys.argv[1] == 'start':
func = start_carbon_cache_instance
else:
func = stop_carbon_cache_instance
if sys.argv[2] == 'n':
for i in range(num):
func(i + 1)
else:
li = list(string.ascii_lowercase)[:num]
for i in li:
func(i)
if __name__ == '__main__':
main()
| mit | -7,785,795,469,886,082,000 | 22.666667 | 121 | 0.64344 | false | 2.834034 | false | false | false |
daleloogn/mython | mfcc_diy.py | 1 | 1134 | from scikits.talkbox.features.mfcc import *
__author__ = 'zhangxulong'
def mfcc(input, nwin=800, nfft=512, fs=8000, nceps=13):
# MFCC parameters: taken from auditory toolbox
over = 160
# Pre-emphasis factor (to take into account the -6dB/octave rolloff of the
# radiation at the lips level)
prefac = 0.97
# lowfreq = 400 / 3.
lowfreq = 133.33
# highfreq = 6855.4976
linsc = 200 / 3.
logsc = 1.0711703
nlinfil = 13
nlogfil = 27
nfil = nlinfil + nlogfil
w = hamming(nwin, sym=0)
fbank = trfbank(fs, nfft, lowfreq, linsc, logsc, nlinfil, nlogfil)[0]
# ------------------
# Compute the MFCC
# ------------------
extract = preemp(input, prefac)
framed = segment_axis(extract, nwin, over) * w
# Compute the spectrum magnitude
spec = np.abs(fft(framed, nfft, axis=-1))
# Filter the spectrum through the triangle filterbank
mspec = np.log10(np.dot(spec, fbank.T))
# Use the DCT to 'compress' the coefficients (spectrum -> cepstrum domain)
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:, :nceps]
return ceps, mspec, spec
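# Minimal usage sketch (hypothetical signal; assumes scikits.talkbox and its
# dependencies are installed):
if __name__ == '__main__':
    import numpy as np
    fs = 8000
    t = np.arange(0, 1.0, 1.0 / fs)
    tone = np.sin(2 * np.pi * 440.0 * t)  # one second of a 440 Hz sine
    ceps, mspec, spec = mfcc(tone, nwin=800, nfft=512, fs=fs, nceps=13)
    print(ceps.shape)  # (number of frames, nceps)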
| gpl-2.0 | 4,610,380,613,209,277,000 | 27.35 | 78 | 0.61552 | false | 3 | false | false | false |
tummy-dot-com/tummy-backup | www/tummy-backup/controller.py | 1 | 30004 | #!/usr/bin/env python
import os
import sys
import time
sys.path.append('/usr/local/lib/tummy-backup/www/lib')
sys.path.append('/usr/local/lib/tummy-backup/lib')
import tbsupp
import cherrypy
import cherrypy.lib.auth_basic
from genshi.template import TemplateLoader
from formencode import Invalid, Schema, validators
emptyPng = (
'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x03\x00\x00\x00'
'\x03\x08\x02\x00\x00\x00\xd9J"\xe8\x00\x00\x00\x01sRGB\x00\xae\xce'
'\x1c\xe9\x00\x00\x00\x14IDAT\x08\xd7c```\xf8\xff\xff?\x03\x9cB\xe1'
'\x00\x00\xb3j\x0b\xf52-\x07\x95\x00\x00\x00\x00IEND\xaeB`\x82')
loader = TemplateLoader(
os.path.join(os.path.dirname(__file__), 'templates'),
auto_reload=True)
def dbconnect():
from psycopgwrap import Database as db
db.connect()
return db
def db_close():
from psycopgwrap import Database as db
db.close()
cherrypy.tools.db_close = cherrypy.Tool('on_end_request', db_close)
def url(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and type(args[0]) in (str, unicode):
return cherrypy.url(args[0])
else:
import routes
return cherrypy.url(routes.url_for(*args, **kwargs))
def flash_present():
return 'flash' in cherrypy.session
def flash_get():
data = cherrypy.session.get('flash')
del(cherrypy.session['flash'])
return data
def contextFromLocals(locals):
from genshi.template import Context
data = {
'url': url, 'cherrypy': cherrypy,
'current_dashboard': {},
'current_config': {},
'current_indexdetails': {},
'current_newbackup': {},
'host_menu': False,
'flash_present': flash_present,
'flash_get': flash_get, }
for key, value in locals.items():
if key.startswith('_'):
continue
if key == 'self':
continue
data[key] = value
return(Context(**data))
def processform(validator, defaults, form, list_fields=[]):
'''Simple form validator, see `sysconfig()` for example use. Any field
names listed in `list_fields` will always be converted to a list before
processing.
'''
if cherrypy.request.method != 'POST':
return {}, defaults
# convert fields that may be lists to always be lists
for field in list_fields:
if field in form and not isinstance(form[field], (tuple, list)):
form[field] = (form[field], )
try:
return {}, validator.to_python(form)
except Invalid, e:
return e.unpack_errors(), form
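# Illustrative sketch (not part of the original file) of how the handlers
# below use processform(); the defaults shown here are hypothetical and the
# call only makes sense inside a cherrypy request:
def _processform_usage_sketch(kwargs):
    defaults = {'mail_to': '', 'failure_warn': '', 'rsync_timeout': 3600,
                'rsync_username': 'backup'}
    errors, formdata = processform(SystemConfigValidator(), defaults, kwargs)
    if errors:
        # re-render the form, highlighting the offending fields
        return errors
    # formdata now holds validated, converted values ready to persist
    return formdata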
def validateTime(s):
import re
if not re.match(r'^[0-9][0-9]:[0-9][0-9](:[0-9][0-9])?$', s):
raise ValueError('Invalid time format.')
return(s)
def validateExcludeRule(s):
import re
if not re.match(
r'^(include|exclude|merge|dir-merge|merge|\+|1)\s+\S.*$', s):
raise ValueError('Invalid exclude rule format.')
return(s)
def validateHostname(s):
import re
if not re.match(r'^[-a-zA-Z0-9._]+$', s):
raise ValueError('Invalid hostname.')
return(s)
def validateFailureWarn(s):
from psycopg2 import DataError
db = dbconnect()
if s == '':
return None
try:
db.queryone("SELECT %s::INTERVAL", s)
except DataError:
raise ValueError(
'Must be empty (to use default) '
'or a PostgreSQL interval like "3 days".')
return s
def validateServername(s):
db = dbconnect()
row = db.queryone(
"SELECT COUNT(*) FROM backupservers WHERE hostname = %s", s)
if row[0] != 1:
raise ValueError('Invalid backup server name.')
return(s)
class NewHostValidator(Schema):
hostname = validators.Wrapper(
validate_python=validateHostname, not_empty=True)
servername = validators.Wrapper(
validate_python=validateServername, not_empty=True)
class HostConfigValidator(Schema):
from formencode.foreach import ForEach
new_exclude_priority = ForEach(
validators.Int(min=0, max=9, not_empty=True), convert_to_list=True)
new_exclude_rule = ForEach(validators.Wrapper(
validate_python=validateExcludeRule,
not_empty=True), convert_to_list=True)
use_global_excludes = validators.StringBoolean(if_missing=False)
rsync_do_compress = validators.StringBoolean(if_missing=False)
active = validators.StringBoolean(if_missing=False)
rsync_bwlimit = validators.Int(not_empty=False, min=0)
priority = validators.Int(not_empty=True, min=0, max=10)
retain_daily = validators.Int(not_empty=True, min=0)
retain_weekly = validators.Int(not_empty=True, min=0)
retain_monthly = validators.Int(not_empty=True, min=0)
window_start_time = validators.Wrapper(validate_python=validateTime)
window_end_time = validators.Wrapper(validate_python=validateTime)
failure_warn = validators.Wrapper(
validate_python=validateFailureWarn, not_empty=False)
class SystemConfigValidator(Schema):
mail_to = validators.Email()
failure_warn = validators.Wrapper(
validate_python=validateFailureWarn, not_empty=False)
rsync_timeout = validators.Int(not_empty=True, min=60, max=160000)
rsync_username = validators.String(
not_empty=True, strip=True, min=1, max=40)
class Root(object):
def __init__(self):
pass
@cherrypy.expose
def index(self):
current_dashboard = {'class': 'current'}
db = dbconnect()
hosts_needing_attention = tbsupp.getHostsNeedingAttention(db)
hosts = list(db.query(
"SELECT *, "
" (SELECT COUNT(*) FROM backups "
"WHERE host_id = hosts.id AND generation = 'daily' "
"AND successful = 't') AS daily_count, "
" (SELECT COUNT(*) FROM backups "
"WHERE host_id = hosts.id AND generation = 'weekly' "
"AND successful = 't') AS weekly_count, "
" (SELECT COUNT(*) FROM backups "
"WHERE host_id = hosts.id AND generation = 'monthly' "
"AND successful = 't') AS monthly_count, "
"(SELECT NOW() - MAX(start_time) FROM backups "
"WHERE host_id = hosts.id "
"AND successful = 't') AS last_backup "
"FROM hosts "
"ORDER BY hostname"))
active_backups = list(db.query(
"SELECT * FROM backups, hosts "
"WHERE backups.backup_pid IS NOT NULL "
"AND hosts.id = backups.host_id "
"ORDER BY backups.start_time"))
title = 'Tummy-Backup'
graphdate = ''
graphdatajs = ''
graphdatajsmax = ''
for row in db.query(
"SELECT sample_date, AVG(usage_pct) AS usage_pct, "
"MAX(usage_pct) as max_usage_pct "
"FROM serverusage GROUP BY sample_date ORDER BY sample_date;"):
graphdate += '"%s",' % row['sample_date'].strftime('%Y-%m-%d')
graphdatajs += '%.1f,' % row['usage_pct']
graphdatajsmax += '%.1f,' % row['max_usage_pct']
graphdate += ''
graphdatajs += ''
graphdatajsmax += ''
tmpl = loader.load('index.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def detailedindex(self):
import tbsupp
current_indexdetails = {'class': 'current'}
db = dbconnect()
hosts_needing_attention = tbsupp.getHostsNeedingAttention(db)
hosts = list(db.query(
"SELECT *, "
" (SELECT COUNT(*) FROM backups "
"WHERE host_id = hosts.id AND generation = 'daily' "
"AND successful = 't') AS daily_count, "
" (SELECT COUNT(*) FROM backups "
"WHERE host_id = hosts.id AND generation = 'weekly' "
"AND successful = 't') AS weekly_count, "
" (SELECT COUNT(*) FROM backups "
"WHERE host_id = hosts.id AND generation = 'monthly' "
"AND successful = 't') AS monthly_count, "
"(SELECT NOW() - MAX(start_time) FROM backups "
"WHERE host_id = hosts.id "
"AND successful = 't') AS last_backup "
"FROM hosts "
"ORDER BY hostname"))
title = 'Detailed Index'
tmpl = loader.load('index-detailed.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def sysconfig(self, **kwargs):
db = dbconnect()
title = 'Tummy-Backup Configuration'
current_config = {'class': 'current'}
config = db.queryone("SELECT * FROM config")
errors, formdata = processform(SystemConfigValidator(), config, kwargs)
if cherrypy.request.method == 'POST' and not errors:
for field in [
'mail_to', 'failure_warn', 'rsync_timeout',
'rsync_username']:
db.query(
"UPDATE config SET %s = %%s" % field, formdata[field])
db.commit()
cherrypy.session['flash'] = 'Settings saved successfully.'
raise cherrypy.HTTPRedirect(url('/config'))
tmpl = loader.load('sysconfig.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def hostsearch(self, **kwargs):
db = dbconnect()
hostname = kwargs['hostname']
hosts = list(db.query(
"SELECT hostname FROM hosts WHERE hostname ~* %s", hostname))
# redirect if only one match
if len(hosts) == 1:
raise cherrypy.HTTPRedirect(url(str(
'/hosts/%s/' % hosts[0]['hostname'])))
# return search results page
if len(hosts) > 1:
title = 'Host Search: %s' % (hostname,)
tmpl = loader.load('host-search-list.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
# error page if not found
title = 'No Hosts Found'
cherrypy.response.status = 404
tmpl = loader.load('host-search-notfound.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def host(self, hostname):
db = dbconnect()
title = 'Host %s' % hostname
host = db.queryone("SELECT * FROM hosts WHERE hostname = %s", hostname)
# search for host if we didn't find an exact match above
if not host:
hosts = list(db.query(
"SELECT hostname FROM hosts WHERE hostname ~* %s", hostname))
# redirect if only one match
if len(hosts) == 1:
raise cherrypy.HTTPRedirect(url(str(
'/hosts/%s/' % hosts[0]['hostname'])))
# return search results page
if len(hosts) > 1:
title = 'Host Search: %s' % (hostname,)
tmpl = loader.load('host-search-list.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
# error page if not found
title = 'No Hosts Found'
cherrypy.response.status = 404
tmpl = loader.load('host-search-notfound.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
server = db.queryone(
"SELECT * from backupservers WHERE id = %s",
host['backupserver_id'])
backups = db.query(
"SELECT backups.* FROM backups, hosts "
"WHERE backups.host_id = hosts.id AND hosts.hostname = %s "
"ORDER BY backups.id DESC", hostname)
mostrecentbackup = db.queryone(
"SELECT * FROM backups "
"WHERE host_id = %s ORDER BY id DESC LIMIT 1", host['id'])
from tbsupp import describe_rsync_exit_code # NOQA
dategraphdatajs = ''
datagraphdatajs = ''
snapgraphdatajs = ''
maxSize = 0
for row in db.query(
"SELECT "
"sample_date, SUM(used_by_dataset) AS used_by_dataset, "
"SUM(used_by_snapshots) AS used_by_snapshots "
"FROM backupusage "
"WHERE host_id = %s "
"GROUP BY sample_date ORDER BY sample_date;", host['id']):
dategraphdatajs += '"%s",' % (
row['sample_date'].strftime('%Y-%m-%d'))
datagraphdatajs += '%d,' % row['used_by_dataset']
snapgraphdatajs += '%d,' % row['used_by_snapshots']
total = row['used_by_snapshots'] + row['used_by_dataset']
if total > maxSize:
maxSize = total
if datagraphdatajs == '[':
dategraphdatajs = '"%s"' % time.strftime('%Y/%m/%d')
datagraphdatajs = '0,'
snapgraphdatajs = '0,'
yticks = '['
sizel = [
[0, 'B'], [1024, 'KiB'], [1024 ** 2, 'MiB'], [1024 ** 3, 'GiB'],
[1024 ** 4, 'TiB'], [1024 ** 5, 'PiB'], [1024 ** 6, 'EiB']]
for size, ext in sizel:
if maxSize >= size:
order = size
suffix = ext
if maxSize > 0:
val = float(maxSize) / float(order)
else:
val = 0
tweak = 10
rounding = 3
deci = 1
for testmax, testtweak, testround, testdeci in [
[10, 1, 2, 0], [100, 0.1, 1, 0]]:
if val >= testmax:
tweak = testtweak
rounding = testround
deci = testdeci
peak = (int(val * tweak) + rounding) / float(tweak)
yticks += "[0, '0'],"
yticks += "[%d,'%0.*f %s']," % (
peak * order * 0.25, deci, peak * 0.25, suffix)
yticks += "[%d,'%0.*f %s']," % (
peak * order * 0.5, deci, peak * 0.5, suffix)
yticks += "[%d,'%0.*f %s']," % (
peak * order * 0.75, deci, peak * 0.75, suffix)
yticks += "[%d,'%0.*f %s']," % (peak * order, deci, peak, suffix)
yticks += ']'
tmpl = loader.load('host.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def hostconfig(self, hostname, **kwargs):
import datetime
db = dbconnect()
title = 'Configure Host %s' % (hostname,)
now = datetime.datetime.now()
host = db.queryone("SELECT * FROM hosts WHERE hostname = %s", hostname)
if not host:
title = 'Invalid Host'
tmpl = loader.load('hostconfig-invalidhost.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
excludes = db.query(
"SELECT * FROM excludes "
"WHERE (host_id IS NULL AND %s::BOOLEAN) OR host_id = %s "
"ORDER BY priority", host['use_global_excludes'], host['id'])
# strip off any trailing form list entries
while (kwargs.get('new_exclude_priority')
and not kwargs['new_exclude_priority'][-1]
and kwargs.get('new_exclude_rule')
and not kwargs['new_exclude_rule'][-1]):
kwargs['new_exclude_priority'] = kwargs[
'new_exclude_priority'][:-1]
kwargs['new_exclude_rule'] = kwargs['new_exclude_rule'][:-1]
if (not kwargs.get('new_exclude_priority')
and not kwargs.get('new_exclude_rule')):
if 'new_exclude_priority' in kwargs:
del(kwargs['new_exclude_priority'])
if 'new_exclude_rule' in kwargs:
del(kwargs['new_exclude_rule'])
errors, formdata = processform(
HostConfigValidator(),
dict(new_exclude_priority='', new_exclude_rule='', **dict(host)),
dict([
(key, kwargs[key]) for key in kwargs.keys()
if not key.startswith('delete_')]),
['new_exclude_priority', 'new_exclude_rule'])
if cherrypy.request.method == 'POST' and not errors:
for field in [
'active', 'use_global_excludes', 'retain_daily',
'retain_weekly', 'retain_monthly', 'rsync_do_compress',
'rsync_bwlimit', 'priority',
'window_start_time', 'window_end_time', 'failure_warn']:
db.query(
"UPDATE hosts SET %s = %%s WHERE id = %%s" % field,
formdata[field], host['id'])
if (formdata['new_exclude_priority']
and formdata['new_exclude_rule']):
for priority, rule in zip(
formdata['new_exclude_priority'],
formdata['new_exclude_rule']):
db.query(
"INSERT INTO excludes "
"( host_id, priority, rsync_rule ) "
"VALUES ( %s, %s, %s )", host['id'], priority, rule)
deleteList = [
int(x.split('_', 1)[1]) for x in kwargs.keys()
if x.startswith('delete_')]
for deleteId in deleteList:
db.query(
"DELETE FROM excludes WHERE id = %s AND host_id = %s",
deleteId, host['id'])
db.commit()
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, clienthostname=hostname),
'updatekey\0%s\n' % (hostname,))
if output.template_error():
cherrypy.session['flash'] = (
'Error saving settings: %s' % output.template_error())
else:
cherrypy.session['flash'] = 'Settings saved successfully.'
raise cherrypy.HTTPRedirect('config')
rules_data = []
if 'new_exclude_rule' in errors or 'new_exclude_priority' in errors:
import itertools
rules_data = list(itertools.izip_longest(
formdata.get('new_exclude_priority', []),
errors.get('new_exclude_priority', []),
formdata.get('new_exclude_rule', []),
errors.get('new_exclude_rule', [])))
tmpl = loader.load('hostconfig.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def hostdestroy(self, hostname, **kwargs):
db = dbconnect()
title = 'Destroy Host %s' % (hostname,)
host = db.query("SELECT * FROM hosts WHERE hostname = %s", hostname)[0]
if cherrypy.request.method == 'POST':
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, clienthostname=hostname),
'destroyhost\0%s\n' % (hostname,))
if output.template_error():
cherrypy.session['flash'] = (
'Error destroying host: %s' % output.template_error())
raise cherrypy.HTTPRedirect(url('/'))
tmpl = loader.load('hostdestroy.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def backupdestroy(self, hostname, backupid, **kwargs):
db = dbconnect()
title = 'Destroy Backup %s for %s' % (backupid, hostname)
host = db.queryone("SELECT * FROM hosts WHERE hostname = %s", hostname)
backup = db.queryone(
"SELECT * FROM backups WHERE id = %s", backupid)
if cherrypy.request.method == 'POST':
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, backupid=backupid),
'destroybackup\0%s\n' % (backupid,))
if output.template_error():
cherrypy.session['flash'] = (
'Error destroying backup: %s' % output.template_error())
raise cherrypy.HTTPRedirect('..')
tmpl = loader.load('backupdestroy.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def backupctl(self, hostname, action):
db = dbconnect()
command = 'startbackup'
if action.startswith('Kill'):
command = 'killbackup'
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, clienthostname=hostname),
'%s\0%s\n' % (command, hostname))
# wait for child to finish
output.stdout.read()
if output.template_error():
cherrypy.session['flash'] = (
'Error sending command: %s' % output.template_error())
else:
cherrypy.session['flash'] = (
'Backup "%s" message sent' % command[:-6])
raise cherrypy.HTTPRedirect('.')
@cherrypy.expose
def backuplogfiles(self, hostname, backupid, **kwargs):
from genshi.core import Markup
db = dbconnect()
title = 'Logs for Backup %s of %s' % (backupid, hostname)
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, backupid=backupid),
'logfiles\0%s\n' % backupid)
logfileoutput = Markup(output.stdout.read())
webcmd_error = output.template_error()
tmpl = loader.load('backuplogfiles.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def backupajax(self, hostname, backup, **kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
db = dbconnect()
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, clienthostname=hostname),
'fsbrowsejson\0%s\0%s\n' % (backup, kwargs['key']))
return(output.persistent_stdout())
@cherrypy.expose
def backuprecover(self, hostname, backupid, **kwargs):
db = dbconnect()
title = 'Recovery %s for %s' % (backupid, hostname)
if cherrypy.request.method == 'POST':
import pickle
try:
import json
except ImportError:
import simplejson as json
recoverylist = json.loads(kwargs['recoverylist'])
db = dbconnect()
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, backupid=backupid),
('createtar\0%s\n' % backupid) + pickle.dumps(recoverylist))
filename = 'recovery-%s-%s.tar.gz' % (hostname, backupid)
cherrypy.response.headers['Content-Type'] = 'application/x-tar'
cherrypy.response.headers[
'Content-Disposition'] = 'attachment; filename="%s"' % filename
return output.persistent_stdout()
tmpl = loader.load('backup-recovery.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def backup(self, hostname, backupid):
db = dbconnect()
title = 'Backup %s for %s' % (backupid, hostname)
host = db.queryone("SELECT * FROM hosts WHERE hostname = %s", hostname)
backup = db.queryone(
"SELECT * FROM backups "
"WHERE host_id = %s AND backups.id = %s ",
host['id'], int(backupid))
from tbsupp import describe_rsync_exit_code # NOQA
tmpl = loader.load('backup.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def hostkeys(self, hostname):
db = dbconnect()
title = 'SSH Key for %s' % (hostname,)
output = tbsupp.runWebCmd(
tbsupp.lookupBackupServer(db, clienthostname=hostname),
'hostkey\0%s\n' % hostname)
key = output.stdout.read()
webcmd_error = output.template_error()
tmpl = loader.load('host-keys.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
@cherrypy.expose
def newbackup(self, **kwargs):
db = dbconnect()
title = 'Backup New Host'
current_newbackup = {'class': 'current'}
errors, formdata = processform(
NewHostValidator(), dict(hostname='', servername=''), kwargs)
if cherrypy.request.method == 'POST' and not errors:
db = dbconnect()
output = tbsupp.runWebCmd(tbsupp.lookupBackupServer(
db, backupservername=formdata['servername']),
'newbackup\0%s\n' % formdata['hostname'])
# wait for child to finish
output.stdout.read()
if output.template_error():
cherrypy.session['flash'] = (
'Error creating backup: %s' % output.template_error())
raise cherrypy.HTTPRedirect(url(str(
'/hosts/%s/config' % formdata['hostname'])))
# get list of servers and post-process elements
backupservers = []
for server in list(db.query("""SELECT backupservers.*,
(SELECT usage_pct FROM serverusage
WHERE server_id = backupservers.id
ORDER BY id DESC LIMIT 1) AS usage_pct
FROM backupservers ORDER BY hostname""")):
data = {'usage_pct_string': '', 'selected': None}
data.update(server)
if server['usage_pct']:
data['usage_pct_string'] = ' (%d%%)' % server['usage_pct']
backupservers.append(data)
# select the element with the lowest usage
try:
min_usage = min([
x['usage_pct'] for x in backupservers
if x['usage_pct'] is not None])
[x for x in backupservers if x['usage_pct'] == min_usage][0][
'selected'] = True
except ValueError:
min_usage = None
# hide server list if there's only one server
show_serverlist = {}
if len(backupservers) == 1:
show_serverlist = {'class': 'hidden'}
tmpl = loader.load('newbackup.html')
return tmpl.generate(contextFromLocals(locals())).render(
'html', doctype='html')
def checkpassword(realm, user, passwd):
import crypt
db = dbconnect()
data = db.query(
'SELECT cryptedpassword FROM users '
'WHERE name = %s AND cryptedpassword IS NOT NULL', user)
if data:
data = list(data)
if not data or not data[0]:
return(False)
return data[0][0] == crypt.crypt(passwd, data[0][0])
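# Sketch (an assumption, not from the original code) of how a matching
# `cryptedpassword` value could be generated for the users table queried
# above. crypt() embeds the salt in its output, which is why checkpassword()
# can pass the stored value back in as the salt when verifying.
def _make_cryptedpassword(passwd, salt='$1$tummybkp$'):
    import crypt
    return crypt.crypt(passwd, salt)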
def routes():
root = Root()
d = cherrypy.dispatch.RoutesDispatcher()
mapper = d.mapper
mapper.explicit = False
mapper.minimization = False
d.connect(
'backup', r'/hosts/{hostname}/{backupid}/', root, action='backup')
d.connect(
'backuplogfiles', r'/hosts/{hostname}/{backupid}/logfiles', root,
action='backuplogfiles')
d.connect(
'backupajax', r'/hosts/{hostname}/{backup}/ajax', root,
action='backupajax')
d.connect(
'backuprecover', r'/hosts/{hostname}/{backupid}/recover', root,
action='backuprecover')
d.connect(
'backupdestroy', r'/hosts/{hostname}/{backupid}/destroy', root,
action='backupdestroy')
d.connect('host', r'/hosts/{hostname}/', root, action='host')
d.connect('hostsearch', r'/hostsearch', root, action='hostsearch')
d.connect(
'hostconfig', r'/hosts/{hostname}/config', root, action='hostconfig')
d.connect(
'hostdestroy', r'/hosts/{hostname}/destroy', root,
action='hostdestroy')
d.connect(
'hostkeys', r'/hosts/{hostname}/keys', root, action='hostkeys')
d.connect(
'backupctrl', r'/hosts/{hostname}/backupctl', root, action='backupctl')
d.connect('newbackup', r'/newbackup', root, action='newbackup')
d.connect(
'detailedindex', r'/index-detailed/', root, action='detailedindex')
d.connect('sysconfig', r'/config', root, action='sysconfig')
d.connect('index', r'/', root)
return d
def config():
# Some global configuration; note that this could be moved into a
# configuration file
cherrypy.config.update({
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8',
'tools.decode.on': True,
'tools.trailing_slash.on': True,
'tools.staticdir.root': os.path.abspath(os.path.dirname(__file__)),
'server.socket_host': '0.0.0.0',
'server.socket_port': 8080,
'log.screen': False,
'log.error_file': '/tmp/site.log',
'environment': 'production',
'show_tracebacks': True,
})
config = {
'/': {
'request.dispatch': routes(),
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'tummy-backup',
'tools.auth_basic.checkpassword': checkpassword,
'tools.db_close.on': True,
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static'
},
}
return(config)
def setup_server():
cfg = config()
app = cherrypy.tree.mount(None, config=cfg)
return (cfg, app)
if __name__ == '__main__':
cfg = setup_server()[0]
cfg.update({'log.screen': True})
cherrypy.quickstart(config=cfg)
else:
sys.stdout = sys.stderr
cfg = config()
cherrypy.config.update({
'log.error_file': '/tmp/error.log',
})
# basic auth is implemented in Apache, couldn't get it working here
cfg['/']['tools.auth_basic.on'] = False
cfg['/']['tools.sessions.on'] = True
cfg['/']['tools.sessions.storage_type'] = 'file'
cfg['/']['tools.sessions.storage_path'] = '/tmp/'
cfg['/']['tools.sessions.timeout'] = 600 # in minutes
application = cherrypy.Application(
None, script_name='/tummy-backup', config=cfg)
| gpl-2.0 | 5,892,526,745,934,946,000 | 33.726852 | 79 | 0.559259 | false | 3.886528 | true | false | false |
gabrielStanovsky/props | props/graph_representation/propagate.py | 1 | 1826 | from props.graph_representation.node import isProp, isTime
class Propagate:
"""
Class that bunches together all of the propagation functions on a digraph,
mainly so the graph can be stored as a member which all of these functions can edit.
"""
def __init__(self,graph):
self.gr = graph
self.applyPropagation()
def applyPropagation(self):
"""
Apply closure propagation algorithms on the graph
"""
change = True
while change:
change = self.propogateFeatures()
def propogateFeatures(self):
"""
handle propagating features between nodes of the graph
@rtype bool
@return True iff this function has changed the graph in some way
"""
ret = False
for curNode in self.gr.nodes():
# for each node in the graph
curNodeNeigbours = self.gr.neighbors(curNode)
for curPropogateNode in curNode.propagateTo:
# for each of its propagated nodes
curPropogateNodeNeigboursIds = [cpn.uid for cpn in self.gr.neighbors(curPropogateNode)]
for curNeigbour in curNodeNeigbours:
if isProp(curNeigbour) or isTime(curNeigbour):
# for each *prop* neighbour
if curNeigbour.uid not in curPropogateNodeNeigboursIds:
# if it's not a neighbour of the propagated node - add it
self.gr.add_edge(edge=(curPropogateNode,curNeigbour),
label=self.gr.edge_label((curNode,curNeigbour)))
# mark that a change was made to the graph
ret = True
return ret | mit | 6,296,101,994,764,986,000 | 41.52381 | 103 | 0.553669 | false | 4.519802 | false | false | false |
gastrodia/liveusb-creator | liveusb/linux_dialog.py | 1 | 18763 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'data/liveusb-creator.ui'
#
# Created: Fri Dec 12 13:46:45 2014
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(690, 538)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(477, 519))
MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
self.centralwidget = QtGui.QWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setMinimumSize(QtCore.QSize(477, 519))
self.centralwidget.setLayoutDirection(QtCore.Qt.LeftToRight)
self.centralwidget.setAutoFillBackground(False)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setContentsMargins(0, 0, 0, -1)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setSpacing(0)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.label = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setStyleSheet(_fromUtf8("background-image: url(:/liveusb-header-bg.png);"))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/liveusb-header-left.png")))
self.label.setScaledContents(False)
self.label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label.setMargin(-1)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_9.addWidget(self.label)
self.label_2 = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setLayoutDirection(QtCore.Qt.RightToLeft)
self.label_2.setStyleSheet(_fromUtf8("background-image: url(:/liveusb-header-bg.png);"))
self.label_2.setText(_fromUtf8(""))
self.label_2.setPixmap(QtGui.QPixmap(_fromUtf8(":/liveusb-header-right.png")))
self.label_2.setScaledContents(False)
self.label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_2.setMargin(-1)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_9.addWidget(self.label_2)
self.gridLayout.addLayout(self.horizontalLayout_9, 0, 0, 1, 1)
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setContentsMargins(9, -1, 9, -1)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.groupBox = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(9)
self.groupBox.setFont(font)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.groupBox)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.isoBttn = QtGui.QPushButton(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.isoBttn.sizePolicy().hasHeightForWidth())
self.isoBttn.setSizePolicy(sizePolicy)
self.isoBttn.setObjectName(_fromUtf8("isoBttn"))
self.horizontalLayout_7.addWidget(self.isoBttn)
self.horizontalLayout_4.addWidget(self.groupBox)
self.downloadGroup = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.downloadGroup.sizePolicy().hasHeightForWidth())
self.downloadGroup.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(9)
self.downloadGroup.setFont(font)
self.downloadGroup.setObjectName(_fromUtf8("downloadGroup"))
self.gridLayout_5 = QtGui.QGridLayout(self.downloadGroup)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.downloadCombo = QtGui.QComboBox(self.downloadGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.downloadCombo.sizePolicy().hasHeightForWidth())
self.downloadCombo.setSizePolicy(sizePolicy)
self.downloadCombo.setObjectName(_fromUtf8("downloadCombo"))
self.horizontalLayout_6.addWidget(self.downloadCombo)
self.refreshReleasesButton = QtGui.QPushButton(self.downloadGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.refreshReleasesButton.sizePolicy().hasHeightForWidth())
self.refreshReleasesButton.setSizePolicy(sizePolicy)
self.refreshReleasesButton.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/refresh.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.refreshReleasesButton.setIcon(icon)
self.refreshReleasesButton.setFlat(True)
self.refreshReleasesButton.setObjectName(_fromUtf8("refreshReleasesButton"))
self.horizontalLayout_6.addWidget(self.refreshReleasesButton)
self.gridLayout_5.addLayout(self.horizontalLayout_6, 0, 0, 1, 1)
self.horizontalLayout_4.addWidget(self.downloadGroup)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.groupBox_2 = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(9)
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.driveBox = QtGui.QComboBox(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.driveBox.sizePolicy().hasHeightForWidth())
self.driveBox.setSizePolicy(sizePolicy)
self.driveBox.setEditable(False)
self.driveBox.setInsertPolicy(QtGui.QComboBox.InsertAtTop)
self.driveBox.setDuplicatesEnabled(False)
self.driveBox.setObjectName(_fromUtf8("driveBox"))
self.horizontalLayout.addWidget(self.driveBox)
self.refreshDevicesButton = QtGui.QPushButton(self.groupBox_2)
self.refreshDevicesButton.setText(_fromUtf8(""))
self.refreshDevicesButton.setIcon(icon)
self.refreshDevicesButton.setFlat(True)
self.refreshDevicesButton.setObjectName(_fromUtf8("refreshDevicesButton"))
self.horizontalLayout.addWidget(self.refreshDevicesButton)
self.gridLayout_3.addLayout(self.horizontalLayout, 0, 0, 1, 1)
self.horizontalLayout_3.addWidget(self.groupBox_2)
self.overlayTitle = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.overlayTitle.sizePolicy().hasHeightForWidth())
self.overlayTitle.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.overlayTitle.setFont(font)
self.overlayTitle.setObjectName(_fromUtf8("overlayTitle"))
self.gridLayout_4 = QtGui.QGridLayout(self.overlayTitle)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.overlaySlider = QtGui.QSlider(self.overlayTitle)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.overlaySlider.sizePolicy().hasHeightForWidth())
self.overlaySlider.setSizePolicy(sizePolicy)
self.overlaySlider.setMaximum(2047)
self.overlaySlider.setOrientation(QtCore.Qt.Horizontal)
self.overlaySlider.setTickPosition(QtGui.QSlider.NoTicks)
self.overlaySlider.setObjectName(_fromUtf8("overlaySlider"))
self.gridLayout_4.addWidget(self.overlaySlider, 0, 0, 1, 1)
self.horizontalLayout_3.addWidget(self.overlayTitle)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.groupBox_3 = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(9)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.nonDestructiveButton = QtGui.QRadioButton(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.nonDestructiveButton.sizePolicy().hasHeightForWidth())
self.nonDestructiveButton.setSizePolicy(sizePolicy)
self.nonDestructiveButton.setChecked(True)
self.nonDestructiveButton.setObjectName(_fromUtf8("nonDestructiveButton"))
self.horizontalLayout_5.addWidget(self.nonDestructiveButton)
self.destructiveButton = QtGui.QRadioButton(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.destructiveButton.sizePolicy().hasHeightForWidth())
self.destructiveButton.setSizePolicy(sizePolicy)
self.destructiveButton.setObjectName(_fromUtf8("destructiveButton"))
self.horizontalLayout_5.addWidget(self.destructiveButton)
self.gridLayout_2.addLayout(self.horizontalLayout_5, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_3)
self.textEdit = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.textEdit.setFont(font)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.verticalLayout.addWidget(self.textEdit)
self.progressBar = QtGui.QProgressBar(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.progressBar.sizePolicy().hasHeightForWidth())
self.progressBar.setSizePolicy(sizePolicy)
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.verticalLayout.addWidget(self.progressBar)
self.startButton = QtGui.QPushButton(self.centralwidget)
self.startButton.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.startButton.sizePolicy().hasHeightForWidth())
self.startButton.setSizePolicy(sizePolicy)
self.startButton.setObjectName(_fromUtf8("startButton"))
self.verticalLayout.addWidget(self.startButton)
self.gridLayout.addLayout(self.verticalLayout, 1, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Fedora Live USB Creator", None))
self.groupBox.setWhatsThis(_translate("MainWindow", "This button allows you to browse for an existing Live CD ISO that you have previously downloaded. If you do not select one, a release will be downloaded for you automatically.", None))
self.groupBox.setTitle(_translate("MainWindow", "Use existing Live CD", None))
self.isoBttn.setText(_translate("MainWindow", "Browse", None))
self.isoBttn.setShortcut(_translate("MainWindow", "Alt+B", None))
self.downloadGroup.setWhatsThis(_translate("MainWindow", "If you do not select an existing Live CD, the selected release will be downloaded for you.", None))
self.downloadGroup.setTitle(_translate("MainWindow", "Download Fedora", None))
self.groupBox_2.setWhatsThis(_translate("MainWindow", "This is the USB stick that you want to install your Live CD on. This device must be formatted with the FAT filesystem.", None))
self.groupBox_2.setTitle(_translate("MainWindow", "Target Device", None))
self.overlayTitle.setWhatsThis(_translate("MainWindow", "By allocating extra space on your USB stick for a persistent overlay, you will be able to store data and make permanent modifications to your live operating system. Without it, you will not be able to save data that will persist after a reboot.", "comment!"))
self.overlayTitle.setTitle(_translate("MainWindow", "Persistent Storage (0 MB)", None))
self.groupBox_3.setTitle(_translate("MainWindow", "Method", None))
self.nonDestructiveButton.setToolTip(_translate("MainWindow", "This method uses the \'cp\' command to copy the files from the ISO on to your USB key, without deleting any existing files.", None))
self.nonDestructiveButton.setText(_translate("MainWindow", "Non-destructive (cp)", None))
self.destructiveButton.setToolTip(_translate("MainWindow", "This method uses the \'dd\' comand to copy the ISO directly to your USB device, destroying any pre-existing data/partitions. This method tends to be more reliable with regard to booting, especially with UEFI systems. This method also works with DVD images.", None))
self.destructiveButton.setText(_translate("MainWindow", "Overwrite device (dd)", None))
self.textEdit.setWhatsThis(_translate("MainWindow", "This is the status console, where all messages get written to.", None))
self.progressBar.setWhatsThis(_translate("MainWindow", "This is the progress bar that will indicate how far along in the LiveUSB creation process you are", None))
self.startButton.setWhatsThis(_translate("MainWindow", "This button will begin the LiveUSB creation process. This entails optionally downloading a release (if an existing one wasn\'t selected), extracting the ISO to the USB device, creating the persistent overlay, and installing the bootloader.", None))
self.startButton.setText(_translate("MainWindow", "Create Live USB", None))
import resources_rc
| gpl-2.0 | 8,534,516,105,100,273,000 | 61.963087 | 333 | 0.7308 | false | 4.156624 | false | false | false |
gnuradio/gnuradio | gr-digital/python/digital/test_soft_decisions.py | 6 | 4293 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import numpy, sys
from matplotlib import pyplot
from gnuradio import digital
from .soft_dec_lut_gen import soft_dec_table, calc_soft_dec_from_table, calc_soft_dec
from .psk_constellations import psk_4_0, psk_4_1, psk_4_2, psk_4_3, psk_4_4, psk_4_5, psk_4_6, psk_4_7, sd_psk_4_0, sd_psk_4_1, sd_psk_4_2, sd_psk_4_3, sd_psk_4_4, sd_psk_4_5, sd_psk_4_6, sd_psk_4_7
from .qam_constellations import qam_16_0, sd_qam_16_0
def test_qpsk(i, sample, prec):
qpsk_const_list = [psk_4_0, psk_4_1, psk_4_2, psk_4_3,
psk_4_4, psk_4_5, psk_4_6, psk_4_7]
qpsk_lut_gen_list = [sd_psk_4_0, sd_psk_4_1, sd_psk_4_2, sd_psk_4_3,
sd_psk_4_4, sd_psk_4_5, sd_psk_4_6, sd_psk_4_7]
constel, code = qpsk_const_list[i]()
qpsk_lut_gen = qpsk_lut_gen_list[i]
rot_sym = 1
side = 2
width = 2
c = digital.constellation_rect(constel, code, rot_sym,
side, side, width, width)
# Get max energy/symbol in constellation
constel = c.points()
Es = max([numpy.sqrt(constel_i.real**2 + constel_i.imag**2) for constel_i in constel])
#table = soft_dec_table_generator(qpsk_lut_gen, prec, Es)
table = soft_dec_table(constel, code, prec)
c.gen_soft_dec_lut(prec)
#c.set_soft_dec_lut(table, prec)
y_python_gen_calc = qpsk_lut_gen(sample, Es)
y_python_table = calc_soft_dec_from_table(sample, table, prec, Es)
y_python_raw_calc = calc_soft_dec(sample, constel, code)
y_cpp_table = c.soft_decision_maker(sample)
y_cpp_raw_calc = c.calc_soft_dec(sample)
return (y_python_gen_calc, y_python_table, y_python_raw_calc,
y_cpp_table, y_cpp_raw_calc, constel, code, c)
def test_qam16(i, sample, prec):
sample = sample / 1
qam_const_list = [qam_16_0, ]
qam_lut_gen_list = [sd_qam_16_0, ]
constel, code = qam_const_list[i]()
qam_lut_gen = qam_lut_gen_list[i]
rot_sym = 4
side = 2
width = 2
c = digital.constellation_rect(constel, code, rot_sym,
side, side, width, width)
# Get max energy/symbol in constellation
constel = c.points()
Es = max([abs(constel_i) for constel_i in constel])
#table = soft_dec_table_generator(qam_lut_gen, prec, Es)
table = soft_dec_table(constel, code, prec, 1)
#c.gen_soft_dec_lut(prec)
c.set_soft_dec_lut(table, prec)
y_python_gen_calc = qam_lut_gen(sample, Es)
y_python_table = calc_soft_dec_from_table(sample, table, prec, Es)
y_python_raw_calc = calc_soft_dec(sample, constel, code, 1)
y_cpp_table = c.soft_decision_maker(sample)
y_cpp_raw_calc = c.calc_soft_dec(sample)
return (y_python_gen_calc, y_python_table, y_python_raw_calc,
y_cpp_table, y_cpp_raw_calc, constel, code, c)
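# Illustrative helper (not in the original script): numerically check that the
# five soft-decision computations above agree within a tolerance, instead of
# eyeballing the printed values in the main block below.
def _soft_decisions_agree(y_gen, y_table, y_raw, y_cpp_table, y_cpp_raw,
                          tol=1e-3):
    ref = numpy.asarray(y_gen)
    return all(numpy.allclose(ref, numpy.asarray(y), atol=tol)
               for y in (y_table, y_raw, y_cpp_table, y_cpp_raw))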
if __name__ == "__main__":
index = 0
prec = 8
x_re = 2*numpy.random.random()-1
x_im = 2*numpy.random.random()-1
x = x_re + x_im*1j
#x = -1 + -0.j
if 1:
y_python_gen_calc, y_python_table, y_python_raw_calc, \
y_cpp_table, y_cpp_raw_calc, constel, code, c \
= test_qpsk(index, x, prec)
else:
y_python_gen_calc, y_python_table, y_python_raw_calc, \
y_cpp_table, y_cpp_raw_calc, constel, code, c \
= test_qam16(index, x, prec)
k = numpy.log2(len(constel))
print("Sample: ", x)
print("Python Generator Calculated: ", (y_python_gen_calc))
print("Python Generator Table: ", (y_python_table))
print("Python Raw calc: ", (y_python_raw_calc))
print("C++ Table calc: ", (y_cpp_table))
print("C++ Raw calc: ", (y_cpp_raw_calc))
fig = pyplot.figure(1)
sp1 = fig.add_subplot(1,1,1)
sp1.plot([c.real for c in constel],
[c.imag for c in constel], 'bo')
sp1.plot(x.real, x.imag, 'ro')
sp1.set_xlim([-1.5, 1.5])
sp1.set_ylim([-1.5, 1.5])
fill = int(numpy.log2(len(constel)))
for i,c in enumerate(constel):
sp1.text(1.2*c.real, 1.2*c.imag, bin(code[i])[2:].zfill(fill),
ha='center', va='center', size=18)
pyplot.show()
| gpl-3.0 | -946,524,697,881,018,100 | 33.344 | 198 | 0.589331 | false | 2.592391 | false | false | false |
jlcjunk/pynet_pac | class4/exer08.py | 1 | 2061 | #!/usr/bin/env python
'''
pexpect demo using netmiko to change
'''
# imports
try:
import textwrap
import time
import netmiko
except ImportError:
print "Could not import a required module.\n Exiting"
raise SystemExit
# Variables
RTR1 = {
'host':'pynet-rtr1',
'device_type':'cisco_ios',
'ip':'184.105.247.70',
'username':'pyclass',
'password':'88newclass',
'secret':'',
'port':22,
'timeout':60
}
RTR2 = {
'host':'pynet-rtr2',
'device_type':'cisco_ios',
'ip':'184.105.247.71',
'username':'pyclass',
'password':'88newclass',
'secret':'',
'port':22,
'timeout':60
}
NEW_BUFFER = str(((time.localtime()[3] * 60 + time.localtime()[4]) * 60) + time.localtime()[5] + 4096)
COMMAND_FILE = 'exer08_commands.txt'
DEVICE_LIST = [
RTR1,
RTR2
]
LINE_INDENT = 8
LINE_WIDTH = 100
def main():
'''
main app
'''
for device_to_change in DEVICE_LIST:
# make connection to device
dev_connection = netmiko.ConnectHandler(**device_to_change)
# check config before change
print device_to_change['host']
command_result = dev_connection.send_command('sh run | inc buff|logging con')
for result_line in command_result.splitlines():
print textwrap.fill(
result_line,
width = LINE_WIDTH,
initial_indent = ' ' * LINE_INDENT,
subsequent_indent = ' ' * (LINE_INDENT + 4)
)
# execute commands
dev_connection.send_config_from_file(config_file = COMMAND_FILE)
# check config after change
command_result = dev_connection.send_command('sh run | inc buff|logging con')
for result_line in command_result.splitlines():
print textwrap.fill(
result_line,
width = LINE_WIDTH,
initial_indent = ' ' * LINE_INDENT,
subsequent_indent = ' ' * (LINE_INDENT + 4)
)
if __name__ == "__main__":
main()
| gpl-3.0 | -3,728,452,204,719,985,700 | 22.157303 | 102 | 0.557496 | false | 3.660746 | false | false | false |
boada/ICD | sandbox/legacy_plot_code/collect_stamps.py | 1 | 2196 | #!/usr/bin/env python
# File: collect_stamps.py
# Created on: Fri 15 Jun 2012 10:11:00 AM CDT
# Last Change: Mon 18 Jun 2012 11:10:48 AM CDT
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import pyfits as pyf
from mk_galaxy_struc import mk_galaxy_struc
galaxies = mk_galaxy_struc()
f1 = open('lowMass.list','wt')
f2 = open('medMass_lowicd.list','wt')
f3 = open('medMass_highicd.list','wt')
f4 = open('highMass.list','wt')
f1.writelines('#field #ID #ICD_IH #MASS #SPIRAL #ELLIPTICAL #UNCERTAIN\n')
f2.writelines('#field #ID #ICD_IH #MASS #SPIRAL #ELLIPTICAL #UNCERTAIN\n')
f3.writelines('#field #ID #ICD_IH #MASS #SPIRAL #ELLIPTICAL #UNCERTAIN\n')
f4.writelines('#field #ID #ICD_IH #MASS #SPIRAL #ELLIPTICAL #UNCERTAIN\n')
for i in range(len(galaxies)):
if galaxies[i].ston_I >= 30.0:
if galaxies[i].Mass <= 1e9:
f1.writelines(str(galaxies[i].field)+' '+str(galaxies[i].ID)+\
' '+str(galaxies[i].ICD_IH)+' '+str(galaxies[i].Mass)+\
' '+str(galaxies[i].Spiral)+' '+str(galaxies[i].Elliptical)+\
' '+str(galaxies[i].Uncertain)+'\n')
elif 1e9 <= galaxies[i].Mass and galaxies[i].Mass <= 1e11:
if galaxies[i].ICD_IH <= 0.05:
f2.writelines(str(galaxies[i].field)+' '+str(galaxies[i].ID)+\
' '+str(galaxies[i].ICD_IH)+' '+str(galaxies[i].Mass)+\
' '+str(galaxies[i].Spiral)+' '+str(galaxies[i].Elliptical)+\
' '+str(galaxies[i].Uncertain)+'\n')
else:
f3.writelines(str(galaxies[i].field)+' '+str(galaxies[i].ID)+\
' '+str(galaxies[i].ICD_IH)+' '+str(galaxies[i].Mass)+\
' '+str(galaxies[i].Spiral)+' '+str(galaxies[i].Elliptical)+\
' '+str(galaxies[i].Uncertain)+'\n')
elif 1e11 <= galaxies[i].Mass:
f4.writelines(str(galaxies[i].field)+' '+str(galaxies[i].ID)+\
' '+str(galaxies[i].ICD_IH)+' '+str(galaxies[i].Mass)+\
' '+str(galaxies[i].Spiral)+' '+str(galaxies[i].Elliptical)+\
' '+str(galaxies[i].Uncertain)+'\n')
f1.close()
f2.close()
f3.close()
f4.close()
| mit | 4,391,029,819,560,187,000 | 42.92 | 80 | 0.564663 | false | 2.648975 | false | false | false |
PoprostuRonin/pr0gramista | blog/views.py | 1 | 2561 | from django.shortcuts import render, get_object_or_404, redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from taggit.models import Tag
from .models import Post, Page
POSTS_PER_PAGE = 5
def index(request):
"""Displays first page with latest posts"""
return index_pagination(request, 1)
def index_pagination(request, pagination):
"""Displays n-th page with latest posts"""
page = int(pagination)
if not request.user.is_authenticated:
posts_published = Post.objects.filter(published=True).order_by('-pub_date')
else:
posts_published = Post.objects.order_by('-pub_date')
paginator = Paginator(posts_published, POSTS_PER_PAGE)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {
'posts': posts
}
return render(request, 'blog/index.html', context)
def post(request, post_slug):
"""Displays single post"""
if not request.user.is_authenticated:
query = Post.objects.filter(published=True)
else:
query = Post.objects.all()
post = get_object_or_404(query, slug=post_slug)
context = {'post': post}
return render(request, 'blog/post.html', context)
def tag(request, tag_slug):
"""Displays first page with posts with given tag"""
return tag_pagination(request, tag_slug, 1)
def tag_pagination(request, tag_slug, pagination):
"""Displays n-th page with posts with given tag"""
page = int(pagination)
if not request.user.is_authenticated:
posts_with_tag = Post.objects.filter(published=True).filter(tags__slug__in=[tag_slug]).order_by('-pub_date').all()
else:
posts_with_tag = Post.objects.filter(tags__slug__in=[tag_slug]).order_by('-pub_date').all()
paginator = Paginator(posts_with_tag, POSTS_PER_PAGE)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
tag = Tag.objects.get(slug=tag_slug)
context = {
'posts': posts,
'tag': tag
}
return render(request, 'blog/tag.html', context)
def page(request, page_slug):
"""Displays page"""
page = Page.objects.get(slug=page_slug)
context = {'page': page}
return render(request, 'blog/page.html', context)
def youtube(request):
return redirect('https://www.youtube.com/channel/UCHPUGfK2zW0VUNN2SgCHsXg')
| gpl-3.0 | -7,522,999,251,025,674,000 | 28.77907 | 122 | 0.666927 | false | 3.527548 | false | false | false |
trexbob/bioinfo_tools | instance_counter.py | 1 | 1564 | # This program is to count instances of particular values or names in a file.
# It requires two inputs at the command line, the first is the file being read, and the second
# is the file to be written with the keys and values in a csv file.
# Written by Robert Tyx [email protected] on 3/3/2014
# File: instance_counter.py
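# Usage sketch (file names here are hypothetical examples, not part of the original script):
#   python instance_counter.py input.txt counts.csv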
import csv # Comma Separated Values module
import sys # system module
def count_names(lines): # function for counting names using lines as input
    result = {} # create a dictionary called result
    for name in lines: # for every line, do the following
        name = name.strip() # strip off any surrounding whitespace
        if name in result: # if the name is already a key in the dictionary
            result[name] = result[name]+1 # add one to that name's count
        else: # otherwise do the following
            result[name] = 1 # add the name as a key with a count of one
    return result # return the dictionary to the caller
if __name__ == '__main__':
reader = open(sys.argv[1],'r') # open file specified by command line
lines = reader.readlines() # read each line into lines
reader.close()
count = count_names(lines) # call function count_names
for name in count: # print out all keys and values onto screen
print name, count[name]
writer = csv.writer(open(sys.argv[2], 'wb')) # write to the second file specified in command line
for key, value in count.items(): # write each key and value
writer.writerow([key,value])
| gpl-2.0 | -3,729,897,447,991,104,000 | 51.931034 | 98 | 0.667519 | false | 3.852217 | false | false | false |
qdamian/htmlvis | htmlvis/plantuml.py | 1 | 1552 | """
Generate http://plantuml.com/sequence-diagram
"""
import logging
from . import formatting, plantuml_text_encoding
from .seqdiag_model import Category
logger = logging.getLogger(__name__)
MSG_TO_TEXTUAL_REPR = {
Category.request: '"{source}" -> "{destination}": {text}\n',
Category.response: '"{destination}" <-- "{source}": {text}\n',
}
NOTE_LOCATION = {
Category.request: 'right',
Category.response: 'left',
}
def html_image(messages):
"""
Generate an HTML img element with an SVG sequence diagram
"""
logger.debug('Generating sequence diagram')
textual_repr = _generate_textual_representation(messages)
encoded_repr = plantuml_text_encoding.encode(textual_repr)
html = '<img src="http://www.plantuml.com/plantuml/svg/%s">' % encoded_repr
return html
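# Illustrative usage sketch (not part of the original module): `messages` is a
# sequence of message objects exposing .category, .src, .dst, .text and .note,
# as used by the helpers below; the message class itself lives in seqdiag_model
# and is not shown here.
#   html = html_image(messages)   # returns an <img> tag pointing at plantuml.com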
def _generate_textual_representation(messages):
textual_repr = ''
for msg in messages:
textual_repr += MSG_TO_TEXTUAL_REPR[msg.category].format(
source=_sanitize(msg.src),
destination=_sanitize(msg.dst),
text=msg.text)
formatters = [
formatting.prettify_json, formatting.shorten_long_strings
]
for fmt in formatters:
fmt(msg)
if msg.note:
textual_repr += 'note ' + NOTE_LOCATION[
msg.category] + '\n' + _indent(msg.note) + '\nend note'
return textual_repr
def _sanitize(participant):
return participant.replace('"', "'")
def _indent(text):
return ' ' + '\n '.join(text.splitlines())
| mit | 7,518,732,632,346,164,000 | 27.218182 | 79 | 0.626289 | false | 3.686461 | false | false | false |
cdrooom/odoo | openerp/sql_db.py | 7 | 23676 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is what
the ORM does, in fact.
"""
from contextlib import contextmanager
from functools import wraps
import logging
import urlparse
import uuid
import psycopg2.extras
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
_logger = logging.getLogger(__name__)
types_mapping = {
'date': (1082,),
'time': (1083,),
'datetime': (1114,),
}
def unbuffer(symb, cr):
if symb is None:
return None
return str(symb)
def undecimalize(symb, cr):
if symb is None:
return None
return float(symb)
for name, typeoid in types_mapping.items():
psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x))
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))
import tools
from tools.func import frame_codeinfo
from datetime import datetime as mdt
from datetime import timedelta
import threading
from inspect import currentframe
import re
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')
sql_counter = 0
class Cursor(object):
"""Represents an open transaction to the PostgreSQL DB backend,
acting as a lightweight wrapper around psycopg2's
``cursor`` objects.
``Cursor`` is the object behind the ``cr`` variable used all
over the OpenERP code.
.. rubric:: Transaction Isolation
One very important property of database transactions is the
level of isolation between concurrent transactions.
The SQL standard defines four levels of transaction isolation,
ranging from the most strict *Serializable* level, to the least
strict *Read Uncommitted* level. These levels are defined in
terms of the phenomena that must not occur between concurrent
transactions, such as *dirty read*, etc.
    In the context of generic business data management software
    such as OpenERP, we need the best guarantees that no data
    corruption can ever be caused by simply running multiple
transactions in parallel. Therefore, the preferred level would
be the *serializable* level, which ensures that a set of
transactions is guaranteed to produce the same effect as
running them one at a time in some order.
However, most database management systems implement a limited
serializable isolation in the form of
`snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
providing most of the same advantages as True Serializability,
with a fraction of the performance cost.
With PostgreSQL up to version 9.0, this snapshot isolation was
the implementation of both the ``REPEATABLE READ`` and
``SERIALIZABLE`` levels of the SQL standard.
As of PostgreSQL 9.1, the previous snapshot isolation implementation
was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
level was introduced, providing some additional heuristics to
detect a concurrent update by parallel transactions, and forcing
one of them to rollback.
OpenERP implements its own level of locking protection
for transactions that are highly likely to provoke concurrent
updates, such as stock reservations or document sequences updates.
Therefore we mostly care about the properties of snapshot isolation,
but we don't really need additional heuristics to trigger transaction
rollbacks, as we are taking care of triggering instant rollbacks
ourselves when it matters (and we can save the additional performance
hit of these heuristics).
As a result of the above, we have selected ``REPEATABLE READ`` as
the default transaction isolation level for OpenERP cursors, as
it will be mapped to the desired ``snapshot isolation`` level for
all supported PostgreSQL version (8.3 - 9.x).
Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable
read level to serializable before sending it to the database, so it would
actually select the new serializable mode on PostgreSQL 9.1. Make
sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
the performance hit is a concern for you.
.. attribute:: cache
Cache dictionary with a "request" (-ish) lifecycle, only lives as
long as the cursor itself does and proactively cleared when the
cursor is closed.
This cache should *only* be used to store repeatable reads as it
ignores rollbacks and savepoints, it should not be used to store
*any* data which may be modified during the life of the cursor.
"""
IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit
def check(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if self._closed:
msg = 'Unable to use a closed cursor.'
if self.__closer:
msg += ' It was closed at %s, line %s' % self.__closer
raise psycopg2.OperationalError(msg)
return f(self, *args, **kwargs)
return wrapper
def __init__(self, pool, dbname, dsn, serialized=True):
self.sql_from_log = {}
self.sql_into_log = {}
# default log level determined at cursor creation, could be
# overridden later for debugging purposes
self.sql_log = _logger.isEnabledFor(logging.DEBUG)
self.sql_log_count = 0
# avoid the call of close() (by __del__) if an exception
# is raised by any of the following initialisations
self._closed = True
self.__pool = pool
self.dbname = dbname
# Whether to enable snapshot isolation level for this cursor.
# see also the docstring of Cursor.
self._serialized = serialized
self._cnx = pool.borrow(dsn)
self._obj = self._cnx.cursor()
if self.sql_log:
self.__caller = frame_codeinfo(currentframe(), 2)
else:
self.__caller = False
self._closed = False # real initialisation value
self.autocommit(False)
self.__closer = False
self._default_log_exceptions = True
self.cache = {}
def __build_dict(self, row):
return {d.name: row[i] for i, d in enumerate(self._obj.description)}
def dictfetchone(self):
row = self._obj.fetchone()
return row and self.__build_dict(row)
def dictfetchmany(self, size):
return map(self.__build_dict, self._obj.fetchmany(size))
def dictfetchall(self):
return map(self.__build_dict, self._obj.fetchall())
def __del__(self):
if not self._closed and not self._cnx.closed:
# Oops. 'self' has not been closed explicitly.
# The cursor will be deleted by the garbage collector,
# but the database connection is not put back into the connection
            # pool, preventing some operations on the database, like dropping it.
# This can also lead to a server overload.
msg = "Cursor not closed explicitly\n"
if self.__caller:
msg += "Cursor was created at %s:%s" % self.__caller
else:
msg += "Please enable sql debugging to trace the caller."
_logger.info(msg)
self._close(True)
@check
def execute(self, query, params=None, log_exceptions=None):
if '%d' in query or '%f' in query:
_logger.info("SQL queries cannot contain %%d or %%f anymore. Use only %%s:\n%s" % query,
exc_info=_logger.isEnabledFor(logging.DEBUG))
if params and not isinstance(params, (tuple, list, dict)):
_logger.info("SQL query parameters should be a tuple, list or dict; got %r", params)
raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))
if self.sql_log:
now = mdt.now()
try:
params = params or None
res = self._obj.execute(query, params)
except psycopg2.ProgrammingError, pe:
if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.info("Programming error: %s, in query %s", pe, query)
raise
except Exception:
if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.info("bad query: %s", self._obj.query or query)
raise
# simple query count is always computed
self.sql_log_count += 1
# advanced stats only if sql_log is enabled
if self.sql_log:
delay = mdt.now() - now
delay = delay.seconds * 1E6 + delay.microseconds
_logger.debug("query: %s", self._obj.query)
res_from = re_from.match(query.lower())
if res_from:
self.sql_from_log.setdefault(res_from.group(1), [0, 0])
self.sql_from_log[res_from.group(1)][0] += 1
self.sql_from_log[res_from.group(1)][1] += delay
res_into = re_into.match(query.lower())
if res_into:
self.sql_into_log.setdefault(res_into.group(1), [0, 0])
self.sql_into_log[res_into.group(1)][0] += 1
self.sql_into_log[res_into.group(1)][1] += delay
return res
def split_for_in_conditions(self, ids):
"""Split a list of identifiers into one or more smaller tuples
safe for IN conditions, after uniquifying them."""
return tools.misc.split_every(self.IN_MAX, set(ids))
def print_log(self):
global sql_counter
if not self.sql_log:
return
def process(type):
sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
sum = 0
if sqllogs[type]:
sqllogitems = sqllogs[type].items()
sqllogitems.sort(key=lambda k: k[1][1])
_logger.debug("SQL LOG %s:", type)
sqllogitems.sort(lambda x, y: cmp(x[1][0], y[1][0]))
for r in sqllogitems:
delay = timedelta(microseconds=r[1][1])
_logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
sum += r[1][1]
sqllogs[type].clear()
sum = timedelta(microseconds=sum)
_logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
sqllogs[type].clear()
process('from')
process('into')
self.sql_log_count = 0
self.sql_log = False
@check
def close(self):
return self._close(False)
def _close(self, leak=False):
global sql_counter
if not self._obj:
return
del self.cache
if self.sql_log:
self.__closer = frame_codeinfo(currentframe(), 3)
# simple query count is always computed
sql_counter += self.sql_log_count
# advanced stats only if sql_log is enabled
self.print_log()
self._obj.close()
# This force the cursor to be freed, and thus, available again. It is
# important because otherwise we can overload the server very easily
# because of a cursor shortage (because cursors are not garbage
# collected as fast as they should). The problem is probably due in
# part because browse records keep a reference to the cursor.
del self._obj
self._closed = True
# Clean the underlying connection.
self._cnx.rollback()
if leak:
self._cnx.leaked = True
else:
chosen_template = tools.config['db_template']
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
keep_in_pool = self.dbname not in templates_list
self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)
@check
def autocommit(self, on):
if on:
isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
else:
# If a serializable cursor was requested, we
            # use the appropriate PostgreSQL isolation level
            # that maps to snapshot isolation.
# For all supported PostgreSQL versions (8.3-9.x),
# this is currently the ISOLATION_REPEATABLE_READ.
# See also the docstring of this class.
# NOTE: up to psycopg 2.4.2, repeatable read
# is remapped to serializable before being
# sent to the database, so it is in fact
# unavailable for use with pg 9.1.
isolation_level = \
ISOLATION_LEVEL_REPEATABLE_READ \
if self._serialized \
else ISOLATION_LEVEL_READ_COMMITTED
self._cnx.set_isolation_level(isolation_level)
@check
def commit(self):
""" Perform an SQL `COMMIT`
"""
return self._cnx.commit()
@check
def rollback(self):
""" Perform an SQL `ROLLBACK`
"""
return self._cnx.rollback()
def __enter__(self):
""" Using the cursor as a contextmanager automatically commits and
closes it::
with cr:
cr.execute(...)
# cr is committed if no failure occurred
# cr is closed in any case
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.commit()
self.close()
@contextmanager
@check
def savepoint(self):
"""context manager entering in a new savepoint"""
name = uuid.uuid1().hex
self.execute('SAVEPOINT "%s"' % name)
try:
yield
self.execute('RELEASE SAVEPOINT "%s"' % name)
except:
self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
raise
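    # Hedged example of the savepoint context manager above, assuming an open
    # cursor `cr`; the UPDATE statement and table name are only illustrative.
    #
    #   with cr.savepoint():
    #       cr.execute("UPDATE res_partner SET name = %s WHERE id = %s", ('x', 1))
    #       # an exception raised in this block rolls back to the savepoint only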
@check
def __getattr__(self, name):
return getattr(self._obj, name)
class TestCursor(Cursor):
""" A cursor to be used for tests. It keeps the transaction open across
several requests, and simulates committing, rolling back, and closing.
"""
def __init__(self, *args, **kwargs):
super(TestCursor, self).__init__(*args, **kwargs)
# in order to simulate commit and rollback, the cursor maintains a
# savepoint at its last commit
self.execute("SAVEPOINT test_cursor")
# we use a lock to serialize concurrent requests
self._lock = threading.RLock()
def acquire(self):
self._lock.acquire()
def release(self):
self._lock.release()
def force_close(self):
super(TestCursor, self).close()
def close(self):
if not self._closed:
self.rollback() # for stuff that has not been committed
self.release()
def autocommit(self, on):
_logger.debug("TestCursor.autocommit(%r) does nothing", on)
def commit(self):
self.execute("RELEASE SAVEPOINT test_cursor")
self.execute("SAVEPOINT test_cursor")
def rollback(self):
self.execute("ROLLBACK TO SAVEPOINT test_cursor")
self.execute("SAVEPOINT test_cursor")
class PsycoConnection(psycopg2.extensions.connection):
pass
class ConnectionPool(object):
""" The pool of connections to database(s)
Keep a set of connections to pg databases open, and reuse them
to open cursors for all transactions.
The connections are *not* automatically closed. Only a close_db()
can trigger that.
"""
def locked(fun):
@wraps(fun)
def _locked(self, *args, **kwargs):
self._lock.acquire()
try:
return fun(self, *args, **kwargs)
finally:
self._lock.release()
return _locked
def __init__(self, maxconn=64):
self._connections = []
self._maxconn = max(maxconn, 1)
self._lock = threading.Lock()
def __repr__(self):
used = len([1 for c, u in self._connections[:] if u])
count = len(self._connections)
return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)
def _debug(self, msg, *args):
_logger.debug(('%r ' + msg), self, *args)
@locked
def borrow(self, dsn):
# free dead and leaked connections
for i, (cnx, _) in tools.reverse_enumerate(self._connections):
if cnx.closed:
self._connections.pop(i)
self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
continue
if getattr(cnx, 'leaked', False):
delattr(cnx, 'leaked')
self._connections.pop(i)
self._connections.append((cnx, False))
_logger.info('%r: Free leaked connection to %r', self, cnx.dsn)
for i, (cnx, used) in enumerate(self._connections):
if not used and cnx._original_dsn == dsn:
try:
cnx.reset()
except psycopg2.OperationalError:
self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
# psycopg2 2.4.4 and earlier do not allow closing a closed connection
if not cnx.closed:
cnx.close()
continue
self._connections.pop(i)
self._connections.append((cnx, True))
self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
return cnx
if len(self._connections) >= self._maxconn:
# try to remove the oldest connection not used
for i, (cnx, used) in enumerate(self._connections):
if not used:
self._connections.pop(i)
if not cnx.closed:
cnx.close()
self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
break
else:
# note: this code is called only if the for loop has completed (no break)
raise PoolError('The Connection Pool Is Full')
try:
result = psycopg2.connect(dsn=dsn, connection_factory=PsycoConnection)
except psycopg2.Error:
_logger.info('Connection to the database failed')
raise
result._original_dsn = dsn
self._connections.append((result, True))
self._debug('Create new connection')
return result
@locked
def give_back(self, connection, keep_in_pool=True):
self._debug('Give back connection to %r', connection.dsn)
for i, (cnx, used) in enumerate(self._connections):
if cnx is connection:
self._connections.pop(i)
if keep_in_pool:
self._connections.append((cnx, False))
self._debug('Put connection to %r in pool', cnx.dsn)
else:
self._debug('Forgot connection to %r', cnx.dsn)
cnx.close()
break
else:
            raise PoolError('This connection does not belong to the pool')
@locked
def close_all(self, dsn=None):
count = 0
last = None
for i, (cnx, used) in tools.reverse_enumerate(self._connections):
if dsn is None or cnx._original_dsn == dsn:
cnx.close()
last = self._connections.pop(i)[0]
count += 1
_logger.info('%r: Closed %d connections %s', self, count,
(dsn and last and 'to %r' % last.dsn) or '')
class Connection(object):
""" A lightweight instance of a connection to postgres
"""
def __init__(self, pool, dbname, dsn):
self.dbname = dbname
self.dsn = dsn
self.__pool = pool
def cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
_logger.debug('create %scursor to %r', cursor_type, self.dsn)
return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
def test_cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
_logger.debug('create test %scursor to %r', cursor_type, self.dsn)
return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
# serialized_cursor is deprecated - cursors are serialized by default
serialized_cursor = cursor
def __nonzero__(self):
"""Check if connection is possible"""
try:
_logger.info("__nonzero__() is deprecated. (It is too expensive to test a connection.)")
cr = self.cursor()
cr.close()
return True
except Exception:
return False
def dsn(db_or_uri):
"""parse the given `db_or_uri` and return a 2-tuple (dbname, uri)"""
if db_or_uri.startswith(('postgresql://', 'postgres://')):
# extract db from uri
us = urlparse.urlsplit(db_or_uri)
if len(us.path) > 1:
db_name = us.path[1:]
elif us.username:
db_name = us.username
else:
db_name = us.hostname
return db_name, db_or_uri
_dsn = ''
for p in ('host', 'port', 'user', 'password'):
cfg = tools.config['db_' + p]
if cfg:
_dsn += '%s=%s ' % (p, cfg)
return db_or_uri, '%sdbname=%s' % (_dsn, db_or_uri)
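# Illustrative outputs of dsn() (values are assumed examples; the host/port/user
# parts of the second element depend on tools.config):
#   dsn('somedb')                        -> ('somedb', '... dbname=somedb')
#   dsn('postgresql://user@host/somedb') -> ('somedb', 'postgresql://user@host/somedb')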
_Pool = None
def db_connect(to, allow_uri=False):
global _Pool
if _Pool is None:
_Pool = ConnectionPool(int(tools.config['db_maxconn']))
db, uri = dsn(to)
if not allow_uri and db != to:
raise ValueError('URI connections not allowed')
return Connection(_Pool, db, uri)
def close_db(db_name):
""" You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function."""
global _Pool
if _Pool:
_Pool.close_all(dsn(db_name)[1])
def close_all():
global _Pool
if _Pool:
_Pool.close_all()
| agpl-3.0 | 1,858,184,153,876,903,700 | 36.402844 | 123 | 0.591612 | false | 4.185997 | true | false | false |
maxhutch/glopen | setup.py | 1 | 1089 | #!/usr/bin/env python
from os.path import exists
from setuptools import setup
import re
version_raw = open('glopen/_version.py').read()
version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
version_result = re.search(version_regex, version_raw, re.M)
if version_result:
version_string = version_result.group(1)
else:
    raise RuntimeError("Unable to find version string in glopen/_version.py.")
setup(name='glopen',
version=version_string,
description='Open-like interface to globus remotes',
url='http://github.com/maxhutch/glopen/',
author='https://raw.github.com/maxhutch/glopen/master/AUTHORS.md',
author_email='[email protected]',
maintainer='Max Hutchinson',
maintainer_email='[email protected]',
license='MIT',
keywords='globus ssh open',
install_requires=list(open('requirements.txt').read().strip()
.split('\n')),
long_description=(open('README.rst').read() if exists('README.rst')
else ''),
packages=['glopen'],
zip_safe=True)
| mit | -5,315,596,720,515,044,000 | 34.129032 | 79 | 0.623508 | false | 3.501608 | false | true | false |
axsemantics/axsemantics-cli | axsemantics_cli/analyze.py | 1 | 9316 | import click
from collections.abc import Mapping, Sequence
from collections import OrderedDict, defaultdict
import json
import os
import re
import sys
from .common.formatter import sort_atml3
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
from math import gcd
else:
from fractions import gcd
@click.group()
@click.argument('input', type=click.File('r'))
@click.pass_obj
def analyze(obj, input):
"""
    analyze a single atml3 file
"""
print(input.name)
try:
fcontents = json.load(input, encoding='utf-8',
object_hook=OrderedDict,
object_pairs_hook=OrderedDict)
obj['input'] = fcontents
except Exception as e:
print(e)
@analyze.command()
@click.pass_obj
def show(obj):
"""
merely print the contents of the dict
"""
    print(json.dumps(obj['input'], indent=4))
@click.group(chain=True, invoke_without_command=True)
@click.argument('path', type=click.Path(exists=True))
@click.pass_context
def atml3file(ctx, path):
ctx.obj = {}
if not os.path.isdir(path):
click.echo('{} is not a directory'.format(path), err=True)
return
def atml3file_iter(path):
for root, dirs, files in os.walk(path):
for name in files:
if name[-6:].lower() == '.atml3':
fname = os.path.join(root, name)
try:
with open(fname, 'r') as f:
fcontents = f.read()
atml3 = json.loads(fcontents, encoding='utf-8',
object_hook=OrderedDict,
object_pairs_hook=OrderedDict)
item = {
'fcontents': fcontents,
'atml3': atml3,
'filename': fname,
}
yield item
except Exception as e:
item = {
'filename': fname,
'atml3': {},
'fcontents': '',
'error': str(e),
}
yield item
@atml3file.resultcallback()
def process_commands(processors, path):
def echo_name(iterator):
for item in iterator:
click.echo(item['filename'])
yield item
iterator = atml3file_iter(path)
for processor in processors:
iterator = processor(iterator)
for item in iterator:
pass
@atml3file.command('collect-errors')
def atml3_errors():
def processor(iterator):
counter = 0
fails = 0
errors = []
for item in iterator:
counter += 1
if 'error' in item:
fails += 1
errors.append('{}: {}'.format(item['filename'], item['error']))
yield item
click.echo('\n\n{}/{} failed to validate:'.format(fails, counter))
click.echo('\n'.join(errors))
return processor
@atml3file.command('keys')
@click.pass_obj
def atml3_rootkeys(obj):
obj['allkeys'] = set()
obj['orderings'] = set()
def processor(iterator):
for item in iterator:
for key in item['atml3'].keys():
obj['allkeys'].add(key)
orderstring = '", "'.join(item['atml3'].keys())
obj['orderings'].add('"{}"'.format(orderstring))
yield item
click.echo('\n')
click.echo('all keys:\n"{}"'.format('", "'.join(obj['allkeys'])))
click.echo('all orderings:')
orderings = list(obj['orderings'])
orderings.sort()
for line in orderings:
click.echo(line)
return processor
@atml3file.command('reindent')
def atml3_reindent():
def processor(iterator):
for item in iterator:
if item.get('atml3', '') and item.get('filename', ''):
filename = item['filename']
atml3 = item['atml3']
click.echo('writing reindented file to {}'.format(filename))
with open(filename, 'w') as f:
json.dump(atml3, f, indent=2)
yield item
return processor
@atml3file.command('showkey')
@click.argument('key')
def atml3_print_keyvalue(key):
def processor(iterator):
for item in iterator:
if key in item['atml3']:
click.echo('{} {}: "{}"'.format(item['filename'], key, item['atml3'][key]))
yield item
return processor
@atml3file.command('sort')
def atml3_sort():
def processor(iterator):
for item in iterator:
if item['atml3']:
print(json.dumps(sort_atml3(item['atml3']), indent=2))
yield item
return processor
@atml3file.command('guess-indentation')
def atml3_guess_indentation():
def processor(iterator):
for item in iterator:
if item['fcontents']:
tabdivisor = 0
spacedivisor = 0
for line in item['fcontents'].split('\n'):
tabs = 0
spaces = 0
for letter in line:
if letter == ' ':
spaces += 1
elif letter == '\t':
tabs += 1
else:
break
tabdivisor = gcd(tabdivisor, tabs)
spacedivisor = gcd(spacedivisor, spaces)
if spacedivisor > 0:
click.echo('{}: {} spaces'.format(item['filename'], spacedivisor))
elif tabdivisor > 0:
click.echo('{}: {} tabs'.format(item['filename'], tabdivisor))
yield item
return processor
class TypeSuggester(object):
def __init__(self):
self.suggestions = defaultdict(int)
def suggest(self, typename):
self.suggestions[typename] += 1
def print_suggestions(self):
d = self.suggestions
total = sum(d.values())
sorter = ((k, d[k]) for k in sorted(d, key=d.get, reverse=True))
key, value = list(sorter)[0]
print('{}% {}'.format(value / total * 100.0,
key))
def highest_suggestion(self):
d = self.suggestions
sorter = ((k, d[k]) for k in sorted(d, key=d.get, reverse=True))
key, _ = list(sorter)[0]
return key
keys = defaultdict(TypeSuggester)
@atml3file.command('find-data')
def atml3_find_data():
data_re = re.compile(r'\#[^) ,]+')
def recurse_data(item, path):
global keys
if isinstance(item, str):
if path.endswith('mappingExpression') or path.endswith('truthExpression'):
variables = data_re.findall(item)
if variables:
for key in variables:
if 'str({})'.format(key) in item:
keys[key].suggest('string')
elif 'numeric({})'.format(key) in item:
                        keys[key].suggest('numeric')
else:
                        keys[key]  # register the key even when no explicit type hint is present
elif isinstance(item, Mapping):
for key, value in item.items():
recurse_data(value,
'{}.{}'.format(path,
key))
elif isinstance(item, Sequence):
for pos, element in enumerate(item):
recurse_data(element,
'{}.{}'.format(path,
pos))
def processor(iterator):
global keys
for item in iterator:
if item['atml3']:
atml3 = item['atml3']
recurse_data(atml3, '')
yield item
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
},
}
list_re = re.compile(r'\[\d+\]$')
for key in sorted(keys.keys()):
isarray = False
if '.' in key:
prop = schema['properties']
accessor = key.lstrip('#').split('.')
for snippet in accessor:
match = list_re.search(snippet)
if match:
isarray = True
# is a list
key_without_index = snippet[:snippet.rfind('[')]
prop[key_without_index] = {
'type': 'array',
'items': {
'type': 'object',
'properties': {}
}
}
prop = prop[key_without_index]['items']['properties']
else:
prop[snippet] = {
'type': 'object',
'properties': {}
}
prop = prop[snippet]['properties']
if isarray:
print(json.dumps(schema, indent=2, sort_keys=True))
return processor
| mit | -7,680,584,266,771,286,000 | 30.157191 | 91 | 0.472198 | false | 4.38813 | false | false | false |
impactlab/eemeter | eemeter/processors/location.py | 1 | 2513 | import logging
from eemeter.weather.location import (
zipcode_to_usaf_station,
zipcode_to_tmy3_station,
)
from eemeter.weather.noaa import ISDWeatherSource
from eemeter.weather.tmy3 import TMY3WeatherSource
logger = logging.getLogger(__name__)
def get_weather_source(project):
''' Finds most relevant WeatherSource given project site.
Parameters
----------
project : eemeter.structures.Project
Project for which to find weather source data.
Returns
-------
weather_source : eemeter.weather.ISDWeatherSource
Closest data-validated weather source in the same climate zone as
project ZIP code, if available.
'''
zipcode = project.site.zipcode
station = zipcode_to_usaf_station(zipcode)
if station is None:
logger.error(
"Could not find ISD station for zipcode {}."
.format(zipcode)
)
return None
logger.info(
"Mapped ZIP code {} to ISD station {}"
.format(zipcode, station)
)
try:
weather_source = ISDWeatherSource(station)
except ValueError:
logger.error(
"Could not create ISDWeatherSource for station {}."
.format(station)
)
return None
logger.info("Created ISDWeatherSource using station {}".format(station))
return weather_source
def get_weather_normal_source(project):
''' Finds most relevant WeatherSource given project site.
Parameters
----------
project : eemeter.structures.Project
Project for which to find weather source data.
Returns
-------
weather_source : eemeter.weather.TMY3WeatherSource
Closest data-validated weather source in the same climate zone as
project ZIP code, if available.
'''
zipcode = project.site.zipcode
station = zipcode_to_tmy3_station(zipcode)
if station is None:
logger.error(
"Could not find appropriate TMY3 station for zipcode {}."
.format(zipcode)
)
return None
logger.info(
"Mapped ZIP code {} to TMY3 station {}"
.format(zipcode, station)
)
try:
weather_normal_source = TMY3WeatherSource(station)
except ValueError:
logger.error(
"Could not create TMY3WeatherSource for station {}."
.format(station)
)
return None
logger.info("Created TMY3WeatherSource using station {}".format(station))
return weather_normal_source
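# Hedged usage sketch (assumes an eemeter Project whose Site carries a ZIP code):
#   weather_source = get_weather_source(project)                # ISDWeatherSource or None
#   weather_normal_source = get_weather_normal_source(project)  # TMY3WeatherSource or None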
| mit | -4,050,567,537,289,151,000 | 24.642857 | 77 | 0.636291 | false | 4.099511 | false | false | false |
hraban/YABFD | parsers/customblacklist.py | 1 | 1052 | import csv
from datetime import datetime as dt
import logging
import sys
from parsers import BaseParser
_logger = logging.getLogger('yabfd.' + __name__)
class Parser(BaseParser):
def __init__(self, name, blacklist, bandate=dt.max, hitweight=sys.maxint):
super(Parser, self).__init__(name)
self.load_logs([blacklist])
self.weight = hitweight
self.date = bandate
def _parse(self, blacklist):
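        # Each CSV row is: host[,ban-date[,weight]]; the date uses %Y-%m-%d and
        # defaults to dt.max, the weight defaults to the parser's hitweight.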
_logger.debug('%s reading %r.', self, blacklist)
r = csv.reader(open(blacklist, 'rb'))
for row in r:
try:
host = row.pop(0)
            except IndexError:
_logger.error('Blacklist %r malformed at line %d, skipping.',
blacklist, r.line_num)
continue
date = dt.strptime(row.pop(0), '%Y-%m-%d').date() if row else dt.max
weight = int(row.pop(0)) if row else self.weight
yield (date, host, weight)
_logger.debug('%s read %d hosts from %r.', self, r.line_num, blacklist)
| mit | -1,210,551,539,955,328,800 | 34.066667 | 80 | 0.574144 | false | 3.825455 | false | false | false |
alhashash/odoomrp-wip | mrp_production_traceability/wizard/mrp_product_produce.py | 23 | 2304 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class MrpProductProduce(models.TransientModel):
_inherit = 'mrp.product.produce'
def default_lot_id(self):
if 'active_id' in self.env.context and (
self.env.context.get('active_model') == 'mrp.production'):
production_obj = self.env['mrp.production']
production = production_obj.browse(self.env.context['active_id'])
return production.mapped('move_lines2.prod_parent_lot')[:1]
lot_id = fields.Many2one(default=default_lot_id)
@api.multi
def do_produce(self):
track_lot_obj = self.env['mrp.track.lot']
result = super(MrpProductProduce, self).do_produce()
production = self.env['mrp.production'].browse(
self.env.context['active_id'])
for data in self:
if data.lot_id:
for move in production.move_lines2:
if not move.prod_parent_lot:
move.prod_parent_lot = data.lot_id.id
track_lot_obj.create(
{'component': move.product_id.id,
'component_lot': move.restrict_lot_id.id,
'product': production.product_id.id,
'product_lot': data.lot_id.id,
'production': production.id,
'st_move': move.id})
return result
| agpl-3.0 | -4,497,781,787,498,152,000 | 43.307692 | 78 | 0.55599 | false | 4.330827 | false | false | false |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/test/test_cgi.py | 2 | 19794 | from test.support import check_warnings
import cgi
import os
import sys
import tempfile
import unittest
import warnings
from collections import namedtuple
from io import StringIO, BytesIO
class HackedSysModule:
# The regression test will have real values in sys.argv, which
# will completely confuse the test of the cgi module
argv = []
stdin = sys.stdin
cgi.sys = HackedSysModule()
class ComparableException:
def __init__(self, err):
self.err = err
def __str__(self):
return str(self.err)
def __eq__(self, anExc):
if not isinstance(anExc, Exception):
return NotImplemented
return (self.err.__class__ == anExc.__class__ and
self.err.args == anExc.args)
def __getattr__(self, attr):
return getattr(self.err, attr)
def do_test(buf, method):
env = {}
if method == "GET":
fp = None
env['REQUEST_METHOD'] = 'GET'
env['QUERY_STRING'] = buf
elif method == "POST":
fp = BytesIO(buf.encode('latin-1')) # FieldStorage expects bytes
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
env['CONTENT_LENGTH'] = str(len(buf))
else:
raise ValueError("unknown method: %s" % method)
try:
return cgi.parse(fp, env, strict_parsing=1)
except Exception as err:
return ComparableException(err)
parse_strict_test_cases = [
("", ValueError("bad query field: ''")),
("&", ValueError("bad query field: ''")),
("&&", ValueError("bad query field: ''")),
(";", ValueError("bad query field: ''")),
(";&;", ValueError("bad query field: ''")),
# Should the next few really be valid?
("=", {}),
("=&=", {}),
("=;=", {}),
# This rest seem to make sense
("=a", {'': ['a']}),
("&=a", ValueError("bad query field: ''")),
("=a&", ValueError("bad query field: ''")),
("=&a", ValueError("bad query field: 'a'")),
("b=a", {'b': ['a']}),
("b+=a", {'b ': ['a']}),
("a=b=a", {'a': ['b=a']}),
("a=+b=a", {'a': [' b=a']}),
("&b=a", ValueError("bad query field: ''")),
("b&=a", ValueError("bad query field: 'b'")),
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
{'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
'cuyer': ['r'],
'expire': ['964546263'],
'kid': ['130003.300038'],
'lobale': ['en-US'],
'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
'ss': ['env'],
'view': ['bustomer'],
}),
("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
{'SUBMIT': ['Browse'],
'_assigned_to': ['31392'],
'_category': ['100'],
'_status': ['1'],
'group_id': ['5470'],
'set': ['custom'],
})
]
def norm(seq):
return sorted(seq, key=repr)
def first_elts(list):
return [p[0] for p in list]
def first_second_elts(list):
return [(p[0], p[1][0]) for p in list]
def gen_result(data, environ):
encoding = 'latin-1'
fake_stdin = BytesIO(data.encode(encoding))
fake_stdin.seek(0)
form = cgi.FieldStorage(fp=fake_stdin, environ=environ, encoding=encoding)
result = {}
for k, v in dict(form).items():
result[k] = isinstance(v, list) and form.getlist(k) or v.value
return result
class CgiTests(unittest.TestCase):
def test_parse_multipart(self):
fp = BytesIO(POSTDATA.encode('latin1'))
env = {'boundary': BOUNDARY.encode('latin1'),
'CONTENT-LENGTH': '558'}
result = cgi.parse_multipart(fp, env)
expected = {'submit': [b' Add '], 'id': [b'1234'],
'file': [b'Testing 123.\n'], 'title': [b'']}
self.assertEqual(result, expected)
def test_fieldstorage_properties(self):
fs = cgi.FieldStorage()
self.assertFalse(fs)
self.assertIn("FieldStorage", repr(fs))
self.assertEqual(list(fs), list(fs.keys()))
fs.list.append(namedtuple('MockFieldStorage', 'name')('fieldvalue'))
self.assertTrue(fs)
def test_fieldstorage_invalid(self):
self.assertRaises(TypeError, cgi.FieldStorage, "not-a-file-obj",
environ={"REQUEST_METHOD":"PUT"})
self.assertRaises(TypeError, cgi.FieldStorage, "foo", "bar")
fs = cgi.FieldStorage(headers={'content-type':'text/plain'})
self.assertRaises(TypeError, bool, fs)
def test_escape(self):
# cgi.escape() is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'cgi\.escape',
DeprecationWarning)
self.assertEqual("test & string", cgi.escape("test & string"))
self.assertEqual("<test string>", cgi.escape("<test string>"))
self.assertEqual(""test string"", cgi.escape('"test string"', True))
def test_strict(self):
for orig, expect in parse_strict_test_cases:
# Test basic parsing
d = do_test(orig, "GET")
self.assertEqual(d, expect, "Error parsing %s method GET" % repr(orig))
d = do_test(orig, "POST")
self.assertEqual(d, expect, "Error parsing %s method POST" % repr(orig))
env = {'QUERY_STRING': orig}
fs = cgi.FieldStorage(environ=env)
if isinstance(expect, dict):
# test dict interface
self.assertEqual(len(expect), len(fs))
self.assertCountEqual(expect.keys(), fs.keys())
##self.assertEqual(norm(expect.values()), norm(fs.values()))
##self.assertEqual(norm(expect.items()), norm(fs.items()))
self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
# test individual fields
for key in expect.keys():
expect_val = expect[key]
self.assertIn(key, fs)
if len(expect_val) > 1:
self.assertEqual(fs.getvalue(key), expect_val)
else:
self.assertEqual(fs.getvalue(key), expect_val[0])
def test_log(self):
cgi.log("Testing")
cgi.logfp = StringIO()
cgi.initlog("%s", "Testing initlog 1")
cgi.log("%s", "Testing log 2")
self.assertEqual(cgi.logfp.getvalue(), "Testing initlog 1\nTesting log 2\n")
if os.path.exists(os.devnull):
cgi.logfp = None
cgi.logfile = os.devnull
cgi.initlog("%s", "Testing log 3")
self.addCleanup(cgi.closelog)
cgi.log("Testing log 4")
def test_fieldstorage_readline(self):
# FieldStorage uses readline, which has the capacity to read all
# contents of the input file into memory; we use readline's size argument
# to prevent that for files that do not contain any newlines in
# non-GET/HEAD requests
class TestReadlineFile:
def __init__(self, file):
self.file = file
self.numcalls = 0
def readline(self, size=None):
self.numcalls += 1
if size:
return self.file.readline(size)
else:
return self.file.readline()
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
f = TestReadlineFile(tempfile.TemporaryFile("wb+"))
self.addCleanup(f.close)
f.write(b'x' * 256 * 1024)
f.seek(0)
env = {'REQUEST_METHOD':'PUT'}
fs = cgi.FieldStorage(fp=f, environ=env)
self.addCleanup(fs.file.close)
# if we're not chunking properly, readline is only called twice
# (by read_binary); if we are chunking properly, it will be called 5 times
# as long as the chunksize is 1 << 16.
self.assertGreater(f.numcalls, 2)
f.close()
def test_fieldstorage_multipart(self):
#Test basic FieldStorage multipart parsing
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH': '558'}
fp = BytesIO(POSTDATA.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 4)
expect = [{'name':'id', 'filename':None, 'value':'1234'},
{'name':'title', 'filename':None, 'value':''},
{'name':'file', 'filename':'test.txt', 'value':b'Testing 123.\n'},
{'name':'submit', 'filename':None, 'value':' Add '}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_multipart_leading_whitespace(self):
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH': '560'}
# Add some leading whitespace to our post data that will cause the
# first line to not be the innerboundary.
fp = BytesIO(b"\r\n" + POSTDATA.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 4)
expect = [{'name':'id', 'filename':None, 'value':'1234'},
{'name':'title', 'filename':None, 'value':''},
{'name':'file', 'filename':'test.txt', 'value':b'Testing 123.\n'},
{'name':'submit', 'filename':None, 'value':' Add '}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_multipart_non_ascii(self):
#Test basic FieldStorage multipart parsing
env = {'REQUEST_METHOD':'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH':'558'}
for encoding in ['iso-8859-1','utf-8']:
fp = BytesIO(POSTDATA_NON_ASCII.encode(encoding))
fs = cgi.FieldStorage(fp, environ=env,encoding=encoding)
self.assertEqual(len(fs.list), 1)
expect = [{'name':'id', 'filename':None, 'value':'\xe7\xf1\x80'}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_multipart_maxline(self):
# Issue #18167
maxline = 1 << 16
self.maxDiff = None
def check(content):
data = """---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain
%s
---123--
""".replace('\n', '\r\n') % content
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'REQUEST_METHOD': 'POST',
}
self.assertEqual(gen_result(data, environ),
{'upload': content.encode('latin1')})
check('x' * (maxline - 1))
check('x' * (maxline - 1) + '\r')
check('x' * (maxline - 1) + '\r' + 'y' * (maxline - 1))
def test_fieldstorage_multipart_w3c(self):
# Test basic FieldStorage multipart parsing (W3C sample)
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY_W3),
'CONTENT_LENGTH': str(len(POSTDATA_W3))}
fp = BytesIO(POSTDATA_W3.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 2)
self.assertEqual(fs.list[0].name, 'submit-name')
self.assertEqual(fs.list[0].value, 'Larry')
self.assertEqual(fs.list[1].name, 'files')
files = fs.list[1].value
self.assertEqual(len(files), 2)
expect = [{'name': None, 'filename': 'file1.txt', 'value': b'... contents of file1.txt ...'},
{'name': None, 'filename': 'file2.gif', 'value': b'...contents of file2.gif...'}]
for x in range(len(files)):
for k, exp in expect[x].items():
got = getattr(files[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_part_content_length(self):
BOUNDARY = "JfISa01"
POSTDATA = """--JfISa01
Content-Disposition: form-data; name="submit-name"
Content-Length: 5
Larry
--JfISa01"""
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH': str(len(POSTDATA))}
fp = BytesIO(POSTDATA.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 1)
self.assertEqual(fs.list[0].name, 'submit-name')
self.assertEqual(fs.list[0].value, 'Larry')
def test_fieldstorage_as_context_manager(self):
fp = BytesIO(b'x' * 10)
env = {'REQUEST_METHOD': 'PUT'}
with cgi.FieldStorage(fp=fp, environ=env) as fs:
content = fs.file.read()
self.assertFalse(fs.file.closed)
self.assertTrue(fs.file.closed)
self.assertEqual(content, 'x' * 10)
with self.assertRaisesRegex(ValueError, 'I/O operation on closed file'):
fs.file.read()
_qs_result = {
'key1': 'value1',
'key2': ['value2x', 'value2y'],
'key3': 'value3',
'key4': 'value4'
}
def testQSAndUrlEncode(self):
data = "key2=value2x&key3=value3&key4=value4"
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'QUERY_STRING': 'key1=value1&key2=value2y',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormData(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormDataFile(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain
this is the content of the fake file
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
result = self._qs_result.copy()
result.update({
'upload': b'this is the content of the fake file\n'
})
v = gen_result(data, environ)
self.assertEqual(result, v)
def test_deprecated_parse_qs(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qs is deprecated, use urllib.parse.'
'parse_qs instead', DeprecationWarning)):
self.assertEqual({'a': ['A1'], 'B': ['B3'], 'b': ['B2']},
cgi.parse_qs('a=A1&b=B2&B=B3'))
def test_deprecated_parse_qsl(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qsl is deprecated, use urllib.parse.'
'parse_qsl instead', DeprecationWarning)):
self.assertEqual([('a', 'A1'), ('b', 'B2'), ('B', 'B3')],
cgi.parse_qsl('a=A1&b=B2&B=B3'))
def test_parse_header(self):
self.assertEqual(
cgi.parse_header("text/plain"),
("text/plain", {}))
self.assertEqual(
cgi.parse_header("text/vnd.just.made.this.up ; "),
("text/vnd.just.made.this.up", {}))
self.assertEqual(
cgi.parse_header("text/plain;charset=us-ascii"),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"'),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"; another=opt'),
("text/plain", {"charset": "us-ascii", "another": "opt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="silly.txt"'),
("attachment", {"filename": "silly.txt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name"'),
("attachment", {"filename": "strange;name"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name";size=123;'),
("attachment", {"filename": "strange;name", "size": "123"}))
self.assertEqual(
cgi.parse_header('form-data; name="files"; filename="fo\\"o;bar"'),
("form-data", {"name": "files", "filename": 'fo"o;bar'}))
BOUNDARY = "---------------------------721837373350705526688164684"
POSTDATA = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
1234
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="title"
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="file"; filename="test.txt"
Content-Type: text/plain
Testing 123.
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="submit"
Add\x20
-----------------------------721837373350705526688164684--
"""
POSTDATA_NON_ASCII = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
\xe7\xf1\x80
-----------------------------721837373350705526688164684
"""
# http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4
BOUNDARY_W3 = "AaB03x"
POSTDATA_W3 = """--AaB03x
Content-Disposition: form-data; name="submit-name"
Larry
--AaB03x
Content-Disposition: form-data; name="files"
Content-Type: multipart/mixed; boundary=BbC04y
--BbC04y
Content-Disposition: file; filename="file1.txt"
Content-Type: text/plain
... contents of file1.txt ...
--BbC04y
Content-Disposition: file; filename="file2.gif"
Content-Type: image/gif
Content-Transfer-Encoding: binary
...contents of file2.gif...
--BbC04y--
--AaB03x--
"""
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,058,882,502,354,449,000 | 36.136961 | 183 | 0.552642 | false | 3.547312 | true | false | false |
cmcginty/PyWeather | setup.py | 1 | 1249 | #! /usr/bin/env python
#
# PyWeather
# (c) 2010 Patrick C. McGinty <[email protected]>
# (c) 2005 Christopher Blunck <[email protected]>
#
# You're welcome to redistribute this software under the
# terms of the GNU General Public Licence version 2.0
# or, at your option, any higher version.
#
# You can read the complete GNU GPL in the file COPYING
# which should come along with this software, or visit
# the Free Software Foundation's WEB site http://www.fsf.org
#
import os
from distutils.core import setup
import weather as pkg
name = pkg.__name__
def _read(*path_name):
return open(os.path.join(os.path.dirname(__file__), *path_name)).read()
setup(name=name,
version=pkg.__version__,
license="GNU GPL",
description=pkg.__doc__,
long_description=_read('README'),
author="Patrick C. McGinty, Christopher Blunck",
author_email="[email protected], [email protected]",
url="http://github.com/cmcginty/PyWeather",
download_url="https://github.com/cmcginty/PyWeather/archive/%s.zip" %
pkg.__version__,
packages=[
name,
name + '.services',
name + '.stations',
name + '.units',
],
scripts=['scripts/weatherpub.py'],
)
| gpl-3.0 | 8,256,031,794,039,481,000 | 27.386364 | 75 | 0.65012 | false | 3.269634 | false | false | false |
Jan-zou/LeetCode | python/Stack/155_min_stack.py | 1 | 2757 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example:
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); --> Returns -3.
minStack.pop();
minStack.top(); --> Returns 0.
minStack.getMin(); --> Returns -2.
Tags: Stack, Design
'''
class MinStack(object):
    # O(1) time per operation, O(n) worst-case extra space (minstack keeps only new minima)
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.minstack = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack.append(x)
if not self.minstack:
self.minstack.append(x)
elif x <= self.minstack[-1]:
self.minstack.append(x)
def pop(self):
"""
:rtype: void
"""
if self.stack:
x = self.stack.pop()
if x == self.minstack[-1]:
self.minstack.pop()
def top(self):
"""
:rtype: int
"""
if self.stack:
return self.stack[-1]
def getMin(self):
"""
:rtype: int
"""
if self.minstack != []:
return self.minstack[-1]
class MinStack2(object):
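    # Design note (added comment): this variant stores [value, count] pairs in
    # minstack so that duplicate minima cost O(1) extra space; pop() decrements
    # the count and drops the entry only when it reaches zero.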
def __init__(self):
"""
initialize your data structure here.
"""
self.stack, self.minstack = [], []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack.append(x)
if self.minstack != []:
if x < self.minstack[-1][0]:
self.minstack.append([x, 1])
elif x == self.minstack[-1][0]:
self.minstack[-1][1] += 1
else:
self.minstack.append([x, 1])
def pop(self):
"""
:rtype: void
"""
x = self.stack.pop()
if x == self.minstack[-1][0]:
self.minstack[-1][1] -= 1
if self.minstack[-1][1] == 0:
self.minstack.pop()
def top(self):
"""
:rtype: int
"""
return self.stack[-1]
def getMin(self):
"""
:rtype: int
"""
if self.minstack != []:
return self.minstack[-1][0]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
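# A minimal local sanity check (illustrative only, not part of the LeetCode
# harness); it replays the example sequence from the problem docstring above.
if __name__ == '__main__':
    stack = MinStack()
    stack.push(-2)
    stack.push(0)
    stack.push(-3)
    assert stack.getMin() == -3
    stack.pop()
    assert stack.top() == 0
    assert stack.getMin() == -2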
| mit | 7,952,908,537,007,913,000 | 21.958333 | 101 | 0.483485 | false | 3.673333 | false | false | false |
SuperTaiyaki/paifu-tools | start.py | 1 | 1201 | #!/usr/bin/env python
import glob
import random
import cgi
import cgitb
import zipfile
import bz2
import tarfile
import subprocess
cgitb.enable()
#files = glob.glob("hands/*")
# Yes this is stupid. But so is the zip format - it doesn't compress between
# files. The tarfile module doesn't allow streaming out single files.
#smallblob = bz2.BZ2File("hands.zip.bz2")
#blob = zipfile.ZipFile("hands.zip")
#blob = tarfile.open("hands.tar.bz2")
#hand = random.choice(blob.getnames())
hands = subprocess.check_output(["tar", "tf", "hands.tar.gz"]).splitlines()
hand = random.choice(hands)
#hand = 'hands/hand1346582280164'
# maybe filter out empty files?
form = cgi.FieldStorage()
score = form.getfirst('score', 0)
try:
score = int(score)
except Exception, e:
score = 0
target = '/cgi-bin/gamestate.py?hand=%s' % hand[6:]
if score:
target += '&score=%d' % score
hands = form.getfirst('hands', 0)
try:
hands = int(hands)
except Exception, e:
hands = 0
if hands:
target += '&hands=%d' % hands
print 'Status: 200 OK' # This seems to make the browser hide the real URL... good for hiding the score
print 'Content-Type: text/plain'
print 'Location: %s' % target
print
# Should put a HTML redirect here
| gpl-3.0 | 5,272,151,780,071,715,000 | 25.108696 | 102 | 0.711907 | false | 2.95086 | false | false | false |
puhitaku/Booth-algorithm | division.py | 1 | 3792 | from bitwise import *
def split(l, c=0):
"""Split the given list in two."""
return (l[: int(len(l)/2)], l[int(len(l)/2) : None if c == 0 else c])
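# Examples (illustrative): split("10110100") -> ("1011", "0100");
# split("10110100", -1) -> ("1011", "010"), i.e. the trailing bit is dropped.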
def main():
print("This program excecutes (Non-)restoring division algorithm.\n")
print("The formula it's going to calculate is: X / Y = ?")
print("Choose which algorithm (R)estoring or (N)on-restoring [r/n]: ", end="")
while True:
inp = str(input())[0]
if inp in ["n", "N"]:
algorithm = "n"
break
elif inp in ["r", "R"]:
algorithm = "r"
break
else:
print("Input R or N. ", end="")
print("Input the bit length of SECOND variable Y: ", end="")
ylen = int(input())
xlen = ylen * 2
print("(The bit length of X is: len(Y)*2 = %d)" % xlen)
print("Input the number of first variable X: ", end="")
x = int(input())
if x < 0:
x = TwoComp( ("{0:0%db}" % xlen).format(x) ) #Calculate the two's complement number of x
else:
x = ("{0:0%db}" % xlen).format(x) #Convert to bits and assign directly
print("Input the number of second variable Y: ", end="")
y = int(input())
if y < 0:
y = TwoComp( ("{0:0%db}" % ylen).format(y) )
else:
y = ("{0:0%db}" % ylen).format(y)
n = ylen
c = ""
#----- Prepare internal variables -----#
print("Internal variables:")
print("X = %s %s" % (x[:ylen], x[ylen:]))
print("Y =", y)
print("n =", n)
print("")
#----- Algorithm start -----#
print("#Algorithm start: %s\n" % ("Restoring" if algorithm == "r" else "Non-restoring"))
if not "1" in y:
print("Y is zero. Aborting.")
return
print("X1 = X1 - Y\t\t", end="")
x = BitAdd(x, TwoComp(y) + GenZeroStr(ylen), xlen)
print("X = %s %s" % split(x))
if x[0] == "0":
print("X1 is positive or zero. Aborting.")
return
x = BitShift(x, -1)
print("[X1][X2][C] << 1\tX = %s %sC" % split(x, -1))
print("X1 = X1 + Y\t\t", end="")
x = BitAdd(x, y + GenZeroStr(ylen), xlen)
print("X = %s %sC" % split(x, -1))
print("n = n - 1 = %d" % (n-1))
n -= 1
#--- Go into the loop --- #
print("\n#Into the loop...\n")
if algorithm == "r":
        pass  # the restoring variant is not implemented; only the shared pre/post steps run
elif algorithm == "n":
for i in range(n): # X1 != 0
print("Step %d:" % (i+1))
if x[0] == "0": # X1 >= 0
c = "1"
print("X1 >= 0 -> c = 1", end="")
x = x[:-1] + c #[X1][X2][C] << 1
print("\tX = %s %s" % split(x))
x = BitShift(x, -1) #Shift bits leftward
print("[X1][X2][C] << 1\tX = %s %sC" % split(x, -1))
x = BitAdd(x, TwoComp(y) + GenZeroStr(ylen), xlen) #X1 = X1 - Y
print("X1 = X1 - Y\t\tX = %s %sC" % split(x, -1))
else:
c = "0"
print("X1 < 0 -> c = 0", end="")
x = x[:-1] + c
print("\t\tX = %s %s" % split(x))
x = BitShift(x, -1)
print("[X1][X2][C] << 1\tX = %s %sC" % split(x, -1))
x = BitAdd(x, y + GenZeroStr(ylen), xlen) #X1 = X1 + Y
print("X1 = X1 + Y\t\tX = %s %sC" % split(x, -1))
print("")
if x[0] == "0": # X1 >= 0
print("X1 >= 0 -> C = 1")
c = "1"
x = x[:-1] + c
else:
print("X1 < 0 -> C = 0")
c = "0"
x = x[:-1] + c
x = BitAdd(x, y + GenZeroStr(ylen), xlen)
print("X1 = X1 + Y")
print("X = %s %s" % split(x))
print("")
print("The answer is: R = %s, Q = %s" % split(x))
if __name__ == "__main__":
main() | unlicense | -3,129,163,567,736,293,400 | 29.58871 | 99 | 0.427479 | false | 2.944099 | false | false | false |
karstenw/nodebox-pyobjc | examples/Path/pathops.py | 1 | 1435 | size(600, 1000)
# pathmatics functions
nofill()
stroke(0)
def label(s,x,y):
"""put a black label preserving fill color."""
push()
c = fill()
fill(0)
text(s,x,y)
fill(c)
pop()
def circlepath(x, y, r):
"""Make a circle with curveto."""
    r2 = r * 0.5555 # control-point offset (the exact Bezier circle constant is ~0.5523)
autoclosepath(close=True)
beginpath(x, y-r)
curveto(x+r2, y-r, x+r, y-r2, x+r, y)
curveto(x+r, y+r2, x+r2, y+r, x , y+r)
curveto(x-r2, y+r, x-r, y+r2, x-r, y)
curveto(x-r, y-r2, x-r2, y-r, x, y-r)
return endpath(draw=False)
# normal
c1 = circlepath( 200, 100, 100)
c2 = circlepath( 300, 100, 100)
drawpath(c1)
drawpath(c2)
label("Normal", 420, 100)
print "Path c1 intersects path c2:", c1.intersects(c2)
# flatness should always be 0.5
var("flatness", NUMBER, 0.6, 0.1, 5.0)
print "flatness:", flatness
# union
c1 = circlepath( 200, 300, 100)
c2 = circlepath( 300, 300, 100)
drawpath(c1.union(c2, flatness=flatness))
label("Union", 420, 300)
# difference
c1 = circlepath( 200, 500, 100)
c2 = circlepath( 300, 500, 100)
drawpath(c1.difference(c2, flatness=flatness))
label("Difference", 420, 500)
# intersect
c1 = circlepath( 200, 700, 100)
c2 = circlepath( 300, 700, 100)
drawpath(c1.intersect(c2, flatness=flatness))
label("Intersection", 420, 700)
# xor
fill(0)
c1 = circlepath( 200, 900, 100)
c2 = circlepath( 300, 900, 100)
drawpath(c1.xor(c2, flatness=flatness))
label("XOR", 420, 900)
| mit | -7,814,168,234,586,585,000 | 18.391892 | 54 | 0.630662 | false | 2.348609 | false | false | false |
adnane1deev/Hook | bin/cmd_line.py | 1 | 46914 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import argparse
import cli_browser
import helper_functions as helpers
import hook_system_variables as hook
from colorama import init, Fore, Back
from HTMLParser import HTMLParser
from datetime import datetime
import os_operations as op
import cli_download_manager as idm
import package_manager as manager
import os
import helper_functions as helper
import app_setup as app
import time
import subprocess as sp
class cmd_line(object):
def __init__(self):
self.__parser = argparse.ArgumentParser(
prog=hook.application_name,
description=hook.application_description,
#epilog=Back.BLUE + hook.additional_description + Back.RESET,
usage=hook.usage_syntax,
version=Fore.YELLOW + hook.application_name + ' ' + hook.application_version + Fore.RESET,
conflict_handler='resolve'
)
def __cmd_init(self, _interactive=False, _pversion=False, _commands=None):
created_time = datetime.today().strftime('%H:%M:%S - %b, %d %Y')
print '[+] : Created at {0}'.format(created_time)
print '[+] : Installed packages {0}'.format(0)
print '[+] : Updates at the working directory {0}'.format(0)
packages = []
if _interactive:
print
want_more = 'Y'
while want_more in ('y', 'Y'):
repository = raw_input("Offer your package name: ")
if repository in ('q', 'quit', 'exit'):
break
print
cmd_browser = cli_browser.cli_browser()
cmd_browser.setRequestedURL("https://github.com/search?q={0}&type=Repositories&ref=searchresults".format(repository))
response = cmd_browser.submit()
repos_list = cmd_browser.parseResponse(response)
parser = HTMLParser()
length = len(repos_list)
for repo_index in range(length):
tpl = repos_list[repo_index]
print parser.unescape("[{0:2}] : {1} {2}".format((repo_index+1), tpl[0][1:], '- '+re.sub(r'<em>|</em>', '', tpl[2]).strip()))
if length > 0:
print
package_number = -1
while package_number < 0 or package_number > length:
try:
_input = raw_input("Choose your package number (1: DEFAULT, 0: IGNORE): ")
package_number = int(_input)
except ValueError:
package_number = 1
if package_number == 0:
continue
package_name = repos_list[(package_number-1)][0][1:]
package_version = '*'
if _pversion:
cmd_browser.setRequestedURL('https://github.com/{0}/tags'.format(package_name))
response = cmd_browser.submit()
versions = cmd_browser.parseVersions(response)
if len(versions) > 0:
for vr in versions:
print vr, ', ',
print
package_version = raw_input("Choose your package number (latest: DEFAULT): ")
if package_version == '':
package_version = versions[0]
else:
print Back.RED+"There is no releases"+Back.RESET
po = {"package": package_name, "version": package_version}
packages.append(po)
print
print "[+] : {0} added to your target packages".format(package_name)
print
else:
print "There is no package named: {0}".format(repository)
want_more = raw_input("Do you want more packages (y/N): ")
cmd_browser.closeConnections()
else:
_packages = _commands[1:]
for pkg in _packages:
dtls = pkg.split(':')
packages.append({"package": dtls[0], "version": dtls[1]})
#print
#d.pretty_print(packages)
self.__setup_workspace(packages, {"created_at": created_time, "installed_packages": [], "workspace_updates": []})
def __cmd_self_install(self):
app.setup()
def __cmd_create(self, _args, _version=False, _foundation=False, _bootstrap=False):
step = ""
if not _version:
if len(_args) < 3:
print Fore.YELLOW + "Not enough arguments" + Fore.RESET
return
else:
if len(_args) < 4:
print Fore.YELLOW + "Not enough arguments" + Fore.RESET
return
project_type = _args[0]
project_name = _args[2]
if project_type != "dynamic" and project_type != "static":
print Back.RED + " Unrecognized command " + Back.RESET
return
if project_type == "dynamic":
try:
step = "php"
print "checking if php is installed on the system ..."
sp.check_call(['php', '-v'])
print "php is verified successfully"
print "downloading composer to your current working directory ..."
if not op.is_exits('composer.phar'):
ps = sp.Popen(('php', '-r', "readfile('https://getcomposer.org/installer');"), stdout=sp.PIPE)
output = sp.check_output('php', stdin=ps.stdout)
ps.wait()
print "composer is successfully downloaded, you can use separately by typing composer.phar [options] command [arguments] in your command prompt"
else:
print "composer downloading operation is canceled, composer is found in your current working directory"
if not _version:
sp.call(['php', 'composer.phar', 'create-project', _args[1], project_name])
elif _version:
sp.call(['php', 'composer.phar', 'create-project', _args[1], project_name, _args[3]])
except:
if step == "php":
print Fore.YELLOW + "php is not found in your system's environment path, you may first setup your local development environment and try again" + Fore.RESET
elif step == "composer":
print Fore.YELLOW + "composer is not found in your system's environment path, use --getcomposer option instead" + Fore.RESET
elif project_type == "static":
pass
def __cmd_add(self, _args):
file_name = 'hook.json'
if not op.is_exits(file_name):
print "hook can't find " + file_name
return
if len(_args) == 0:
return
require_list = helpers.load_json_file(file_name)
for arg in _args:
try:
repository, version = arg.split(':')
if not helper.is_repository(repository):
print repository + " is not a valid repository name"
continue
package = {"package": repository, "version": version}
require_list['require'].append(package)
except ValueError:
print arg + " is not a valid argument"
pass
helper.prettify(require_list)
op.create_file(file_name, _content=helper.object_to_json(require_list))
def __cmd_install(self):
browser_object = cli_browser.cli_browser()
browser_connection = browser_object.getHttpConnection()
download_manager = idm.download_manager()
download_manager.plugInBrowserWithDownloadManager(browser_connection)
if not op.is_exits('hook.json'):
print "hook can't find hook.json"
return
require_list = helpers.load_json_file('hook.json')['require']
#helpers.prettify(list)
urls = []
repositories = []
for pkg in require_list:
name = pkg['package']
version = ''
if pkg['version'] == '*':
browser_object.setRequestedURL('https://github.com/{0}/tags'.format(name))
response = browser_object.submit()
versions_list = browser_object.parseVersions(response)
if len(versions_list) == 0:
print Back.RED+'No releases for {0}'.format(name)+Back.RESET
version = 'master'
else:
version = versions_list[0]
else:
version = pkg['version']
#url = 'https://github.com/fabpot/Twig/archive/v1.16.0.zip'
url = 'https://github.com/{0}/archive/{1}.zip'.format(name, version)
repositories.append(name)
#print url
urls.append(url)
download_manager.startQueue(urls, _repositories=repositories)
browser_connection.close()
browser_object.closeConnections()
def __cmd_search(self, _package, _surfing=False, _current="1", _av_pages=-1):
if len(_package) == 0:
print "No package is provided"
return
cmd_browser = cli_browser.cli_browser()
while True:
current = _current
av_pages = _av_pages
prompt_message = "Choose your package number (1: DEFAULT, q: QUIT): "
repository = _package[0]
cmd_browser.setRequestedURL("https://github.com/search?q={0}&p={1}&type=Repositories&ref=searchresults".format(repository, current))
response = ''
new_line = ''
while True:
response = cmd_browser.submit()
if response is not None:
if new_line == "#":
print "\n"
break
new_line = "#"
#cmd_browser.closeConnections()
#cmd_browser = cli_browser.cli_browser()
#cmd_browser.setRequestedURL("https://github.com/search?q={0}&p={1}&type=Repositories&ref=searchresults".format(repository, current))
repos_list = cmd_browser.parseResponse(response)
parser = HTMLParser()
length = len(repos_list)
if length > 0:
print Fore.BLUE + "{0:6} {1}\n".format("Num", "Repository - Description") + Fore.RESET
for repo_index in range(length):
tpl = repos_list[repo_index]
print parser.unescape("[{0:2}] : {1} {2}".format((repo_index+1), tpl[0][1:], '- '+re.sub(r'<em>|</em>', '', tpl[2]).strip()))
if length > 0:
if _surfing:
#print
current = cmd_browser.getCurrentPage(response)
print "\nCurrent page: {0}".format(current)
print "Available pages: ",
pages = cmd_browser.parsePagination(response)
if av_pages == -1 and pages is not None:
#print pages[-1]
#print pages
av_pages = int(pages[-1])
#if pages is not None:
print av_pages,
else:
if av_pages == -1:
av_pages = 1
print av_pages if av_pages != -1 else 1,
prompt_message = "\nChoose your (package number/action) (1: DEFAULT, p: PREVIOUS, n: NEXT, r: RESET, q: QUIT): "
#print
package_number = -1
_input = ''
try:
print
_input = raw_input(prompt_message)
package_number = int(_input)
except ValueError:
#print av_pages
if _surfing and _input in ('p', 'n', 'r', 'q') and 0 < int(current) <= av_pages:
print
if _input == 'p':
_current = str(int(current)-1)
_av_pages = av_pages
elif _input == 'n':
crnt = int(current)+1
if crnt > av_pages:
crnt = av_pages
_current = str(crnt)
_av_pages = av_pages
elif _input == 'r':
_current = '1'
_av_pages = av_pages
else:
print "Hook is quitting ..."
cmd_browser.closeConnections()
return
continue
elif _input == 'q':
print "\nHook is quitting ..."
cmd_browser.closeConnections()
return
else:
package_number = 1
if package_number < 0:
package_number = 1
elif package_number > length:
package_number = length
package_name = repos_list[(package_number-1)][0][1:]
cmd_browser.setRequestedURL('https://github.com/{0}/tags'.format(package_name))
response = cmd_browser.submit()
versions = cmd_browser.parseVersions(response)
print "\n" + Back.BLUE + package_name + Back.RESET + " versions" + "\n"
if len(versions) > 0:
for vr in versions:
print vr, ', ',
else:
print Back.RED+"There is no releases"+Back.RESET
else:
print "There is no package named: {0}".format(Fore.YELLOW + repository + Fore.RESET)
break
cmd_browser.closeConnections()
def __cmd_list(self):
manager.get_list_of_installed_packages()
def __cmd_info(self):
print "information"
def __cmd_update(self, args):
if not args:
browser_object = cli_browser.cli_browser()
browser_connection = browser_object.getHttpConnection()
download_manager = idm.download_manager()
download_manager.plugInBrowserWithDownloadManager(browser_connection)
installed_list = manager.get_installed_packages()
length = len(installed_list)
for package_index in range(length):
pkg = installed_list[package_index]
name, version = re.search(r'(.+?)\-([\d\w\.]*)\.zip', pkg['package'], re.IGNORECASE).groups()
#print
browser_object.setRequestedURL('https://github.com/{0}/tags'.format(pkg['repository']))
response = browser_object.submit()
versions_list = browser_object.parseVersions(response)
if len(versions_list) == 0 and version == 'master':
print Fore.GREEN + name + Fore.RESET + " is already up-to-date"
continue
elif versions_list[0] == version or versions_list[0] == 'v' + version:
print Fore.GREEN + name + Fore.RESET + " is already up-to-date"
continue
print 'Update process: ' + Fore.YELLOW + version + Fore.RESET + ' -> ' + Fore.GREEN + versions_list[0] + Fore.RESET
message = " is going to be updated to " + Fore.GREEN + name + ' (' + versions_list[0] + ')' + Fore.RESET
print "\t" + Fore.YELLOW + "{0} ({1})".format(name, version) + Fore.RESET + message
url = 'https://github.com/{0}/archive/{1}.zip'.format(pkg['repository'], versions_list[0])
download_manager.startQueue(url, _params={"repository": pkg['repository'], "type": "update", "old_pkg": pkg['package']})
browser_connection.close()
browser_object.closeConnections()
return
package = args[0]
matching_list = manager.match_package(package)
if len(matching_list) > 0:
print Back.BLUE + " Package(s) matching " + Back.RESET + " ({0})\n".format(package)
self.__update_helper_interface(manager.match_package(package))
else:
print Fore.YELLOW + "No package matches " + Fore.RESET + "({0})\n".format(package)
def __update_helper_interface(self, installed_list):
browser_object = cli_browser.cli_browser()
browser_connection = browser_object.getHttpConnection()
download_manager = idm.download_manager()
download_manager.plugInBrowserWithDownloadManager(browser_connection)
length = len(installed_list)
_input = ''
try:
print Fore.BLUE+"{0:4} {1:28}{2:28}{3:28}".format("Num", "Installed at", "Name", "Version")+Fore.RESET
print
for item_index in range(length):
pkg = installed_list[item_index]
installed_at = pkg['installed_at']
name, version = re.search(r'(.+?)\-([\d\w\.]*)\.zip', pkg['package'], re.IGNORECASE).groups()
print "[{0:2}] {1:28}{2:28}{3:28}".format((item_index+1), installed_at, name, version)
print
while True:
_input = raw_input("Choose your package number (1: DEFAULT, q: QUIT): ")
if _input == "":
_input = 1
package_index = int(_input)
if 0 < package_index <= length:
pkg = installed_list[package_index-1]
name, version = re.search(r'(.+?)\-([\d\w\.]*)\.zip', pkg['package'], re.IGNORECASE).groups()
print
browser_object.setRequestedURL('https://github.com/{0}/tags'.format(pkg['repository']))
response = browser_object.submit()
versions_list = browser_object.parseVersions(response)
if len(versions_list) == 0 and version == 'master':
print name + " is already up-to-date"
browser_connection.close()
browser_object.closeConnections()
return
elif versions_list[0] == version or versions_list[0] == 'v' + version:
print name + " is already up-to-date"
browser_connection.close()
browser_object.closeConnections()
return
print 'Update process: ' + Fore.YELLOW + version + Fore.RESET + ' -> ' + Fore.GREEN + versions_list[0] + Fore.RESET
message = " is going to be updated to " + Fore.GREEN + name + ' (' + versions_list[0] + ')' + Fore.RESET + ". Are you sure (y,N): "
while True:
confirmation = raw_input("\n\t" + Fore.YELLOW + "{0} ({1})".format(name, version) + Fore.RESET + message)
if confirmation in ('y', 'Y', 'yes'):
url = 'https://github.com/{0}/archive/{1}.zip'.format(pkg['repository'], versions_list[0])
download_manager.startQueue(url, _params={"repository": pkg['repository'], "type": "update", "old_pkg": pkg['package']})
browser_connection.close()
browser_object.closeConnections()
print "\nUpdate action on "+name
break
elif confirmation in ('', 'n', 'N', 'no'):
print "\nOperation is canceled"
print "Hook is quitting"
break
break
except ValueError:
if _input not in ('q', 'quit'):
print "No value was specified"
print "Hook is quitting"
except AttributeError as e:
print e.message
browser_connection.close()
browser_object.closeConnections()
def __uninstall_helper_interface(self, installed_list):
length = len(installed_list)
_input = ''
try:
print Fore.BLUE+"{0:4} {1:28}{2:28}{3:28}".format("Num", "Installed at", "Name", "Version")+Fore.RESET
print
for item_index in range(length):
pkg = installed_list[item_index]
installed_at = pkg['installed_at']
name, version = re.search(r'(.+?)\-([\d\w\.]*)\.zip', pkg['package'], re.IGNORECASE).groups()
print "[{0:2}] {1:28}{2:28}{3:28}".format((item_index+1), installed_at, name, version)
print
while True:
_input = raw_input("Choose your package number (1: DEFAULT, q: QUIT): ")
if _input == "":
_input = 1
package_index = int(_input)
if 0 < package_index <= length:
pkg = installed_list[package_index-1]
name, version = re.search(r'(.+?)\-([\d\w\.]*)\.zip', pkg['package'], re.IGNORECASE).groups()
print
print Back.RED + " DANGER ZONE " + Back.RESET + " Selected[{0}]".format(Fore.YELLOW + name + ' (' + version + ')' + Fore.RESET)
while True:
confirmation = raw_input("\n\t" + Fore.RED + "{0} ({1})".format(name, version) + Fore.RESET + " is going to be deleted. Are you sure (y,N): ")
if confirmation in ('y', 'Y', 'yes'):
manager.uninstall_package(name, version)
print "Delete action on "+name
break
elif confirmation in ('', 'n', 'N', 'no'):
print "\nOperation is canceled"
print "Hook is quitting"
break
break
except ValueError:
if _input not in ('q', 'quit'):
print "No value was specified"
print "Hook is quitting"
except AttributeError as e:
print e.message
def __cmd_uninstall(self, _packages=None):
if _packages is None or _packages == []:
installed_list = manager.get_installed_packages()
if len(installed_list) > 0:
self.__uninstall_helper_interface(installed_list)
else:
print Fore.YELLOW + "No packages were installed yet" + Fore.RESET
return
item_to_uninstall = _packages[0]
matching_list = manager.match_package(item_to_uninstall)
if len(matching_list) > 0:
print Back.BLUE + " Package(s) matching " + Back.RESET + " ({0})\n".format(item_to_uninstall)
self.__uninstall_helper_interface(manager.match_package(item_to_uninstall))
else:
print Fore.YELLOW + "No package matches " + Fore.RESET + "({0})\n".format(item_to_uninstall)
def __cmd_profile(self):
if not op.is_exits('.hook/workspace_settings.json'):
manager.settings_not_found_error_print()
return
if not op.is_exits("hook.json"):
print "You're not in the working directory. Switch to the working directory and try again"
return
settings = helper.load_json_file('.hook/workspace_settings.json')
print Back.BLUE + " Workspace: " + Back.RESET + " %0.2f MB\n" % op.get_folder_size(os.getcwd())['mb']
print "\t" + Fore.BLUE + "Created at:" + Fore.RESET + " {0}\n".format(settings['created_at'])
print Back.BLUE + " Components: " + Back.RESET + " %0.1f MB\n" % op.get_folder_size('components')['mb']
components = os.listdir('components')
print "\t" + Fore.BLUE+"{0:32}{1:14}{2:14}\n".format("Name", "Size(mb)", "Size(kb)")+Fore.RESET
for item in components:
size = op.get_folder_size('components/' + item)
print "\t" + "{0:32}{1:14}{2:14}".format(item, ("%0.2f" % size['mb']), ("%d" % size['kb']))
def __cmd_home(self, _repository):
url = ''
if helper.is_ssh_url(_repository):
url = 'https://github.com/' + re.search(r':(.+?).git$', _repository, re.IGNORECASE).group(1)
elif helper.is_http_url(_repository):
url = _repository
elif helper.is_repository(_repository):
url = 'https://github.com/' + _repository
if url == '':
print "No proper information was given"
return
cmd_browser = cli_browser.cli_browser()
cmd_browser.setRequestedURL(url)
print Fore.GREEN + 'Requesting' + Fore.RESET + ' -> ' + url
response = cmd_browser.submit(_return_status_code=True)
try:
response_status = int(response)
print Fore.YELLOW + str(response_status) + Fore.RESET + ': ' + cmd_browser.status_code_desc(response_status)
except ValueError as e:
print Fore.GREEN + 'Opening' + Fore.RESET + ' -> ' + url + ' in the default web browser'
op.open_url(url)
def __cache_list(self):
cache_list = op.list_dir(op.get_home() + op.separator() + hook.data_storage_path)
length = len(cache_list)
print Fore.BLUE + "{0:4} {1:35}{2:10}".format("Num", "File name", "Type") + Fore.RESET
print
for index in range(length):
try:
cached_file = cache_list[index]
name, type = re.search(r'(.+?)\.(zip|rar|gzip|bzip2|tar)', cached_file, re.IGNORECASE).groups()
print "[{0:2}] {1:35}{2:10}".format((index+1), name, type)
except Exception:
pass
def __cache_remove(self, _args):
separator = op.separator()
cache_path = op.get_home() + separator + hook.data_storage_path
cache_list = op.list_dir(cache_path)
if len(_args) > 0:
file_name = _args[0]
matching_list = manager.match_package_by_list(cache_list, file_name)
length = len(matching_list)
if length == 0:
print Fore.YELLOW + 'No file matches ' + Fore.RESET + "({0})\n".format(file_name)
return
"""
if length == 1:
file_name = matching_list[0]
print Back.RED + " DANGER ZONE " + Back.RESET
while True:
confirmation = raw_input("\n\t" + Fore.RED + file_name + Fore.RESET + " is going to be deleted. Are you sure (y,N): ")
if confirmation in ('y', 'Y', 'yes'):
op.remove_file(cache_path + separator + file_name)
print "\n" + Fore.YELLOW + file_name + Fore.RESET + " has been deleted"
break
elif confirmation in ('', 'n', 'N', 'no'):
print "\nOperation is canceled"
print "Hook is quitting"
break
return
"""
_input = ''
try:
print Back.BLUE + ' File(s) matching ' + Back.RESET + " ({0})".format(file_name)
print
print Fore.BLUE + "{0:4} {1:30}".format('Num', 'File name') + Fore.RESET
print
for index in range(length):
print "[{0:2}] {1:30}".format((index + 1), matching_list[index])
print
while True:
_input = raw_input("Choose your file number (1: DEFAULT, q: QUIT): ")
if _input == "":
_input = 1
file_index = int(_input)
if 0 < file_index <= length:
file_name = matching_list[(file_index - 1)]
print "\n" + Back.RED + " WARNING " + Back.RESET + " Selected[{0}]".format(Fore.YELLOW + file_name + Fore.RESET)
while True:
confirmation = raw_input("\n\t" + Fore.RED + file_name + Fore.RESET + " is going to be deleted. Are you sure (y,N): ")
if confirmation in ('y', 'Y', 'yes'):
op.remove_file(cache_path + separator + file_name)
print "\n" + Fore.YELLOW + file_name + Fore.RESET + " has been deleted"
break
elif confirmation in ('', 'n', 'N', 'no'):
print "\nOperation is canceled"
print "Hook is quitting"
break
break
except ValueError:
if _input not in ('q', 'quit'):
print "No value was specified"
print "Hook is quitting"
except AttributeError as e:
print e.message
return
_input = ''
try:
length = len(cache_list)
print Fore.BLUE + "{0:4} {1:30}".format('Num', 'File name') + Fore.RESET
print
for index in range(length):
print "[{0:2}] {1:30}".format((index + 1), cache_list[index])
print
while True:
_input = raw_input("Choose your file number (1: DEFAULT, q: QUIT): ")
if _input == "":
_input = 1
file_index = int(_input)
if 0 < file_index <= length:
file_name = cache_list[(file_index - 1)]
print "\n" + Back.RED + " WARNING " + Back.RESET + " Selected[{0}]".format(Fore.YELLOW + file_name + Fore.RESET)
while True:
confirmation = raw_input("\n\t" + Fore.RED + file_name + Fore.RESET + " is going to be deleted. Are you sure (y,N): ")
if confirmation in ('y', 'Y', 'yes'):
op.remove_file(cache_path + separator + file_name)
print "\n" + Fore.YELLOW + file_name + Fore.RESET + " has been deleted"
break
elif confirmation in ('', 'n', 'N', 'no'):
print "\nOperation is canceled"
print "Hook is quitting"
break
break
except ValueError:
if _input not in ('q', 'quit'):
print "No value was specified"
print "Hook is quitting"
except AttributeError as e:
print e.message
def __cache_info(self):
separator = op.separator()
cache_path = op.get_home() + separator + hook.data_storage_path
cache_list = op.list_dir(cache_path)
length = len(cache_list)
print Fore.BLUE + "{0:4} {1:35}{2:8}{3:28}{4:14}{5:14}".format("Num", "File name", "Type", "Downloaded at", "Size(mb)", "Size(kb)") + Fore.RESET
print
for index in range(length):
try:
cached_file = cache_list[index]
name, type = re.search(r'(.+?)\.(zip|rar|gzip|bzip2|tar)', cached_file, re.IGNORECASE).groups()
file_size = op.get_file_size(cache_path + separator + cached_file)
t = os.path.getmtime(cache_path + separator + cached_file) # returns seconds
m = time.strftime("%H:%M:%S - %b, %d %Y", time.gmtime(t))
print "[{0:2}] {1:35}{2:8}{3:28}{4:14}{5:14}".format((index+1), name, type, m, ("%0.2f" % file_size['mb']), ("%d" % file_size['kb']))
except Exception:
pass
def __cache_rename(self, _args):
old_name = _args[0]
new_name = _args[1]
separator = op.separator()
cache_path = op.get_home() + separator + hook.data_storage_path
cache_list = op.list_dir(cache_path)
matching_list = manager.match_package_by_list(cache_list, old_name)
length = len(matching_list)
if length == 0:
print Fore.YELLOW + 'No file matches ' + Fore.RESET + "({0})\n".format(old_name)
return
if length == 1:
old_name = matching_list[0]
oldname_file_extension = manager.get_file_extension(old_name)
newname_file_extension = manager.get_file_extension(new_name)
extension = manager.choose_extension(oldname_file_extension, newname_file_extension)
if extension is not None:
new_name += '.'+extension
op.rename_file(cache_path + separator + old_name, cache_path + separator + new_name)
print Fore.YELLOW + old_name + Fore.RESET + ' renamed -> ' + Fore.GREEN + new_name + Fore.RESET
return
_input = ''
try:
print Back.BLUE + ' File(s) matching ' + Back.RESET + " ({0})".format(old_name)
print
print Fore.BLUE + "{0:4} {1:30}".format('Num', 'File name') + Fore.RESET
print
for index in range(length):
print "[{0:2}] {1:30}".format((index + 1), matching_list[index])
print
while True:
_input = raw_input("Choose your file number (1: DEFAULT, q: QUIT): ")
if _input == "":
_input = 1
file_index = int(_input)
if 0 < file_index <= length:
old_name = matching_list[(file_index - 1)]
print "\n" + Back.RED + " WARNING " + Back.RESET + " Selected[{0}]".format(Fore.YELLOW + old_name + Fore.RESET)
while True:
confirmation = raw_input("\n\tAre you sure (y,N): ")
if confirmation in ('y', 'Y', 'yes'):
oldname_file_extension = manager.get_file_extension(old_name)
newname_file_extension = manager.get_file_extension(new_name)
extension = manager.choose_extension(oldname_file_extension, newname_file_extension)
if extension is not None:
new_name += '.'+extension
op.rename_file(cache_path + separator + old_name, cache_path + separator + new_name)
print "\n" + Fore.YELLOW + old_name + Fore.RESET + ' renamed -> ' + Fore.GREEN + new_name + Fore.RESET
break
elif confirmation in ('', 'n', 'N', 'no'):
print "\nOperation is canceled"
print "Hook is quitting"
break
break
except ValueError:
if _input not in ('q', 'quit'):
print "No value was specified"
print "Hook is quitting"
except AttributeError as e:
print e.message
def __cache_help(self):
print "usage: hook cache <command> [<args>] [<options>]\n"
print "Commands:"
print "{0}{1:19}{2}".format((" " * 2), 'help', "show this help message")
print "{0}{1:19}{2}".format((" " * 2), 'list', "list cached files")
print "{0}{1:19}{2}".format((" " * 2), 'info', "show basic information about cached files")
print "{0}{1:19}{2}".format((" " * 2), 'remove', "remove selected file from cache")
print "{0}{1:19}{2}".format((" " * 2), 'rename', "rename selected cached file")
print "{0}{1:19}{2}".format((" " * 2), 'register', "register package manually in your local cache")
print "{0}{1:19}{2}".format((" " * 2), 'repackage', "modify and repackage an already register package in your cache")
print "{0}{1:19}{2}".format((" " * 2), 'load', "load a package to your current working directory")
def __cache_register(self, _args):
separator = op.separator()
cache_path = op.get_home() + separator + hook.data_storage_path
if len(_args) > 0:
str_list = filter(None, _args[0].split(separator))
_foldername = _args[0]
_zipfilename = str_list[-1]
print _zipfilename
op.compress_folder(_foldername, cache_path + separator + _zipfilename + '.zip')
else:
print Fore.YELLOW + "Not enough arguments" + Fore.RESET
def __cache_repackage(self, _args):
print 'repackage'
def __cache_load(self, _args):
if not op.is_exits('.hook/workspace_settings.json'):
manager.settings_not_found_error_print()
return
separator = op.separator()
cache_path = op.get_home() + separator + hook.data_storage_path
if len(_args) > 0:
_filename = _args[0]
if manager.is_in_cache(_filename + '.zip'):
download_manager = idm.download_manager()
download_manager.load_from_cache(_filename + '.zip', op.get_file_size(cache_path + separator + _filename + '.zip')['kb'])
manager.register_installed_package(_filename + '.zip', _filename + '/' + _filename)
else:
print Fore.YELLOW + _filename + " is not registered in the cache" + Fore.RESET
else:
print Fore.YELLOW + "Not enough arguments" + Fore.RESET
def __cmd_cache(self, cache_cmd):
"""
        cache related commands: help, list, info, remove, rename, register, repackage, load
"""
if cache_cmd[0] == 'list':
self.__cache_list()
elif cache_cmd[0] == 'remove':
self.__cache_remove(cache_cmd[1:])
elif cache_cmd[0] == 'info':
self.__cache_info()
elif cache_cmd[0] == 'rename':
self.__cache_rename(cache_cmd[1:])
elif cache_cmd[0] == 'help':
self.__cache_help()
elif cache_cmd[0] == 'register':
self.__cache_register(cache_cmd[1:])
elif cache_cmd[0] == 'repackage':
self.__cache_repackage(cache_cmd[1:])
elif cache_cmd[0] == 'load':
self.__cache_load(cache_cmd[1:])
else:
print Back.RED + " Unrecognized command " + Back.RESET
self.__cache_help()
def __cmd_download(self, _args):
browser_object = cli_browser.cli_browser()
browser_connection = browser_object.getHttpConnection()
download_manager = idm.download_manager()
download_manager.plugInBrowserWithDownloadManager(browser_connection)
urls = []
repositories = []
if len(_args) > 0:
repository = ''
version = ''
for arg in _args:
if helper.is_http_url(arg):
info = helper.http_url_package(arg)
repository = info[0] + '/' + info[1]
version = '*'
elif helper.is_ssh_url(arg):
info = helper.ssh_url_package(arg)
repository = info[0] + '/' + info[1]
version = '*'
elif helper.is_repository(arg):
repository = arg
version = '*'
else:
try:
repository, version = arg.split(':')
except ValueError:
print arg + " is not a valid argument"
continue
if version == '*':
browser_object.setRequestedURL('https://github.com/{0}/tags'.format(repository))
response = browser_object.submit()
versions_list = browser_object.parseVersions(response)
if len(versions_list) == 0:
print Back.RED + 'No releases for {0}'.format(repository) + Back.RESET
version = 'master'
else:
version = versions_list[0]
#url = 'https://github.com/fabpot/Twig/archive/v1.16.0.zip'
url = 'https://github.com/{0}/archive/{1}.zip'.format(repository, version)
repositories.append(repository)
#print url
urls.append(url)
download_manager.startQueue(urls, _repositories=repositories, _params={'type': 'download'})
browser_connection.close()
browser_object.closeConnections()
else:
print Fore.YELLOW + "Not enough arguments" + Fore.RESET
def __initCommands(self):
self.__parser.add_argument('commands', nargs="*")
self.__parser.add_argument('self-install', help="Setup working environment of hook it self", nargs="?")
self.__parser.add_argument('init', help="Interactively create a hook.json file", nargs="?")
self.__parser.add_argument('create', help="Create dynamic/static project with a specific package", nargs="*")
self.__parser.add_argument('download', help="Download package in your current working directory", nargs="?")
self.__parser.add_argument('add', help="Add a package(s) to your hook.json", nargs="*")
#self.__parser.add_argument('create', help="Setting up environment for the project", nargs="*")
self.__parser.add_argument("install", help="Install a package(s) locally", nargs="*")
self.__parser.add_argument("search", help="Search for a package by name", nargs="?")
self.__parser.add_argument("list", help="List local packages", nargs="?")
self.__parser.add_argument("info", help="Show information of a particular package", nargs="?")
self.__parser.add_argument("update", help="Update a local package", nargs="?")
self.__parser.add_argument("uninstall", help="Remove a local package", nargs="?")
self.__parser.add_argument("profile", help="Show memory usage of the working directory", nargs="?")
self.__parser.add_argument("home", help="Opens a package homepage into your default browser", nargs="?")
self.__parser.add_argument("cache", help="Manage hook cache", nargs="?")
def __initOptions(self):
#self.__parser.add_argument("-f", "--force", help="Makes various commands more forceful", action="store_true")
self.__parser.add_argument("-j", "--json", help="Output consumable JSON", action="store_true")
self.__parser.add_argument("-i", "--interactive", help="Makes various commands work interactively", action="store_true")
self.__parser.add_argument("-p", "--pversion", help="Tells if you want to get specific version of packages", action="store_true")
self.__parser.add_argument("-s", "--surf", help="Allows you to paginate packages list", action="store_true")
self.__parser.add_argument("-c", "--getversion", help="specify your target version", action="store_true")
self.__parser.add_argument("-b", "--getbootstrap", help="setup web application using twitter bootstrap", action="store_true")
self.__parser.add_argument("-f", "--getfoundation", help="setup web application using zurb foundation", action="store_true")
def __setup_workspace(self, _packages, _settings):
op.create_directory('.hook')
op.create_file('.hook/workspace_settings.json', op.object_to_json(_settings))
#op.hide_directory('.hook')
op.show_directory('.hook')
op.create_directory('components')
op.generate_json_file("hook.json", _packages)
print "Initialized empty HooK workspace in {0}".format(op.get_current_path())
print "Generating hook.json ..."
def __is_workspace_setup(self):
hook_exists = op.is_exits('.hook')
workspace_settings_exists = op.is_exits('.hook/workspace_settings.json')
components_exists = op.is_exits('components')
hook_json = op.is_exits('hook.json')
if hook_exists and workspace_settings_exists and components_exists and hook_json:
return True
return False
def __parseArguments(self):
return self.__parser.parse_args()
def initializeCommandLineTool(self):
self.__initCommands()
self.__initOptions()
return self.__parseArguments()
def logoPrint(self, _logo=''):
init()
if _logo == 'init':
print hook.application_logo
return
print
def execute(self, args):
try:
commands = args.commands
self.logoPrint(commands[0])
if commands[0] == 'init':
if not self.__is_workspace_setup():
self.__cmd_init(args.interactive, args.pversion, commands)
else:
print "Workspace is already setup"
elif commands[0] == 'self-install':
self.__cmd_self_install()
elif commands[0] == 'create':
self.__cmd_create(commands[1:], args.getversion, args.getfoundation, args.getbootstrap)
elif commands[0] == 'download':
self.__cmd_download(commands[1:])
elif commands[0] == 'add':
self.__cmd_add(commands[1:])
elif commands[0] == 'install':
self.__cmd_install()
elif commands[0] == 'search':
self.__cmd_search(commands[1:], args.surf)
elif commands[0] == 'list':
self.__cmd_list()
elif commands[0] == 'info':
self.__cmd_info()
elif commands[0] == 'update':
if not self.__is_workspace_setup():
manager.settings_not_found_error_print()
return
self.__cmd_update(commands[1:])
elif commands[0] == 'uninstall':
if not self.__is_workspace_setup():
manager.settings_not_found_error_print()
return
self.__cmd_uninstall(commands[1:])
elif commands[0] == 'profile':
self.__cmd_profile()
elif commands[0] == 'home':
self.__cmd_home(commands[1])
elif commands[0] == 'cache':
try:
self.__cmd_cache(commands[1:])
except Exception:
print Fore.YELLOW + "Not enough arguments" + Fore.RESET
else:
self.__parser.print_help()
except IndexError:
print "\n" + Back.RED + Fore.WHITE + ' Not enough arguments ' + Fore.RESET + Back.RESET
self.__parser.print_help()
| mit | -1,618,545,090,819,251,700 | 42.119485 | 175 | 0.506245 | false | 4.304037 | false | false | false |
ryfeus/lambda-packs | pytorch/source/caffe2/python/muji_test.py | 2 | 3058 | import numpy as np
import unittest
from caffe2.python import core, workspace, muji, test_util
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
[],
"testblob_gpu_" + str(id),
shape=[1, 2, 3, 4],
value=float(id + 1),
device_option=muji.OnGPU(id)
)
allreduce_function(
net, ["testblob_gpu_" + str(i)
for i in gpu_ids], "_reduced", gpu_ids
)
workspace.RunNetOnce(net)
target_value = sum(gpu_ids) + len(gpu_ids)
all_blobs = workspace.Blobs()
all_blobs.sort()
for blob in all_blobs:
print('{} {}'.format(blob, workspace.FetchBlob(blob)))
for idx in gpu_ids:
blob = workspace.FetchBlob("testblob_gpu_" + str(idx) + "_reduced")
np.testing.assert_array_equal(
blob,
target_value,
err_msg="gpu id %d of %s" % (idx, str(gpu_ids))
)
def testAllreduceFallback(self):
self.RunningAllreduceWithGPUs(
list(range(workspace.NumGpuDevices())), muji.AllreduceFallback
)
def testAllreduceSingleGPU(self):
for i in range(workspace.NumGpuDevices()):
self.RunningAllreduceWithGPUs([i], muji.Allreduce)
def testAllreduceWithTwoGPUs(self):
pattern = workspace.GetGpuPeerAccessPattern()
if pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):
self.RunningAllreduceWithGPUs([0, 1], muji.Allreduce2)
else:
print('Skipping allreduce with 2 gpus. Not peer access ready.')
def testAllreduceWithFourGPUs(self):
pattern = workspace.GetGpuPeerAccessPattern()
if pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):
self.RunningAllreduceWithGPUs([0, 1, 2, 3], muji.Allreduce4)
else:
print('Skipping allreduce with 4 gpus. Not peer access ready.')
def testAllreduceWithFourGPUsAndTwoGroups(self):
pattern = workspace.GetGpuPeerAccessPattern()
if pattern.shape[0] >= 4 and np.all(pattern[:2, :2]) and np.all(pattern[2:4, 2:4]):
self.RunningAllreduceWithGPUs([0, 1, 2, 3], muji.Allreduce4Group2)
else:
print('Skipping allreduce with 4 gpus and 2 groups. Not peer access ready.')
def testAllreduceWithEightGPUs(self):
pattern = workspace.GetGpuPeerAccessPattern()
if (
pattern.shape[0] >= 8 and np.all(pattern[:4, :4]) and
np.all(pattern[4:, 4:])
):
self.RunningAllreduceWithGPUs(
list(range(8)), muji.Allreduce8)
else:
print('Skipping allreduce with 8 gpus. Not peer access ready.')
if __name__ == '__main__':
unittest.main()
| mit | -1,122,516,690,096,160,900 | 36.292683 | 91 | 0.584369 | false | 3.684337 | true | false | false |
slmtpz/RED-Game-House | GameSlot.py | 1 | 9811 | from tkinter import *
from Bill import *
from consumption_parser import foods, drinks
import CafeDB as db
class GameSlot(Frame):
def __init__(self, game_slots_frame, slot_number, game_info):
Frame.__init__(self, game_slots_frame, highlightbackground="black", highlightthickness=3)
self.game_info = game_info
self.slot_number = slot_number
self.game_type = game_info['type']
self.game_status = 0
self.number_of_players = 0
self.time_passed_in_sec = IntVar()
self.bill = Bill()
self.clickable_chidren = []
self.set_inner_widgets()
def set_bill(self, bill):
self.bill = bill
self.pay_bill_button.pack(side=RIGHT)
self.add_extra_button.pack(side=LEFT)
self.transact_game_slot_button.pack(side=LEFT)
def set_inner_widgets(self):
self.pack_propagate(0)
self.top_frame = Frame(self)
self.top_frame.pack(side=TOP, fill=X)
self.clickable_chidren.append(self.top_frame)
self.middle_frame = Frame(self)
self.middle_frame.pack(side=BOTTOM, fill=X)
self.clickable_chidren.append(self.middle_frame)
self.bottom_frame = Frame(self)
self.bottom_frame.pack(side=BOTTOM)
self.clickable_chidren.append(self.bottom_frame)
self.top_left_frame = Frame(self.top_frame, highlightthickness=1)
self.top_left_frame.pack(side=LEFT)
self.clickable_chidren.append(self.top_left_frame)
self.top_right_frame = Frame(self.top_frame, highlightthickness=1)
self.top_right_frame.pack(side=RIGHT)
self.clickable_chidren.append(self.top_right_frame)
self.middle_left_frame = Frame(self.middle_frame, highlightthickness=1)
self.middle_left_frame.pack(side=LEFT)
self.clickable_chidren.append(self.middle_left_frame)
self.middle_right_frame = Frame(self.middle_frame, highlightthickness=1)
self.middle_right_frame.pack(side=RIGHT)
self.clickable_chidren.append(self.middle_right_frame)
# number_label = Label(self.top_left_frame, text=self.slot_number, font=("Helvetica", 16))
# number_label.pack(side=LEFT)
self.game_slot_name_label = Label(self.top_left_frame, text=self.game_info['name'], font=("Helvetica", 16))
self.game_slot_name_label.pack(side=LEFT)
self.clickable_chidren.append(self.game_slot_name_label)
# game_type_label = Label(self.top_right_frame, text=self.game_type['name'], font=("Helvetica", 16))
# game_type_label.pack(side=RIGHT)
self.number_of_players_var = IntVar(self.middle_left_frame)
self.number_of_players_option = OptionMenu(self.middle_left_frame, self.number_of_players_var, *[1, 2, 3, 4])
self.number_of_players_var.set(2)
self.number_of_players_option.pack(side=LEFT)
self.start_button = Button(self.middle_left_frame, text="Başlat", fg="green", font=("Helvetica", 12))
self.start_button.bind("<Button-1>", self.start)
self.start_button.pack(side=LEFT)
# removed due to customer feedback
# self.change_number_of_players_button = Button(self.middle_left_frame, text="Değiştir", fg="orange", font=("Helvetica", 12))
# self.change_number_of_players_button.bind("<Button-1>", self.change_number_of_players)
self.finish_button = Button(self.middle_right_frame, text="Bitir", fg="red", font=("Helvetica", 12))
self.finish_button.bind("<Button-1>", self.finish)
self.pay_bill_button = Button(self.middle_right_frame, text="Kapat", fg="red", font=("Helvetica", 12))
self.pay_bill_button.bind("<Button-1>", self.pay_bill)
self.game_status_text = StringVar()
self.game_status_text.set(str(self.bill.get_total_charge(self.game_type, self.number_of_players, self.time_passed_in_sec.get())) + " / " + str(self.number_of_players))
self.charge_label = Label(self.bottom_frame, textvariable=self.game_status_text, font=("Helvetica", 26))
self.clickable_chidren.append(self.charge_label)
self.add_extra_button = Button(self.middle_left_frame, text="Ekle", fg="purple", font=("Helvetica", 10))
self.add_extra_button.bind("<Button-1>", self.add_extra)
## ## debug
## self.time_label = Label(self.top_left_frame, textvariable=self.time_passed_in_sec, font=("Helvetica", 16))
## self.time_label.pack(side=LEFT)
## ## debug
def second_hit(self):
if self.game_status == 1:
self.time_passed_in_sec.set(self.time_passed_in_sec.get() + 1)
self.game_status_text.set(str(self.bill.get_total_charge(self.game_type, self.number_of_players, self.time_passed_in_sec.get())) + " / " + str(self.number_of_players))
self.after(1000, self.second_hit)
def set_clicked(self):
self.config(highlightbackground="red")
def set_released(self):
self.config(highlightbackground="black")
def start(self, event):
self.game_status = 1
self.bill.is_active = True
self.bill.startingTime = datetime.datetime.now()
self.set_start_ui()
self.update() # update before delay
self.time_passed_in_sec.set(-1)
self.second_hit() # delay
def set_start_ui(self):
self.charge_label.config(fg="red")
self.number_of_players_option.forget()
self.start_button.pack_forget()
self.number_of_players = self.number_of_players_var.get()
self.game_status_text.set(str(self.bill.get_total_charge(self.game_type, self.number_of_players, self.time_passed_in_sec.get())) + " / " + str(self.number_of_players))
self.charge_label.pack()
self.pay_bill_button.pack_forget()
self.transact_game_slot_button.forget()
self.finish_button.pack(side=RIGHT)
#self.change_number_of_players_button.pack(side=LEFT)
self.add_extra_button.pack(side=LEFT)
# removed due to customer feedback
# def change_number_of_players(self, event):
# self.bill.add_game(self.game_type, self.number_of_players, self.time_passed_in_sec.get())
# self.number_of_players = self.number_of_players_var.get()
# self.time_passed_in_sec.set(0)
# self.game_status_text.set(str(self.bill.get_total_charge(self.game_type, self.number_of_players, self.time_passed_in_sec.get())) + " / " + str(self.number_of_players))
def finish(self, event):
self.bill.add_game(self.game_type, self.number_of_players, self.time_passed_in_sec.get())
self.game_status = 0
self.number_of_players = 0
self.set_finish_ui()
def set_finish_ui(self):
self.charge_label.config(fg="black")
self.finish_button.pack_forget()
self.pay_bill_button.pack(side=RIGHT)
#self.change_number_of_players_button.forget()
self.game_status_text.set(self.bill.total_charge)
self.add_extra_button.forget()
self.number_of_players_option.pack(side=LEFT)
        self.after(1000, lambda: self.start_button.pack(side=LEFT))
self.add_extra_button.pack(side=LEFT)
self.transact_game_slot_button.pack(side=LEFT)
def pay_bill(self, event):
self.set_pay_bill_ui()
self.bill.endingTime = datetime.datetime.now()
db.saveBill(self.bill)
self.bill = Bill()
def set_pay_bill_ui(self):
self.pay_bill_button.pack_forget()
self.charge_label.pack_forget()
self.add_extra_button.forget()
self.transact_game_slot_button.forget()
def add_extra(self, event):
menu = Toplevel()
menu.title("Ekle...")
row = 0
column = 0
for extra in foods:
# PRINT(extra['name'] + " row: " + str(row) + " column: " + str(column))
food_button = Button(menu, text=extra['name']+": "+str(extra['charge']), anchor=W, font=("Helvetica", 8), command=lambda extra=extra:self.bill.add_extra(extra))
food_button.grid(row=row, column=column, sticky=W+E+S+N)
row += 1
if row > 20:
row = 0
column += 1
row = 0
column += 1
for extra in drinks:
drink_button = Button(menu, text=extra['name']+": "+str(extra['charge']), anchor=W, font=("Helvetica", 8), command=lambda extra=extra:self.bill.add_extra(extra))
drink_button.grid(row=row, column=column, sticky=W+E+S+N)
row += 1
if row > 20:
                row = 0
column += 1
add_other_button = Button(menu, text="Başka...", fg="blue", font=("Helvetica", 12), command=self.add_other_popup)
add_other_button.grid(columnspan=2, sticky=W+E+S+N)
quit_button = Button(menu, text="Kapat", fg="red", font=("Helvetica", 12), command=menu.destroy)
quit_button.grid(columnspan=2, sticky=W+E+S+N)
def add_other_popup(self):
menu = Toplevel()
menu.title("Başka ekle...")
desc_label = Label(menu, text="Açıklama:")
charge_label = Label(menu, text="Ücret:")
description = StringVar()
desc_entry = Entry(menu, textvariable=description)
charge = DoubleVar()
charge_entry = Entry(menu, textvariable=charge)
desc_label.grid(row=0)
charge_label.grid(row=1)
desc_entry.grid(row=0, column=1)
charge_entry.grid(row=1, column=1)
submit_button = Button(menu, text="Ekle", command=lambda: self.bill.add_other(description.get(), float(charge.get())))
submit_button.grid(columnspan=2)
| apache-2.0 | -3,954,311,048,807,907,000 | 42.559091 | 179 | 0.615832 | false | 3.310706 | false | false | false |
tensorflow/models | official/modeling/progressive/utils.py | 1 | 1987 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util classes and functions."""
from absl import logging
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.training.tracking import tracking
class VolatileTrackable(tracking.AutoTrackable):
"""A util class to keep Trackables that might change instances."""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def reassign_trackable(self, **kwargs):
for k, v in kwargs.items():
delattr(self, k) # untrack this object
setattr(self, k, v) # track the new object
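# Illustrative sketch (hypothetical names): keep a mutable slot tracked by a
# checkpoint and swap its contents between progressive stages.
#
#   holder = VolatileTrackable(head=build_head(stage=0))
#   ckpt = tf.train.Checkpoint(holder=holder)
#   ...
#   holder.reassign_trackable(head=build_head(stage=1))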
class CheckpointWithHooks(tf.train.Checkpoint):
"""Same as tf.train.Checkpoint but supports hooks.
In progressive training, use this class instead of tf.train.Checkpoint.
Since the network architecture changes during progressive training, we need to
prepare something (like switch to the correct architecture) before loading the
checkpoint. This class supports a hook that will be executed before checkpoint
loading.
"""
def __init__(self, before_load_hook, **kwargs):
self._before_load_hook = before_load_hook
super(CheckpointWithHooks, self).__init__(**kwargs)
# override
def read(self, save_path, options=None):
self._before_load_hook(save_path)
logging.info('Ran before_load_hook.')
super(CheckpointWithHooks, self).read(save_path=save_path, options=options)
| apache-2.0 | -2,801,626,757,163,497,500 | 34.482143 | 80 | 0.736789 | false | 3.974 | false | false | false |
ITDevLtd/MCVirt | source/mcvirt-daemon/usr/lib/python2.7/dist-packages/mcvirt/parser.py | 1 | 13854 | """Provides argument parser."""
# Copyright (c) 2014 - I.T. Dev Ltd
#
# This file is part of MCVirt.
#
# MCVirt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# MCVirt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCVirt. If not, see <http://www.gnu.org/licenses/>
import argparse
import os
from mcvirt.exceptions import (ArgumentParserException,
AuthenticationError)
from mcvirt.client.rpc import Connection
from mcvirt.system import System
from mcvirt.parser_modules.virtual_machine.start_parser import StartParser
from mcvirt.parser_modules.virtual_machine.stop_parser import StopParser
from mcvirt.parser_modules.virtual_machine.reset_parser import ResetParser
from mcvirt.parser_modules.virtual_machine.shutdown_parser import ShutdownParser
from mcvirt.parser_modules.virtual_machine.create_parser import CreateParser
from mcvirt.parser_modules.virtual_machine.delete_parser import DeleteParser
from mcvirt.parser_modules.clear_method_lock_parser import ClearMethodLockParser
from mcvirt.parser_modules.iso_parser import IsoParser
from mcvirt.parser_modules.virtual_machine.register_parser import RegisterParser
from mcvirt.parser_modules.virtual_machine.unregister_parser import UnregisterParser
from mcvirt.parser_modules.virtual_machine.update_parser import UpdateParser
from mcvirt.parser_modules.virtual_machine.migrate_parser import MigrateParser
from mcvirt.parser_modules.virtual_machine.info_parser import InfoParser
from mcvirt.parser_modules.permission_parser import PermissionParser
from mcvirt.parser_modules.network_parser import NetworkParser
from mcvirt.parser_modules.hard_drive_parser import HardDriveParser
from mcvirt.parser_modules.group_parser import GroupParser
from mcvirt.parser_modules.user_parser import UserParser
from mcvirt.parser_modules.virtual_machine.list_parser import ListParser
from mcvirt.parser_modules.virtual_machine.duplicate_parser import DuplicateParser
from mcvirt.parser_modules.virtual_machine.clone_parser import CloneParser
from mcvirt.parser_modules.virtual_machine.move_parser import MoveParser
from mcvirt.parser_modules.cluster_parser import ClusterParser
from mcvirt.parser_modules.storage_parser import StorageParser
from mcvirt.parser_modules.node_parser import NodeParser
from mcvirt.parser_modules.verify_parser import VerifyParser
from mcvirt.parser_modules.resync_parser import ResyncParser
from mcvirt.parser_modules.drbd_parser import DrbdParser
from mcvirt.parser_modules.virtual_machine.backup_parser import BackupParser
from mcvirt.parser_modules.virtual_machine.lock_parser import LockParser
from mcvirt.parser_modules.watchdog_parser import WatchdogParser
class ThrowingArgumentParser(argparse.ArgumentParser):
"""Override the ArgumentParser class, in order to change the handling of errors."""
def error(self, message):
"""Override the error function."""
# Force the argument parser to throw an MCVirt exception on error.
print '\nError: %s\n' % message
self.print_help()
raise ArgumentParserException(message)
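# Illustrative sketch: callers can catch parse failures instead of argparse
# exiting the process (the argument values below are hypothetical).
#
#   parser = ThrowingArgumentParser(prog='mcvirt')
#   try:
#       args = parser.parse_args(['--bad-flag'])
#   except ArgumentParserException:
#       pass  # handle the error without sys.exit()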
class Parser(object):
"""Provide an argument parser for MCVirt."""
AUTH_FILE = '.mcvirt-auth'
def __init__(self, verbose=True):
"""Configure the argument parser object."""
self.print_output = []
self.username = None
self.session_id = None
self.rpc = None
self.auth_cache_file = os.getenv('HOME') + '/' + self.AUTH_FILE
self.verbose = verbose
self.parent_parser = ThrowingArgumentParser(add_help=False)
self.global_option = self.parent_parser.add_argument_group('Global optional arguments')
self.global_option.add_argument('--username', '-U', dest='username',
help='MCVirt username')
self.global_option.add_argument('--password', dest='password',
help='MCVirt password')
self.global_option.add_argument('--cache-credentials', dest='cache_credentials',
action='store_true',
help=('Store the session ID, so it can be used for '
'multiple MCVirt calls.'))
self.global_option.add_argument('--ignore-failed-nodes', dest='ignore_failed_nodes',
help='Ignores nodes that are inaccessible',
action='store_true')
self.global_option.add_argument('--accept-failed-nodes-warning',
dest='accept_failed_nodes_warning',
help=argparse.SUPPRESS, action='store_true')
self.global_option.add_argument('--ignore-drbd', dest='ignore_drbd',
help='Ignores Drbd state', action='store_true')
argparser_description = "\nMCVirt - Managed Consistent Virtualisation\n\n" + \
'Manage the MCVirt host'
argparser_epilog = "\nFor more information, see http://mcvirt.itdev.co.uk\n"
# Create an argument parser object
self.parser = ThrowingArgumentParser(description=argparser_description,
epilog=argparser_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.subparsers = self.parser.add_subparsers(dest='action', metavar='Action',
help='Action to perform')
# Add arguments for starting a VM
StartParser(self.subparsers, self.parent_parser)
# Add arguments for stopping a VM
StopParser(self.subparsers, self.parent_parser)
# Add arguments for resetting a VM
ResetParser(self.subparsers, self.parent_parser)
# Add arguments for shutting down a VM
ShutdownParser(self.subparsers, self.parent_parser)
# Add arguments for fixing deadlock on a vm
ClearMethodLockParser(self.subparsers, self.parent_parser)
# Add arguments for ISO functions
IsoParser(self.subparsers, self.parent_parser)
# Add arguments for managing users
UserParser(self.subparsers, self.parent_parser)
# Add arguments for creating a VM
CreateParser(self.subparsers, self.parent_parser)
# Get arguments for deleting a VM
DeleteParser(self.subparsers, self.parent_parser)
RegisterParser(self.subparsers, self.parent_parser)
UnregisterParser(self.subparsers, self.parent_parser)
# Get arguments for updating a VM
UpdateParser(self.subparsers, self.parent_parser)
PermissionParser(self.subparsers, self.parent_parser)
GroupParser(self.subparsers, self.parent_parser)
# Create subparser for network-related commands
NetworkParser(self.subparsers, self.parent_parser)
# Get arguments for getting VM information
InfoParser(self.subparsers, self.parent_parser)
# Get arguments for listing VMs
ListParser(self.subparsers, self.parent_parser)
# Get arguments for cloning a VM
CloneParser(self.subparsers, self.parent_parser)
        # Get arguments for duplicating a VM
DuplicateParser(self.subparsers, self.parent_parser)
# Get arguments for migrating a VM
MigrateParser(self.subparsers, self.parent_parser)
# Create sub-parser for moving VMs
MoveParser(self.subparsers, self.parent_parser)
# Create sub-parser for cluster-related commands
ClusterParser(self.subparsers, self.parent_parser)
StorageParser(self.subparsers, self.parent_parser)
HardDriveParser(self.subparsers, self.parent_parser)
# Create subparser for commands relating to the local node configuration
NodeParser(self.subparsers, self.parent_parser)
# Create sub-parser for VM verification
VerifyParser(self.subparsers, self.parent_parser)
# Create sub-parser for VM Disk resync
ResyncParser(self.subparsers, self.parent_parser)
# Create sub-parser for Drbd-related commands
DrbdParser(self.subparsers, self.parent_parser)
# Create sub-parser for watchdog
WatchdogParser(self.subparsers, self.parent_parser)
# Create sub-parser for backup commands
BackupParser(self.subparsers, self.parent_parser)
# Create sub-parser for managing VM locks
LockParser(self.subparsers, self.parent_parser)
self.exit_parser = self.subparsers.add_parser('exit', help='Exits the MCVirt shell',
parents=[self.parent_parser])
def print_status(self, status):
"""Print if the user has specified that the parser should print statuses."""
if self.verbose:
print status
else:
self.print_output.append(status)
def check_ignore_failed(self, args):
"""Check ignore failed."""
if args.ignore_failed_nodes:
# If the user has specified to ignore the cluster,
# print a warning and confirm the user's answer
if not args.accept_failed_nodes_warning:
self.print_status(('WARNING: Running MCVirt with --ignore-failed-nodes'
' can leave the cluster in an inconsistent state!'))
continue_answer = System.getUserInput('Would you like to continue? (Y/n): ')
                if continue_answer.strip() != 'Y':
self.print_status('Cancelled...')
return
return True
return False
def authenticate_saved_session(self, ignore_cluster):
"""Attempt to authenticate using saved session."""
# Try logging in with saved session
auth_session = None
try:
with open(self.auth_cache_file, 'r') as cache_fh:
auth_username = cache_fh.readline().strip()
auth_session = cache_fh.readline().strip()
except IOError:
pass
if auth_session:
try:
self.rpc = Connection(username=auth_username, session_id=auth_session,
ignore_cluster=ignore_cluster)
self.session_id = self.rpc.session_id
self.username = self.rpc.username
except AuthenticationError:
# If authentication fails with cached session,
                # print error, attempt to remove session file and
                # remove rpc connection
                self.print_status('Authentication error occurred when using saved session.')
try:
os.remove(self.auth_cache_file)
except OSError:
pass
self.rpc = None
def authenticate_username_password(self, args, ignore_cluster):
"""Authenticate using username and password."""
# Check if user/password have been passed. Else, ask for them.
username = args.username if args.username else System.getUserInput(
'Username: '
).rstrip()
if args.password:
password = args.password
else:
password = System.getUserInput(
'Password: ', password=True
).rstrip()
self.rpc = Connection(username=username, password=password,
ignore_cluster=ignore_cluster)
self.session_id = self.rpc.session_id
self.username = self.rpc.username
def store_cached_session(self, args):
"""Store session details in temporary file"""
# If successfully authenticated then store session ID and username in auth file
if args.cache_credentials:
try:
with open(self.auth_cache_file, 'w') as cache_fh:
cache_fh.write("%s\n%s" % (self.rpc.username, self.rpc.session_id))
except OSError:
pass
def parse_arguments(self, script_args=None):
"""Parse arguments and performs actions based on the arguments."""
# If arguments have been specified, split, so that
# an array is sent to the argument parser
if script_args is not None:
script_args = script_args.split()
args = self.parser.parse_args(script_args)
ignore_cluster = self.check_ignore_failed(args)
if self.session_id and self.username:
self.rpc = Connection(username=self.username, session_id=self.session_id,
ignore_cluster=ignore_cluster)
else:
# Obtain connection to Pyro server
if not (args.password or args.username):
self.authenticate_saved_session(ignore_cluster)
if not self.rpc:
self.authenticate_username_password(args, ignore_cluster)
self.store_cached_session(args)
if args.ignore_drbd:
self.rpc.ignore_drbd()
        # If a custom parser function has been defined, use this and exit
# instead of running through (old) main parser workflow
if 'func' in dir(args):
args.func(args=args, p_=self)
else:
raise ArgumentParserException('No handler registered for parser')
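

# Illustrative usage sketch (added; not part of the upstream module): a minimal
# way to drive the Parser from a command-line entry point. The guard keeps it
# from running on import; the exit code is an assumption, not MCVirt's actual
# console-script behaviour.
if __name__ == '__main__':
    import sys
    cli_parser = Parser(verbose=True)
    try:
        cli_parser.parse_arguments()  # parse sys.argv, authenticate and dispatch
    except ArgumentParserException:
        sys.exit(1)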
| gpl-2.0 | -3,621,780,881,024,561,700 | 43.121019 | 98 | 0.650931 | false | 4.482045 | false | false | false |
anitahitouch/mediadrop | mediadrop/lib/js_delivery.py | 10 | 6049 | # -*- coding: utf-8 -*-
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code in this file is dual licensed under the MIT license or
# the GPLv3 or (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from decimal import Decimal
import simplejson
from simplejson.encoder import JSONEncoderForHTML
from sqlalchemy.orm.properties import NoneType
__all__ = ['InlineJS', 'Script', 'Scripts']
class Script(object):
def __init__(self, url, async=False, key=None):
self.url = url
self.async = async
self.key = key
def render(self):
async = self.async and ' async="async"' or ''
return '<script src="%s"%s type="text/javascript"></script>' % (self.url, async)
def __unicode__(self):
return self.render()
def __repr__(self):
return 'Script(%r, async=%r, key=%r)' % (self.url, self.async, self.key)
def __eq__(self, other):
        # please note that two Script instances are considered equal when they
        # point to the same URL; the async attribute is not checked, so the
        # same script source is never included twice.
if not hasattr(other, 'url'):
return False
return self.url == other.url
def __ne__(self, other):
return not (self == other)
class InlineJS(object):
def __init__(self, code, key=None, params=None):
self.code = code
self.key = key
self.params = params
def as_safe_json(self, s):
return simplejson.dumps(s, cls=JSONEncoderForHTML)
def _escaped_parameters(self, params):
escaped_params = dict()
for key, value in params.items():
if isinstance(value, (bool, NoneType)):
# this condition must come first because "1 == True" in Python
# but "1 !== true" in JavaScript and the "int" check below
# would pass True unmodified
escaped_params[key] = self.as_safe_json(value)
elif isinstance(value, (int, long, float)):
# use these numeric values directly as format string
# parameters - they are mapped to JS types perfectly and don't
# need any escaping.
escaped_params[key] = value
elif isinstance(value, (basestring, dict, tuple, list, Decimal)):
escaped_params[key] = self.as_safe_json(value)
else:
klassname = value.__class__.__name__
raise ValueError('unknown type %s' % klassname)
return escaped_params
def render(self):
js = self.code
if self.params is not None:
js = self.code % self._escaped_parameters(self.params)
return '<script type="text/javascript">%s</script>' % js
def __unicode__(self):
return self.render()
def __repr__(self):
return 'InlineJS(%r, key=%r)' % (self.code, self.key)
def __eq__(self, other):
# extremely simple equality check: two InlineJS instances are equal if
# the code is exactly the same! No trimming of whitespaces or any other
# analysis is done.
if not hasattr(other, 'render'):
return False
return self.render() == other.render()
def __ne__(self, other):
return not (self == other)
class SearchResult(object):
def __init__(self, item, index):
self.item = item
self.index = index
class ResourcesCollection(object):
def __init__(self, *args):
self._resources = list(args)
def replace_resource_with_key(self, new_resource):
result = self._find_resource_with_key(new_resource.key)
if result is None:
raise AssertionError('No script with key %r' % new_resource.key)
self._resources[result.index] = new_resource
def render(self):
markup = u''
for resource in self._resources:
markup = markup + resource.render()
return markup
def __len__(self):
return len(self._resources)
# --- internal api ---------------------------------------------------------
def _get(self, resource):
result = self._find_resource(resource)
if result is not None:
return result
raise AssertionError('Resource %r not found' % resource)
def _get_by_key(self, key):
result = self._find_resource_with_key(key)
if result is not None:
return result
raise AssertionError('No script with key %r' % key)
def _find_resource(self, a_resource):
for i, resource in enumerate(self._resources):
if resource == a_resource:
return SearchResult(resource, i)
return None
def _find_resource_with_key(self, key):
for i, resource in enumerate(self._resources):
if resource.key == key:
return SearchResult(resource, i)
return None
class Scripts(ResourcesCollection):
def add(self, script):
if script in self._resources:
if not hasattr(script, 'async'):
return
# in case the same script is added twice and only one should be
# loaded asynchronously, use the non-async variant to be on the safe
# side
older_script = self._get(script).item
older_script.async = older_script.async and script.async
return
self._resources.append(script)
def add_all(self, *scripts):
for script in scripts:
self.add(script)
# --- some interface polishing ---------------------------------------------
@property
def scripts(self):
return self._resources
def replace_script_with_key(self, script):
self.replace_resource_with_key(script)
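

# Illustrative usage sketch (added; the URLs and parameter values are made up).
# It shows the intended flow: collect Script/InlineJS resources, let Scripts
# deduplicate by URL, then render everything to HTML markup in one pass.
if __name__ == '__main__':
    scripts = Scripts()
    scripts.add_all(
        Script('/scripts/player.js', async=True, key='player'),
        Script('/scripts/player.js'),  # duplicate URL: kept once, async flag dropped
        InlineJS('var initDelay = %(delay)s;', key='init', params={'delay': 250}),
    )
    markup = scripts.render()  # concatenated <script> tags, ready for a template
    assert '<script' in markup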
| gpl-3.0 | -4,727,133,492,058,727,000 | 33.565714 | 88 | 0.579435 | false | 4.326896 | false | false | false |
tux-mind/platform_external_subversion | tools/dev/gen-javahl-errors.py | 5 | 2892 | #!/usr/bin/env python
#
# gen-javahl-errors.py: Generate a Java class containing an enum for the
# C error codes
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
import sys, os
try:
from svn import core
except ImportError, e:
sys.stderr.write("ERROR: Unable to import Subversion's Python bindings: '%s'\n" \
"Hint: Set your PYTHONPATH environment variable, or adjust your " \
"PYTHONSTARTUP\nfile to point to your Subversion install " \
"location's svn-python directory.\n" % e)
sys.stderr.flush()
sys.exit(1)
def get_errors():
errs = {}
for key in vars(core):
if key.find('SVN_ERR_') == 0:
try:
val = int(vars(core)[key])
errs[val] = key
except:
pass
return errs
def gen_javahl_class(error_codes, output_filename):
jfile = open(output_filename, 'w')
jfile.write(
"""/** ErrorCodes.java - This file is autogenerated by gen-javahl-errors.py
*/
package org.tigris.subversion.javahl;
/**
* Provide mappings from error codes generated by the C runtime to meaningful
* Java values. For a better description of each error, please see
* svn_error_codes.h in the C source.
*/
public class ErrorCodes
{
""")
keys = sorted(error_codes.keys())
for key in keys:
# Format the code name to be more Java-esque
code_name = error_codes[key][8:].replace('_', ' ').title().replace(' ', '')
code_name = code_name[0].lower() + code_name[1:]
jfile.write(" public static final int %s = %d;\n" % (code_name, key))
jfile.write("}\n")
jfile.close()
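# Illustrative note (added): the transformation above turns C-style constant
# names into camelCase Java identifiers, e.g. a code named SVN_ERR_BAD_FILENAME
# would be emitted as "public static final int badFilename = <value>;". The
# numeric values come straight from the bindings, mirroring svn_error_codes.h.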
if __name__ == "__main__":
if len(sys.argv) > 1:
output_filename = sys.argv[1]
else:
output_filename = os.path.join('..', '..', 'subversion', 'bindings',
'javahl', 'src', 'org', 'tigris',
'subversion', 'javahl', 'ErrorCodes.java')
gen_javahl_class(get_errors(), output_filename)
| apache-2.0 | 6,232,930,087,033,425,000 | 32.627907 | 86 | 0.606846 | false | 3.810277 | false | false | false |
USGSDenverPychron/pychron | pychron/pipeline/plot/layout.py | 1 | 1316 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Str, Int, List
# ============= standard library imports ========================
# ============= local library imports ==========================
class LayoutItem(HasTraits):
row = Int
column = Int
kind = Str
identifier = Str
class FigureLayout(HasTraits):
rows = Int(1)
columns = Int(2)
    fixed = Str('cols')
    items = List  # collected LayoutItem instances (used by add_item below)
def add_item(self, kind):
self.items.append(LayoutItem(kind=kind))
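

# Illustrative usage sketch (added): build a two-column layout and append
# panels to it. The 'kind' strings below are placeholders, not necessarily
# valid pychron plot identifiers.
if __name__ == '__main__':
    layout = FigureLayout(rows=2, columns=2, fixed='cols')
    for kind in ('spectrum', 'ideogram'):
        layout.add_item(kind)
    assert len(layout.items) == 2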
# ============= EOF =============================================
| apache-2.0 | -103,423,669,881,972,930 | 33.631579 | 81 | 0.535714 | false | 4.768116 | false | false | false |
jenfly/monsoon-onset | testing/testing-indices-onset_HOWI.py | 1 | 3804 | import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import atmos as atm
import merra
from indices import onset_HOWI, summarize_indices, plot_index_years
# ----------------------------------------------------------------------
# Compute HOWI indices (Webster and Fasullo 2003)
datadir = atm.homedir() + 'datastore/merra/daily/'
datafile = datadir + 'merra_vimt_ps-300mb_apr-sep_1979-2014.nc'
lat1, lat2 = -20, 30
lon1, lon2 = 40, 100
with xray.open_dataset(datafile) as ds:
uq_int = ds['uq_int'].load()
vq_int = ds['vq_int'].load()
npts = 100
#npts = 50
pre_days = 'May 18-24'
post_days = 'June 8-14'
namestr = 'HOWI_%dpts_' % npts
exts = ['png', 'eps']
isave = True
howi, ds = onset_HOWI(uq_int, vq_int, npts)
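# Note on the outputs (inferred from their use below): `howi` carries the daily
# index plus per-year onset/retreat days (howi.onset, howi.retreat, howi.day,
# howi.year, attrs['nroll']), while `ds` holds the intermediate VIMT fields
# (uq/vq composites, masks, normalized indices) used for the maps and timeseries.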
# ----------------------------------------------------------------------
# MAPS
# ----------------------------------------------------------------------
# Plot climatological VIMT composites
lat = atm.get_coord(ds, 'lat')
lon = atm.get_coord(ds, 'lon')
x, y = np.meshgrid(lon, lat)
axlims = (lat1, lat2, lon1, lon2)
plt.figure(figsize=(12,10))
plt.subplot(221)
m = atm.init_latlon(lat1, lat2, lon1, lon2)
m.quiver(x, y, ds['uq_bar_pre'], ds['vq_bar_pre'])
plt.title(pre_days + ' VIMT Climatology')
plt.subplot(223)
m = atm.init_latlon(lat1, lat2, lon1, lon2)
m.quiver(x, y, ds['uq_bar_post'], ds['vq_bar_post'])
plt.title(post_days + ' VIMT Climatology')
# Plot difference between pre- and post- composites
plt.subplot(222)
m = atm.init_latlon(lat1, lat2, lon1, lon2)
#m, _ = atm.pcolor_latlon(ds['vimt_bar_diff'], axlims=axlims, cmap='hot_r')
m.quiver(x, y, ds['uq_bar_diff'], ds['vq_bar_diff'])
plt.title(post_days + ' minus ' + pre_days + ' VIMT Climatology')
# Top N difference vectors
plt.subplot(224)
m, _ = atm.pcolor_latlon(ds['vimt_bar_diff_masked'],axlims=axlims, cmap='hot_r')
plt.title('Magnitude of Top %d Difference Fluxes' % npts)
# Plot vector VIMT fluxes for a few individual years
ylist = [0, 1, 2, 3]
plt.figure(figsize=(12, 10))
for yr in ylist:
plt.subplot(2, 2, yr + 1)
m = atm.init_latlon(lat1, lat2, lon1, lon2)
m.quiver(x, y, ds['uq'][yr].mean(dim='day'), ds['vq'][yr].mean(dim='day'))
m.contour(x, y, ds['mask'].astype(float), [0.99], colors='red')
plt.title('%d May-Sep VIMT Fluxes' % ds.year[yr])
# ----------------------------------------------------------------------
# TIMESERIES
# ----------------------------------------------------------------------
onset = howi.onset
retreat = howi.retreat
length = retreat - onset
days = howi.day
years = howi.year.values
yearstr = '%d-%d' % (years[0], years[-1])
nroll = howi.attrs['nroll']
# Timeseries with and without rolling mean
def index_tseries(days, ind, ind_roll, titlestr):
plt.plot(days, ind, label='daily')
plt.plot(days, ind_roll, label='%d-day rolling' % nroll)
plt.grid()
plt.legend(loc='lower right')
plt.title(titlestr)
plt.figure(figsize=(12, 10))
plt.subplot(221)
index_tseries(days, ds.howi_clim_norm, ds.howi_clim_norm_roll,
'HOWI ' + yearstr + ' Climatology')
for yr in [0, 1, 2]:
plt.subplot(2, 2, yr + 2)
index_tseries(days, ds.howi_norm[yr], ds.howi_norm_roll[yr],
'HOWI %d' % years[yr])
# ----------------------------------------------------------------------
# Onset and retreat indices
summarize_indices(years, onset, retreat, 'HOWI')
# ----------------------------------------------------------------------
# Plot timeseries of each year
plot_index_years(howi)
# ----------------------------------------------------------------------
# Save figures
if isave:
for ext in exts:
atm.savefigs(namestr, ext)
| mit | -3,972,350,699,173,093,000 | 31.512821 | 80 | 0.564932 | false | 2.840926 | false | false | false |
pycroscopy/pycroscopy | pycroscopy/simulation/afm_lib.py | 1 | 33619 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 04 09:21:54 2017
@author: Enrique Alejandro
Description: this library contains the core algorithms for numerical simulations
You need to have installed:
    numba --> this can be easily installed (e.g., with the Anaconda distribution) via pip: pip install numba
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from numba import jit
import sys
sys.path.append('d:\github\pycroscopy')
from pycroscopy.simulation.afm_calculations import amp_phase, e_diss, v_ts
def verlet(zb, Fo1, Fo2, Fo3, Q1, Q2, Q3, k_L1, k_L2, k_L3, time, z1, z2,z3, v1,v2,v3, z1_old, z2_old, z3_old, Fts, dt,
fo1, fo2, fo3, f1, f2, f3):
"""This function performs verlet algorithm (central difference) for numerical integration.
It integrates the differential equations of three harmonic oscillator equations (each corresponding to a distinct
cantilever eigenmode)
This function does not assume ideal Euler-Bernoulli scaling but instead the cantilever parameters are passed to the
function
The dynamics of the cantilever are assumed to be contained in the first three flexural modes
This function will be called each simulation timestep by a main wrap around function which will contain the specific
contact-mechanics model.
Parameters:
----------
zb : float
        z equilibrium position (average tip position with respect to the sample)
Fo1 : float
        amplitude of the sinusoidal excitation force term (driving force) for the first eigenmode
    Fo2 : float
        amplitude of the sinusoidal excitation force term (driving force) for the second eigenmode
    Fo3 : float
        amplitude of the sinusoidal excitation force term (driving force) for the third eigenmode
Q1 : float
first eigenmode's quality factor
Q2 : float
second eigenmode's quality factor
Q3 : float
third eigenmode's quality factor
k_L1 : float
1st eigenmode's stiffness
k_L2 : float
2nd eigenmode's stiffness
k_L3 : float
3rd eigenmode's stiffness
z1 : float
instant 1st eigenmode deflection contribution
z2 : float
instant 2nd eigenmode deflection contribution
z3 : float
instant 3rd eigenmode deflection contribution
v1 : float
instant 1st eigenmode velocity
v2 : float
instant 2nd eigenmode velocity
v3 : float
instant 3rd eigenmode velocity
z1_old : float
instant 1st eigenmode deflection contribution corresponding to previous timestep
z2_old : float
instant 2nd eigenmode deflection contribution corresponding to previous timestep
z3_old : float
instant 3rd eigenmode deflection contribution corresponding to previous timestep
Fts : float
tip-sample interacting force
dt : float
simulation timestep
fo1 : float
1st eigenmode resonance frequency
fo2 : float
2nd eigenmode resonance frequency
fo3 : float
3rd eigenmode resonance frequency
f1 : float
1st sinusoidal excitation frequency
f2 : float
2nd sinusoidal excitation frequency
f3 : float
3rd sinusoidal excitation frequency
Returns:
-------
tip: float
instant tip position for new simulation timestep
z1 : float
instant 1st eigenmode deflection contribution for new simulation timestep
z2 : float
instant 2nd eigenmode deflection contribution for new simulation timestep
z3 : float
instant 3rd eigenmode deflection contribution for new simulation timestep
v1 : float
instant 1st eigenmode velocity for new simulation timestep
v2 : float
instant 2nd eigenmode velocity for new simulation timestep
v3 : float
instant 3rd eigenmode velocity for new simulation timestep
z1_old : float
instant 1st eigenmode deflection contribution corresponding to current timestep
z2_old : float
instant 2nd eigenmode deflection contribution corresponding to current timestep
z3_old : float
instant 3rd eigenmode deflection contribution corresponding to current timestep
"""
# TODO: Simplify inputs and outputs for this function. Consider wrapping up parameters for each eignenmode into an object or use lists for all k, Q, fo, etc.
a1 = ( -z1 - v1/(Q1*(fo1*2*np.pi)) + ( Fo1*np.cos((f1*2*np.pi)*time) + Fo2*np.cos((f2*2*np.pi)*time) + Fo3*np.cos((f3*2*np.pi)*time) + Fts)/k_L1 )* (fo1*2.0*np.pi)**2
a2 = ( -z2 - v2/(Q2*(fo2*2*np.pi)) + ( Fo1*np.cos((f1*2*np.pi)*time) + Fo2*np.cos((f2*2*np.pi)*time) + Fo3*np.cos((f3*2*np.pi)*time) + Fts)/k_L2 )* (fo2*2.0*np.pi)**2
a3 = ( -z3 - v3/(Q3*(fo3*2*np.pi)) + ( Fo1*np.cos((f1*2*np.pi)*time) + Fo2*np.cos((f2*2*np.pi)*time) + Fo3*np.cos((f3*2*np.pi)*time) + Fts)/k_L3 )* (fo3*2.0*np.pi)**2
# Verlet algorithm (central difference) to calculate position of the tip
z1_new = 2*z1 - z1_old + a1*pow(dt, 2)
z2_new = 2*z2 - z2_old + a2*pow(dt, 2)
z3_new = 2*z3 - z3_old + a3*pow(dt, 2)
# central difference to calculate velocities
v1 = (z1_new - z1_old)/(2*dt)
v2 = (z2_new - z2_old)/(2*dt)
v3 = (z3_new - z3_old)/(2*dt)
# Updating z1_old and z1 for the next run
z1_old = z1
z1 = z1_new
z2_old = z2
z2 = z2_new
z3_old = z3
z3 = z3_new
tip = z1 + z2 + z3 + zb
return tip, z1, z2, z3, v1, v2, v3, z1_old, z2_old, z3_old
numba_verlet = jit()(verlet) #it is important to keep this line outside the function to effectively accelerate the function when called
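# Call pattern (as used by the wrapper functions below): one invocation of
# numba_verlet advances all three eigenmodes by a single timestep, e.g.
#   tip, z1, z2, z3, v1, v2, v3, z1_old, z2_old, z3_old = numba_verlet(
#       zb, Fo1, Fo2, Fo3, Q1, Q2, Q3, k1, k2, k3, t, z1, z2, z3,
#       v1, v2, v3, z1_old, z2_old, z3_old, Fts, dt, fo1, fo2, fo3, f1, f2, f3)
# where Fts is the tip-sample force computed by the chosen contact-mechanics model.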
def gen_maxwell_lr(G, tau, R, dt, startprint, simultime, fo1, fo2, fo3, k_m1, k_m2, k_m3, A1, A2, A3, zb, printstep = 1, Ge = 0.0, Q1=100, Q2=200, Q3=300, H=2.0e-19):
"""This function is designed for multifrequency simulation performed over a Generalized Maxwell (Wiechert) viscoelastic surface.
The contact mechanics are performed over the framework of Lee and Radok (Lee, E. Ho, and Jens Rainer Maria Radok. "The contact problem for viscoelastic bodies." Journal of Applied Mechanics 27.3 (1960): 438-444.)
The cantilever dynamics are assumed to be contained in the first three eigenmodes.
The numerical integration is performed with the aid of the verlet function(defined above)
Parameters:
----------
G : numpy.ndarray
moduli of the springs in the Maxwell arms of a generalized Maxwell model (also called Wiechert model)
tau: numpy.ndarray
relaxation times of the Maxwell arms
R : float
tip radius
dt : float
simulation timestep
fo1 : float
1st eigenmode resonance frequency
fo2 : float
2nd eigenmode resonance frequency
fo3 : float
3rd eigenmode resonance frequency
k_m1 : float
1st eigenmode's stiffness
k_m2 : float
2nd eigenmode's stiffness
k_m3 : float
3rd eigenmode's stiffness
A1 : float
target oscillating amplitude of 1st cantilever eigenmode
A2 : float
target oscillating amplitude of 2nd cantilever eigenmode
A3 : float
target oscillating amplitude of 3rd cantilever eigenmode
zb : float
cantilever equilibrium position (average tip-sample distance)
printstep : float, optional
how often the data will be stored, default is timestep
Ge : float, optional
rubbery modulus, the default value is zero
Q1 : float, optional
first eigenmode's quality factor
Q2 : float, optional
second eigenmode's quality factor
Q3 : float, optional
third eigenmode's quality factor
H : float, optional
        Hamaker constant
Returns:
-------
np.array(t_a) : numpy.ndarray
time trace
np.array(tip_a) : numpy.ndarray
array containing the tip trajectory
np.array(Fts_a) : numpy.ndarray
array containing the tip-sample interacting force
np.array(xb_a) : numpy.ndarray
numpy array containing the instant position of the viscoelastic surface
"""
# TODO: Simplify inputs for this function. Consider useing lists for all k, Q, fo, etc.
G_a = []
tau_a = []
"""
this for loop is to make sure tau passed does not contain values lower than time step which would make numerical
integration unstable
"""
for i in range(len(G)):
if tau[i] > dt*10.0:
G_a.append(G[i])
tau_a.append(tau[i])
G = np.array(G_a)
tau = np.array(tau_a)
f1 = fo1
f2 = fo2
f3 = fo3
"""
Calculating the force amplitude to achieve the given free amplitude from amplitude response of tip excited
oscillator
"""
# Amplitude of 1st mode's force to achieve target amplitude based on amplitude response of a tip excited harmonic oscillator:
Fo1 = k_m1*A1/(fo1*2*np.pi)**2*(((fo1*2*np.pi)**2 - (f1*2*np.pi)**2 )**2 + (fo1*2*np.pi*f1*2*np.pi/Q1)**2)**0.5
# Amplitude of 2nd mode's force to achieve target amplitude based on amplitude response of a tip excited harmonic oscillator:
Fo2 = k_m2*A2/(fo2*2*np.pi)**2*(((fo2*2*np.pi)**2 - (f2*2*np.pi)**2 )**2 + (fo2*2*np.pi*f2*2*np.pi/Q2)**2)**0.5
# Amplitude of 3rd mode's force to achieve target amplitude based on amplitude response of a tip excited harmonic oscillator
Fo3 = k_m3*A3/(fo3*2*np.pi)**2*(((fo3*2*np.pi)**2 - (f3*2*np.pi)**2 )**2 + (fo3*2*np.pi*f3*2*np.pi/Q3)**2)**0.5
a = 0.2e-9 # interatomic distance
eta = tau*G
Gg = Ge
for i in range(len(tau)):
Gg = Gg + G[i]
t_a = []
Fts_a = []
xb_a = []
tip_a = []
printcounter = 1
if printstep == 1:
printstep = dt
t = 0.0 # initializing time
Fts = 0.0
xb = 0.0
pb = 0.0
pc, pc_rate = np.zeros(len(tau)), np.zeros(len(tau))
xc, xc_rate = np.zeros(len(tau)), np.zeros(len(tau))
alfa = 16.0/3.0*np.sqrt(R)
# Initializing Verlet variables
z1, z2, z3, v1, v2, v3, z1_old, z2_old, z3_old = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
sum_Gxc = 0.0
sum_G_pb_pc = 0.0
while t < simultime:
t = t + dt
tip, z1, z2, z3, v1, v2, v3, z1_old, z2_old, z3_old = numba_verlet(zb, Fo1, Fo2, Fo3, Q1, Q2, Q3, k_m1, k_m2, k_m3, t, z1, z2,z3, v1,v2,v3, z1_old, z2_old, z3_old, Fts, dt, fo1,fo2,fo3, f1,f2,f3)
if t > ( startprint + printstep*printcounter):
t_a.append(t)
Fts_a.append(Fts)
xb_a.append(xb)
tip_a.append(tip)
printcounter = printcounter + 1
sum_Gxc = 0.0
sum_G_pb_pc = 0.0
        if tip > xb: # apparent non-contact
for i in range(len(tau)):
sum_Gxc = sum_Gxc + G[i]*xc[i]
            if sum_Gxc/Gg > tip: # contact, the sample surface surpassed the tip on the way up
xb = tip
pb = (-xb)**1.5
for i in range(len(tau)):
sum_G_pb_pc = sum_G_pb_pc + G[i]*(pb - pc[i])
Fts = alfa*( Ge*pb + sum_G_pb_pc )
                # get position of dashpots
for i in range(len(tau)):
pc_rate[i] = G[i]/eta[i] * (pb - pc[i])
pc[i] = pc[i] + pc_rate[i]*dt
xc[i] = -(pc[i])**(2.0/3)
else: # true non-contact
xb = sum_Gxc/Gg
Fts = 0.0
for i in range(len(tau)):
xc_rate[i] = G[i]*(xb-xc[i])/eta[i]
xc[i] = xc[i] + xc_rate[i]*dt
pc[i] = (-xc[i])**(3.0/2) #debugging
else: # contact region
xb = tip
pb = (-xb)**1.5
for i in range(len(tau)):
sum_G_pb_pc = sum_G_pb_pc + G[i]*(pb - pc[i])
Fts = alfa*( Ge*pb + sum_G_pb_pc )
            # get position of dashpots
for i in range(len(tau)):
pc_rate[i] = G[i]/eta[i] * (pb - pc[i])
pc[i] = pc[i] + pc_rate[i]*dt
xc[i] = -(pc[i])**(2.0/3)
# MAKING CORRECTION TO INCLUDE VDW ACCORDING TO DMT THEORY
if tip > xb: # overall non-contact
Fts = -H*R/( 6.0*( (tip-xb) + a )**2 )
else:
Fts = Fts - H*R/(6.0*a**2)
return np.array(t_a), np.array(tip_a), np.array(Fts_a), np.array(xb_a)
GenMaxwell_jit = jit()(gen_maxwell_lr) #this line should stay outside the function to allow the numba compilation and simulation acceleration to work properly
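

if __name__ == '__main__':
    # Illustrative call (added): a short tapping-mode run over a made-up two-arm
    # generalized Maxwell material. All numbers are order-of-magnitude placeholders
    # chosen only so the example finishes quickly; they are not calibrated values.
    _G = np.array([1.0e8, 5.0e7])       # Pa, moduli of the Maxwell arms
    _tau = np.array([1.0e-6, 1.0e-5])   # s, relaxation times
    _t, _tip, _Fts, _xb = GenMaxwell_jit(_G, _tau, 10.0e-9, 1.0e-9, 0.0, 2.0e-5,
                                         300.0e3, 6.27*300.0e3, 17.6*300.0e3,
                                         10.0, 400.0, 3000.0,
                                         50.0e-9, 0.0, 0.0, 30.0e-9)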
def dynamic_spectroscopy(G, tau, R, dt, startprint, simultime, fo1, fo2, fo3, k_m1, k_m2, k_m3, A1, A2, A3, printstep = 1, Ge = 0.0, Q1=100, Q2=200, Q3=300, H=2.0e-19, z_step = 1):
"""This function is designed for tapping mode spectroscopy to obtain amplitude and phase curves as the cantilever is approached towards the surface.
The contact mechanics are performed over the framework of Lee and Radok for viscoelastic indentation (Lee, E. Ho, and Jens Rainer Maria Radok. "The contact problem for viscoelastic bodies." Journal of Applied Mechanics 27.3 (1960): 438-444.)
Parameters:
----------
G : numpy.ndarray
moduli of the springs in the Maxwell arms of a generalized Maxwell model (also called Wiechert model)
tau: numpy.ndarray
relaxation times of the Maxwell arms
R : float
tip radius
dt : float
simulation timestep
fo1 : float
1st eigenmode resonance frequency
fo2 : float
2nd eigenmode resonance frequency
fo3 : float
3rd eigenmode resonance frequency
k_m1 : float
1st eigenmode's stiffness
k_m2 : float
2nd eigenmode's stiffness
k_m3 : float
3rd eigenmode's stiffness
A1 : float
target oscillating amplitude of 1st cantilever eigenmode
A2 : float
target oscillating amplitude of 2nd cantilever eigenmode
A3 : float
target oscillating amplitude of 3rd cantilever eigenmode
printstep : float, optional
how often the data will be stored, default is timestep
Ge : float, optional
rubbery modulus, the default value is zero
Q1 : float, optional
first eigenmode's quality factor
Q2 : float, optional
second eigenmode's quality factor
Q3 : float, optional
third eigenmode's quality factor
H : float, optional
        Hamaker constant
z_step : float, optional
cantilever equilibrium spatial step between runs. The smaller this number, the more runs but slower the simulation
Returns:
-------
np.array(amp) : numpy.ndarray
array containing the reduced amplitudes at different cantilever equilibrium positions
np.array(phase) : numpy.ndarray
array containing the phase shifts obtained at different cantilever equilibrium positions
np.array(zeq) : numpy.ndarray
array containing the approaching cantilever equilibrium positions
np.array(Ediss) : numpy.ndarray
array containing the values of dissipated energy
    np.array(Virial) : numpy.ndarray
        array containing the values of the virial of the interaction
    np.array(peakF) : numpy.ndarray
        array containing values of peak force
np.array(maxdepth) : numpy.ndarray
array containing the values of maximum indentation
np.array(t_a) : numpy.ndarray
time trace
np.array(tip_a) : numpy.ndarray
2D array containing the tip trajectory for each run
np.array(Fts_a) : numpy.ndarray
2D array containing the tip-sample interacting force for each run
np.array(xb_a) : numpy.ndarray
2D array array containing the instant position of the viscoelastic surface for each run
"""
if z_step == 1:
z_step = A1*0.05 #default value is 5% of the free oscillation amplitude
zeq = []
peakF = []
maxdepth = []
amp = []
phase = []
Ediss = []
Virial = []
tip_a = []
Fts_a = []
xb_a = []
zb = A1*1.1
while zb > 0.0:
t, tip, Fts, xb = GenMaxwell_jit(G, tau, R, dt, startprint, simultime, fo1, fo2, fo3, k_m1, k_m2,k_m3, A1, A2, A3, zb, printstep, Ge, Q1, Q2, Q3, H)
A,phi = amp_phase(t, tip, fo1)
Ets = e_diss(tip, Fts, dt, fo1)
fts_peak = Fts[np.argmax(Fts)]
tip_depth = xb[np.argmax(tip)] -xb[np.argmin(tip)]
Vts = v_ts(tip-zb, Fts, dt)
#Attaching single values to lists
zeq.append(zb)
peakF.append(fts_peak)
maxdepth.append(tip_depth)
amp.append(A)
phase.append(phi)
Ediss.append(Ets)
Virial.append(Vts)
#attaching 1D arrays to lists
tip_a.append(tip)
Fts_a.append(Fts)
xb_a.append(xb)
zb -= z_step
return np.array(amp), np.array(phase), np.array(zeq), np.array(Ediss), np.array(Virial), np.array(peakF), np.array(maxdepth), t, np.array(tip_a), np.array(Fts_a), np.array(xb_a)
def verlet_FS(y_t, Q1, Q2, Q3, k1, k2, k3, time, z1, z2,z3, v1,v2,v3, z1_old, z2_old, z3_old, Fts, dt, fo1,fo2,fo3, Fb1=0.0, Fb2=0.0, Fb3=0.0):
"""This function performs verlet algorithm (central difference) for numerical integration of the AFM cantilever dynamics.
    The equations of motion are for a base-excited cantilever, to be used for a static force spectroscopy simulation
It integrates the differential equations of three harmonic oscillators (each corresponding to a distinct
cantilever eigenmode)
The dynamics of the cantilever are assumed to be contained in the first three flexural modes
This function will be called each simulation timestep by a main wrap around function which will contain the specific
contact-mechanics model.
Parameters:
----------
y_t : float
        z equilibrium position (average tip position with respect to the sample)
Q1 : float
first eigenmode's quality factor
Q2 : float
second eigenmode's quality factor
Q3 : float
third eigenmode's quality factor
k1 : float
1st eigenmode's stiffness
k2 : float
2nd eigenmode's stiffness
k3 : float
3rd eigenmode's stiffness
time : float
instant time of the simulation
z1 : float
instant 1st eigenmode deflection contribution
z2 : float
instant 2nd eigenmode deflection contribution
z3 : float
instant 3rd eigenmode deflection contribution
v1 : float
instant 1st eigenmode velocity
v2 : float
instant 2nd eigenmode velocity
v3 : float
instant 3rd eigenmode velocity
z1_old : float
instant 1st eigenmode deflection contribution corresponding to previous timestep
z2_old : float
instant 2nd eigenmode deflection contribution corresponding to previous timestep
z3_old : float
instant 3rd eigenmode deflection contribution corresponding to previous timestep
Fts : float
tip-sample interacting force
dt : float
simulation timestep
fo1 : float
1st eigenmode resonance frequency
fo2 : float
2nd eigenmode resonance frequency
fo3 : float
3rd eigenmode resonance frequency
Fb1 : float, optional
amplitude of the 1st eigenmode Brownian force (associated to thermal noise)
Fb2 : float, optional
amplitude of the 2nd eigenmode Brownian force (associated to thermal noise)
Fb3 : float, optional
amplitude of the 3rd eigenmode Brownian force (associated to thermal noise)
Returns:
-------
tip: float
instant tip position for new simulation timestep
z1 : float
instant 1st eigenmode deflection contribution for new simulation timestep
z2 : float
instant 2nd eigenmode deflection contribution for new simulation timestep
z3 : float
instant 3rd eigenmode deflection contribution for new simulation timestep
v1 : float
instant 1st eigenmode velocity for new simulation timestep
v2 : float
instant 2nd eigenmode velocity for new simulation timestep
v3 : float
instant 3rd eigenmode velocity for new simulation timestep
z1_old : float
instant 1st eigenmode deflection contribution corresponding to current timestep
z2_old : float
instant 2nd eigenmode deflection contribution corresponding to current timestep
z3_old : float
instant 3rd eigenmode deflection contribution corresponding to current timestep
"""
a1 = ( - z1 - v1*1.0/(fo1*2*np.pi*Q1) + y_t + Fts/k1 + Fb1/k1) *(2.0*np.pi*fo1)**2
a2 = ( - z2 - v2*1.0/(fo2*2*np.pi*Q2) + Fts/k2 + Fb2/k2) *(2.0*np.pi*fo2)**2
a3 = ( - z3 - v3*1.0/(fo3*2*np.pi*Q3) + Fts/k3 + Fb3/k3) *(2.0*np.pi*fo3)**2
#Verlet algorithm (central difference) to calculate position of the tip
z1_new = 2*z1 - z1_old + a1*pow(dt, 2)
z2_new = 2*z2 - z2_old + a2*pow(dt, 2)
z3_new = 2*z3 - z3_old + a3*pow(dt, 2)
#central difference to calculate velocities
v1 = (z1_new - z1_old)/(2*dt)
v2 = (z2_new - z2_old)/(2*dt)
v3 = (z3_new - z3_old)/(2*dt)
#Updating z1_old and z1 for the next run
z1_old = z1
z1 = z1_new
z2_old = z2
z2 = z2_new
z3_old = z3
z3 = z3_new
tip = z1 + z2 + z3
return tip, z1, z2, z3, v1, v2, v3, z1_old, z2_old, z3_old
numba_verlet_FS = jit()(verlet_FS)
def sfs_genmaxwell_lr(G, tau, R, dt, simultime, y_dot, y_t_initial, k_m1, fo1, Ge = 0.0, Q1=100, printstep = 1, H = 2.0e-19, Q2=200, Q3=300, startprint = 1, vdw = 1):
"""This function is designed for force spectroscopy over a Generalized Maxwel surface
The contact mechanics are performed over the framework of Lee and Radok, thus strictly only applies for approach portion
Parameters:
----------
G : numpy.ndarray
moduli of the springs in the Maxwell arms of a generalized Maxwell model (also called Wiechert model)
tau: numpy.ndarray
relaxation times of the Maxwell arms
R : float
tip radius
dt : float
simulation timestep
simultime : float
total simulation time
y_dot: float
approach velocity of the cantilever's base towards the sample
y_t_initial: float
initial position of the cantilever base with respect to the sample
k_m1 : float
1st eigenmode's stiffness
fo1 : float
1st eigenmode resonance frequency
Ge : float, optional
equilibrium modulus of the material, default value is zero
Q1 : float, optional
1st eigenmode quality factor
printstep : int, optional
if value is 1 the data will be printed with step equal to dt
H : float, optional
Hamaker constant
Q2 : float, optional
2nd eigenmode quality factor
Q3 : float, optional
3rd eigenmode quality factor
startprint : float, optional
when the simulation starts getting printed
vdw : int, optional
if value is 1 van der Waals forces are neglected
Returns:
-------
np.array(t_a) : numpy.ndarray
time trace
np.array(tip_a) : numpy.ndarray
tip position in force spectroscopy simulation
np.array(Fts_a) : numpy.ndarray
        tip-sample force interaction in the force spectroscopy simulation
    np.array(xb_a) : numpy.ndarray
        viscoelastic sample position in the simulation
    np.array(defl_a) : numpy.ndarray
        cantilever deflection during the force spectroscopy simulation
np.array(zs_a) : numpy.ndarray
z-sensor position (cantilever base position)
"""
G_a = []
tau_a = []
"""
this for loop is to make sure tau passed does not contain values lower than time step which would make numerical
integration unstable
"""
for i in range(len(G)):
if tau[i] > dt*10.0:
G_a.append(G[i])
tau_a.append(tau[i])
G = np.array(G_a)
tau = np.array(tau_a)
    a = 0.2e-9 # intermolecular distance
fo2 = 6.27*fo1 # resonance frequency of the second eigenmode (value taken from Garcia, R., & Herruzo, E. T. (2012). The emergence of multifrequency force microscopy. Nature nanotechnology, 7(4), 217-226.)
fo3 = 17.6*fo1 # resonance frequency of the third eigenmode (value taken from Garcia, R., & Herruzo, E. T. (2012). The emergence of multifrequency force microscopy. Nature nanotechnology, 7(4), 217-226.)
k_m2 = k_m1*(fo2/fo1)**2
k_m3 = k_m1*(fo3/fo1)**2
if startprint == 1: #this is the default value when the function will start saving results
startprint = y_t_initial/y_dot
eta = tau*G
Gg = Ge
    for i in range(len(tau)): #this loop looks silly but if you replace it with Gg = Ge + sum(G[:]) it will conflict with numba, making the simulation very slow
Gg = Gg + G[i]
t_a = []
Fts_a = []
xb_a = []
tip_a = []
defl_a = []
zs_a = []
printcounter = 1
    if printstep == 1: #default value of timestep
printstep = dt
t = 0.0 #initializing time
Fts = 0.0
xb = 0.0
pb = 0.0
pc, pc_rate = np.zeros(len(tau)), np.zeros(len(tau))
alfa = 16.0/3.0*np.sqrt(R) #cell constant, related to tip geometry
#Initializing Verlet variables
z2, z3, v2, v3, z2_old, z3_old = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
v1 = y_dot
z1 = y_t_initial
z1_old = y_t_initial
while t < simultime:
t = t + dt
y_t = - y_dot*t + y_t_initial #Displacement of the base (z_sensor position)
tip, z1, z2, z3, v1, v2, v3, z1_old, z2_old, z3_old = numba_verlet_FS(y_t, Q1, Q2, Q3, k_m1, k_m2, k_m3, t, z1, z2,z3, v1,v2,v3, z1_old, z2_old, z3_old, Fts, dt, fo1,fo2,fo3)
defl = tip - y_t
if t > ( startprint + printstep*printcounter):
defl_a.append(defl)
zs_a.append(y_t)
t_a.append(t)
Fts_a.append(Fts)
xb_a.append(xb)
tip_a.append(tip)
printcounter += 1
sum_G_pc = 0.0
sum_G_pb_pc = 0.0
        if tip > xb: #apparent non-contact
for i in range(len(tau)):
sum_G_pc = sum_G_pc + G[i]*pc[i]
if sum_G_pc/Gg > tip: #contact, the sample surface surpassed the tip
xb = tip
pb = (-xb)**1.5
for i in range(len(tau)):
sum_G_pb_pc = sum_G_pb_pc + G[i]*(pb - pc[i])
Fts = alfa*( Ge*pb + sum_G_pb_pc )
else: #true non contact
pb = sum_G_pc/Gg
xb = pb**(2.0/3)
Fts = 0.0
else: #contact region
xb = tip
pb = (-xb)**1.5
for i in range(len(tau)):
sum_G_pb_pc = sum_G_pb_pc + G[i]*(pb - pc[i])
Fts = alfa*( Ge*pb + sum_G_pb_pc )
        #get position of dashpots
for i in range(len(tau)):
pc_rate[i] = G[i]/eta[i] * (pb - pc[i])
pc[i] = pc[i] + pc_rate[i]*dt
if vdw != 1:
#MAKING CORRECTION TO INCLUDE VDW ACCORDING TO DMT THEORY
if tip > xb: #overall non-contact
Fts = -H*R/( 6.0*( (tip-xb) + a )**2 )
else:
Fts = Fts - H*R/(6.0*a**2)
return np.array(t_a), np.array(tip_a), np.array(Fts_a), np.array(xb_a), np.array(defl_a), np.array(zs_a)
def compliance_maxwell(G, tau , Ge = 0.0, dt = 1, simul_t = 1, lw=0):
"""This function returns the numerical compliance of a Generalized Maxwell model.
This numerical compliance is useful for interconversion from Gen Maxwell model to generalized Voigt model
Parameters:
----------
G : numpy.ndarray
moduli of the springs in the Maxwell arms of a generalized Maxwell model (also called Wiechert model)
tau: numpy.ndarray
relaxation times of the Maxwell arms
Ge : float, optional
equilibrium modulus of the material, default value is zero
dt : float, optional
simulation timestep
simul_t : float, optional
total simulation time
lw : int, optional
flag to return calculated compliance with logarithmic weight
Returns:
----------
np.array(t_r) : numpy.ndarray
array containing the time trace
np.array(J_r) : numpy.ndarray
array containing the calculated creep compliance
"""
    if dt == 1: #if timestep is not user defined it will be given as a fraction of the lowest characteristic time
        dt = tau[0]/100.0
    if simul_t == 1: #if simulation time is not defined it will be calculated with respect to the largest relaxation time
simul_t = tau[len(tau)-1]*10.0e3
G_a = []
tau_a = []
"""
this for loop is to make sure tau passed does not contain values lower than time step which would make numerical
integration unstable
"""
for i in range(len(G)):
if tau[i] > dt*10.0:
G_a.append(G[i])
tau_a.append(tau[i])
G = np.array(G_a)
tau = np.array(tau_a)
Gg = Ge
    for i in range(len(tau)): #this loop looks silly but if you replace it with Gg = Ge + sum(G[:]) it will conflict with numba, making the simulation very slow
Gg = Gg + G[i]
eta = tau*G
Jg =1.0/Gg #glassy compliance
N = len(tau)
Epsilon_visco = np.zeros(N) #initial strain
Epsilon_visco_dot = np.zeros(N) #initial strain velocity
t_r = [] #creating list with unknown number of elements
J_r = [] #creating list with unknown number of elements
time = 0.0
J_t = Jg #initial compliance
print_counter = 1
tr = dt #printstep
while time < simul_t: #CREEP COMPLIANCE SIMULATION, ADVANCING IN TIME
time = time + dt
        sum_Gn_EpsVisco_n = 0.0 #this sum has to be reset to zero every timestep
for n in range(0,N):
Epsilon_visco_dot[n] = G[n]*(J_t - Epsilon_visco[n])/eta[n]
Epsilon_visco[n] = Epsilon_visco[n] + Epsilon_visco_dot[n]*dt
sum_Gn_EpsVisco_n = sum_Gn_EpsVisco_n + G[n]*Epsilon_visco[n]
J_t = (1 + sum_Gn_EpsVisco_n)/Gg
if time >= print_counter*tr and time < simul_t:
t_r.append(time)
J_r.append(J_t)
print_counter += 1
if lw != 0: #if logarithmic weight is activated, the data will be appended weighted logarithmically
if print_counter == 10:
tr = tr*10
print_counter = 1
return np.array(t_r), np.array(J_r)
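

if __name__ == '__main__':
    # Illustrative call (added): creep compliance of a made-up two-arm Maxwell
    # model. dt and simul_t are passed explicitly so the example stays short;
    # all values are placeholders, not fitted material parameters.
    _t_J, _J = compliance_maxwell(np.array([1.0e8, 5.0e7]), np.array([1.0e-4, 1.0e-3]),
                                  Ge=1.0e6, dt=1.0e-6, simul_t=1.0e-2, lw=1)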
def relaxation_voigt(J, tau, Jg, phi_f = 0.0, dt = 1, simul_t = 1, lw = 0):
"""This function returns the numerical relaxation modulus of a Generalized Voigt model
This numerical relaxation modulus is useful for interconversion from Gen Maxwell model to generalized Voigt model
Parameters:
----------
J : numpy.ndarray
compliances of the springs in the Voigt units of a generalized Voigt model
tau: numpy.ndarray
        retardation times of the Voigt units
Jg : float
glassy compliance of the material
dt : float, optional
simulation timestep
simul_t : float, optional
total simulation time
lw : int, optional
flag to return calculated compliance with logarithmic weight
Returns:
----------
np.array(t_r) : numpy.ndarray
array containing the time trace
np.array(G_r) : numpy.ndarray
array containing the calculated relaxation modulus
"""
    if dt == 1: #if timestep is not user defined it will be given as a fraction of the lowest characteristic time
dt = tau[0]/100.0
if simul_t ==1: #if simulation time is not defined it will be calculated with respect to largest retardation time
simul_t = tau[len(tau)-1]*10.0e3
J_a = []
tau_a = []
"""
this for loop is to make sure tau passed does not contain values lower than time step which would make numerical
integration unstable
"""
for i in range(len(J)):
if tau[i] > dt*10.0:
J_a.append(J[i])
tau_a.append(tau[i])
J = np.array(J_a)
tau = np.array(tau_a)
Gg = 1.0/Jg
N = len(tau)
phi = J/tau
#Defining initial conditions
x = np.zeros(N)
x_dot = np.zeros(N)
t_r = [] #creating list with unknown number of elements
G_r = [] #creating list with unknown number of elements
time = 0.0
G_t = Gg #initial relaxation modulus
print_counter = 1
tr = dt #printstep
while time < simul_t: #RELAXATION MODULUS SIMULATION, ADVANCING IN TIME
time = time + dt
k = len(tau) - 1
while k > -1:
if k == len(tau) - 1:
x_dot[k] = G_t*phi[k]
else:
x_dot[k] = G_t*phi[k] + x_dot[k+1]
k -=1
for i in range(len(tau)):
x[i] = x[i] + x_dot[i]*dt
G_t = Gg*(1.0-x[0])
if time >= print_counter*tr and time <simul_t:
t_r.append(time)
G_r.append(G_t)
print_counter += 1
if lw != 0: #if logarithmic weight is activated, the data will be appended weighted logarithmically
if print_counter == 10:
tr = tr*10
print_counter = 1
return np.array(t_r), np.array(G_r)
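

if __name__ == '__main__':
    # Illustrative call (added): relaxation modulus of a made-up generalized
    # Voigt model, the counterpart of the compliance computed above; all values
    # are placeholders chosen only so the example runs quickly.
    _t_G, _G_t = relaxation_voigt(np.array([1.0e-8, 5.0e-8]), np.array([1.0e-4, 1.0e-3]),
                                  Jg=1.0e-9, dt=1.0e-6, simul_t=1.0e-2, lw=1)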
| mit | 4,924,540,274,512,157,000 | 38.182984 | 246 | 0.607722 | false | 3.270331 | false | false | false |
binarybottle/mindboggle_sidelined | fundi_from_pits/libbasin.py | 1 | 18234 | #!/usr/bin/python
"""
Extracting features from VTK input files.
Authors:
- Forrest Sheng Bao ([email protected]) http://fsbao.net
Copyright 2012, Mindboggle team (http://mindboggle.info), Apache v2.0 License
For algorithmic details, please check:
Forrest S. Bao, et al., Automated extraction of nested sulcus features from human brain MRI data,
IEEE EMBC 2012, San Diego, CA
Dependencies:
python-vtk: vtk's official Python binding
numpy
io_vtk : under mindboggle/utils
"""
from numpy import mean, std, median, array, zeros, eye, flatnonzero, sign, matrix, zeros_like
import os.path
import cPickle
#import io_vtk # Assuming io_vtk is in PYTHONPATH
from mindboggle.utils import io_vtk
import sys
#-----------------Begin function definitions-------------------------------------------------------------
def fcNbrLst(FaceDB, Hemi):
'''Get a neighbor list of faces, also the vertex not shared with current face
Data structure:
NbrLst: a list of size len(FaceDB)
NbrLst[i]: two lists of size 3 each.
NbrLst[i][0] = [F0, F1, F2]: F0 is the neighbor of face i facing V0 where [V0, V1, V2] is face i. And so forth.
NbrLst[i][1] = [V0p, V1p, V2p]: V0p is the vertex of F0 that is not shared with face i
'''
NbrFile = Hemi + '.fc.nbr'
if os.path.exists(NbrFile):
#return fileio.loadFcNbrLst(NbrFile)
print "loading face nbr lst from:" , NbrFile
Fp = open(NbrFile, 'r')
NbrLst = cPickle.load(Fp)
Fp.close()
return NbrLst
print "calculating face neighbor list"
FaceNo = len(FaceDB)
NbrLst = []
[NbrLst.append([[-1,-1,-1], [-1,-1,-1]]) for i in xrange(FaceNo)]
Done =[]
[Done.append(0) for i in xrange(FaceNo)]
for i in xrange(0, FaceNo):
# for i in xrange(0, 2600+1):
# print i
Face = FaceDB[i]
# [V0, V1, V2] = Face
# Found = 0 # if Found == 1, no need to try other faces
for j in xrange(i+1, FaceNo):
AnotherFace = FaceDB[j]
for Idx in xrange(0,2):
ChkFc1 = Face[Idx]
for ChkFc2 in Face[Idx+1:3]:
if ChkFc1 in AnotherFace:
if ChkFc2 in AnotherFace:
NbrID1 = 3 - Face.index(ChkFc1) - Face.index(ChkFc2) # determine it's F0, F1 or F2.
NbrLst[i][0][NbrID1] = j
NbrID2 = 3 - AnotherFace.index(ChkFc1) - AnotherFace.index(ChkFc2) # determine it's F0, F1 or F2.
NbrLst[j][0][NbrID2] = i
# Vp1 = AnotherFace[NbrID2]# determine V{0,1,2}p
# Vp2 = Face[NbrID1]# determine V{0,1,2}p
NbrLst[i][1][NbrID1] = AnotherFace[NbrID2]
NbrLst[j][1][NbrID2] = Face[NbrID1]
Done[i] += 1
Done[j] += 1
if Done[i] ==3:
break # all three neighbors of Face has been found
Fp = open(NbrFile, 'w')
# Commented 2011-11-27 23:54
# for i in xrange(0, len(FaceDB)
# for j in NbrLst[i]:
# Fp.write(str(j[0]) + '\t' + str(j[1]) + '\t' + str(j[2]) + '\t')
# Fp.write('\n')
# End of Commented 2011-11-27 23:54
cPickle.dump(NbrLst, Fp)
Fp.close()
return NbrLst
def vrtxNbrLst(VrtxNo, FaceDB, Hemi):
"""Given the number of vertexes and the list of faces, find the neighbors of each vertex, in list formate.
"""
NbrFile = Hemi + '.vrtx.nbr'
if os.path.exists(NbrFile):
#return fileio.loadVrtxNbrLst(NbrFile) # change to cPickle
print "Loading vertex nbr lst from:", NbrFile
Fp = open(NbrFile, 'r') # need to use cPickle
NbrLst = cPickle.load(Fp)
Fp.close()
return NbrLst
print "Calculating vertex neighbor list"
NbrLst = [[] for i in xrange(0, VrtxNo)]
for Face in FaceDB:
[V0, V1, V2] = Face
if not V1 in NbrLst[V0]:
NbrLst[V0].append(V1)
if not V2 in NbrLst[V0]:
NbrLst[V0].append(V2)
if not V0 in NbrLst[V1]:
NbrLst[V1].append(V0)
if not V2 in NbrLst[V1]:
NbrLst[V1].append(V2)
if not V0 in NbrLst[V2]:
            NbrLst[V2].append(V0)
if not V1 in NbrLst[V2]:
NbrLst[V2].append(V1)
Fp = open(NbrFile, 'w') # need to use cPickle
# Commented 2011-11-27 23:54
# for i in xrange(0, VrtxNo):
# [Fp.write(str(Vrtx) + '\t') for Vrtx in NbrLst[i]]
# Fp.write('\n')
# End of Commented 2011-11-27 23:54
cPickle.dump(NbrLst, Fp)
Fp.close()
return NbrLst
def compnent(FaceDB, Basin, NbrLst, PathHeader):
'''Get connected component, in each of all basins, represented as faces and vertex clouds
Parameters
-----------
NbrLst : list
neighbor list of faces, NOT VERTEXES
PathHeader : header of the path to save component list
'''
FcCmpntFile = PathHeader + '.cmpnt.face'
VrtxCmpntFile = PathHeader + '.cmpnt.vrtx'
if os.path.exists(FcCmpntFile) and os.path.exists(VrtxCmpntFile):
# return fileio.loadCmpnt(FcCmpntFile), fileio.loadCmpnt(VrtxCmpntFile)
print "Loading Face Components from:", FcCmpntFile
Fp = open(FcCmpntFile, 'r')
FcCmpnt = cPickle.load(Fp)
Fp.close()
print "Loading Vertex Components from:", VrtxCmpntFile
Fp = open(VrtxCmpntFile, 'r')
VrtxCmpnt = cPickle.load(Fp)
Fp.close()
return FcCmpnt, VrtxCmpnt
print "calculating face and vertex components"
Visited = [False for i in xrange(0, len(Basin))]
FcCmpnt, VrtxCmpnt = [], []
while not allTrue(Visited):
Seed = dfsSeed(Visited, Basin)# first basin face that is not True in Visited
# print Seed
        Visited, FcMbr, VrtxMbr = dfs(Seed, Basin, Visited, NbrLst, FaceDB)# DFS to find all connected members from the Seed
FcCmpnt.append(FcMbr)
VrtxCmpnt.append(VrtxMbr)
# fileio.writeCmpnt(FcCmpnt, FcCmpntFile)
# fileio.writeCmpnt(VrtxCmpnt, VrtxCmpntFile)
Fp = open(FcCmpntFile, 'w')
cPickle.dump(FcCmpnt, Fp)
Fp.close()
Fp = open(VrtxCmpntFile, 'w')
cPickle.dump(VrtxCmpnt, Fp)
Fp.close()
return FcCmpnt, VrtxCmpnt
def judgeFace1(FaceID, FaceDB, CurvatureDB, Threshold = 0):
"""Check whether a face satisfies the zero-order criterion
    If all three vertexes of a face have curvature values above the threshold, return True. O/w, False.
Input
======
FaceID: integer
the ID of a face, indexing from 0
FaceDB: list
len(FaceDB) == number of faces in the hemisphere
        FaceDB[i]: a 1-D list of the IDs of the three vertexes that make up the i-th face
CurvatureDB: list
len(CurvatureDB) == number of vertexes in the hemisphere
CurvatureDB[i]: integer, the curvature of the i-th vertex
"""
[V0, V1, V2] = FaceDB[FaceID]
##
# if (CurvatureDB[V0] > Threshold) and (CurvatureDB[V1] > Threshold) and (CurvatureDB[V2] > Threshold):
# return True
# else:
# return False
##
if (CurvatureDB[V0] <= Threshold) or (CurvatureDB[V1] <= Threshold) or (CurvatureDB[V2] <= Threshold):
return False
else:
return True
def basin(FaceDB, CurvatureDB, Threshold = 0):
'''Given a list of faces and per-vertex curvature value, return a list of faces comprising basins
'''
Basin = []
Left = []
for FaceID in xrange(0, len(FaceDB)):
if judgeFace1(FaceID, FaceDB, CurvatureDB, Threshold = Threshold):
Basin.append(FaceID)
else:
Left.append(FaceID)
return Basin, Left
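

# Illustrative sketch (added): basin() on a tiny made-up mesh. Faces whose three
# vertexes all have curvature above the threshold are kept as basin faces; the
# rest are returned in Left. The arrays below are test data, not Mindboggle output.
if __name__ == "__main__":
    _faces = [[0, 1, 2], [1, 2, 3]]
    _curv = [0.5, 0.9, 0.2, -0.1]
    _basin, _left = basin(_faces, _curv, Threshold=0)
    assert _basin == [0] and _left == [1]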
def allTrue(List):
'''Check whether a logical list contains non-True elements.
'''
# for Bool in List:
# if not Bool:
# return False
# return True
return all(x==True for x in List)
def dfsSeed(Visited, Basin):
'''Given a list of faces comprising the basins, find a face that has not been visited which will be used as the seeding point for DFS.
'''
for i in xrange(0, len(Visited)):
if not Visited[i]:
return Basin[i]
def dfs(Seed, Basin, Visited, NbrLst, FaceDB):
'''Return all members (faces and vertexes) of the connected component that can be found by DFS from a given seed point
Parameters
-----------
NbrLst : list
neighbor list of faces, NOT VERTEXES
'''
Queue = [Seed]
FcMbr = [] # members that are faces of this connected component
VrtxMbr = [] # members that are vertex of this connected component
while Queue != []:
# print Queue
Seed = Queue.pop()
if Seed in Basin:
if not Visited[Basin.index(Seed)]:
Visited[Basin.index(Seed)] = True
FcMbr.append(Seed)
for Vrtx in FaceDB[Seed]:
if not (Vrtx in VrtxMbr):
VrtxMbr.append(Vrtx)
Queue += NbrLst[Seed][0]
return Visited, FcMbr, VrtxMbr
def pmtx(Adj):
'''Print a matrix as shown in MATLAB stdio
'''
for j in xrange(0,25):
print j,
print '\n'
for i in xrange(0, 25):
print i,
for j in xrange(0, 25):
print Adj[i,j],
print '\n'
def all_same(items):
return all(x == items[0] for x in items)
def univariate_pits(CurvDB, VrtxNbrLst, VrtxCmpnt, Thld):
'''Finding pits using one variable, e.g., depth.
'''
print "Extracting pits"
# Stack, P, Child, M, B, End, L = [], [], {}, -1, [], {}, 10
C = [-1 for i in xrange(0, len(VrtxNbrLst))]
Child = {}
End = {}
M = -1
B = []
for Cmpnt in VrtxCmpnt: # for each component
Curv=dict([(i, CurvDB[i]) for i in Cmpnt])
Stack = []
for Vrtx, Cvtr in sorted(Curv.iteritems(), key=lambda (k,v): (v,k)):
Stack.append(Vrtx)
Visited = []
while len(Stack) >0:
Skip_This_Vrtx = False # updated Forrest 2012-02-12, skip vertexes whose neighbors are not in the component to denoise
Vrtx = Stack.pop()
WetNbr = []
NbrCmpnt = []
for Nbr in list(set(VrtxNbrLst[Vrtx])):
if not Nbr in Cmpnt:
Skip_This_Vrtx = True
if Nbr in Visited: # This condition maybe replaced by If C[Vrtx] ==-1
WetNbr.append(Nbr)
if C[Nbr] != -1:
NbrCmpnt.append(C[Nbr])
if Skip_This_Vrtx :
continue
Visited.append(Vrtx)
if len(WetNbr) == 1: # if the vertex has one neighbor that is already wet
[Nbr] = WetNbr
if End[C[Nbr]]:
C[Vrtx] = Child[C[Nbr]]
else:
C[Vrtx] = C[Nbr]
# print C[Nbr], "==>", C[V]
elif len(WetNbr) >1 and all_same(NbrCmpnt): # if the vertex has more than one neighbors which are in the same component
if End[NbrCmpnt[0]]:
C[Vrtx] = Child[NbrCmpnt[0]]
else:
C[Vrtx] = NbrCmpnt[0]
elif len(WetNbr) >1 and not all_same(NbrCmpnt): # if the vertex has more than one neighbors which are NOT in the same component
M += 1
C[Vrtx] = M
for Nbr in WetNbr:
Child[C[Nbr]] = M
End[C[Nbr]] = True
End[M] = False
# elif : # the vertex's neighbor are not fully in the component
else:
M += 1
if CurvDB[Vrtx] > Thld:
B.append(Vrtx)
End[M] = False
C[Vrtx] = M
return B, C, Child
def clouchoux(MCurv, GCurv):
'''Judge whether a vertex is a pit in Clouchoux's definition
Parameters
===========
MCurv: float
mean curvature of a vertex
H in Clouchoux's paper
GCurv: float
Gaussian curvature of a vertex
K in Clouchoux's paper
Returns
========
True if this is a pit. False, otherwise.
Notes
=========
(Since Joachim's code updates all the time, these settings have to be updated accordingly)
In Clouchoux's paper, the following definitions are used:
H > 0, K > 0: pit, in Clouchoux's paper
H < 0, K > 0: peak, in Clouchoux's paper
If features are computed by ComputePricipalCurvature(),
use this settings to get proper pits:
H > 3, K < 0 (curvatures not normalized)
H > 0.2, K < 0 (curvatures normalized)
'''
# if (MCurv > 3) and (GCurv < 0):
if (MCurv > 0.2) and (GCurv < 0):
return True
else:
return False
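# Quick sanity example (hypothetical values): with normalized curvatures,
# clouchoux(0.3, -0.05) is True (H > 0.2 and K < 0), while clouchoux(0.1, -0.05)
# and clouchoux(0.3, 0.02) are both False.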
def clouchoux_pits(Vertexes, MCurv, GCurv):
'''Extract pits using Clouchoux's definition
'''
Pits = []
for i in xrange(len(Vertexes)):
if clouchoux(MCurv[i], GCurv[i]):
Pits.append(i)
print len(Pits), "Pits found"
return Pits
def getBasin_and_Pits(Maps, Mesh, SulciVTK, PitsVTK, SulciThld = 0, PitsThld = 0, Quick=False, Clouchoux=False, SulciMap='depth'):
'''Extracting basin and pits (either local minimum approach or Clouchoux's)
Parameters
=============
Maps: dictionary
Keys are map names, e.g., depth or curvatures.
Values are per-vertex maps, e.g., curvature map.
Mesh: 2-tuple of lists
the first list has coordinates of vertexes while the second defines triangles on the mesh
This is a mandatory surface, normally a non-inflated surface.
SulciThld: float
the value to threshold the surface to separate sulci and gyri
PitsThld: float
vertexes deeper than this value can be considered as pits
Quick: Boolean
If true, extract sulci only (no component ID, only thresholding), skipping pits and later fundi.
Clouchoux: Boolean
If true, extract pits using Clouchoux's definition. O/w, local minimum approach.
SulciMap: string
The map to be used to get sulci
by default, 'depth'
'''
def write_surface_with_LUTs(File, Points, Faces, Maps):
"""Like write_scalars in io_vtk but no writing of vertices
"""
print "writing sulci into VTK file:", File
Fp = open(File,'w')
io_vtk.write_header(Fp)
io_vtk.write_points(Fp, Points)
io_vtk.write_faces(Fp, Faces)
if len(Maps) > 0:
# Make sure that LUTs is a list of lists
Count = 0
for LUT_name, LUT in Maps.iteritems():
if Count == 0 :
io_vtk.write_scalars(Fp, LUT, LUT_name)
else:
io_vtk.write_scalars(Fp, LUT, LUT_name, begin_scalars=False)
Count += 1
Fp.close()
return None
def write_pits_without_LUTs(File, Points, Indexes):
"""Like write_scalars in io_vtk but no writing of vertices
"""
print "writing pits into VTK file:", File
Fp = open(File,'w')
io_vtk.write_header(Fp)
io_vtk.write_points(Fp, Points)
io_vtk.write_vertices(Fp, Indexes)
Fp.close()
return None
print "\t thresholding the surface using threshold = ", SulciThld
[Vertexes, Faces] = Mesh
MapBasin = Maps[SulciMap]
Basin, Gyri = basin(Faces, Maps[SulciMap], Threshold = SulciThld)
if not Quick:
LastSlash = len(SulciVTK) - SulciVTK[::-1].find('/')
Hemi = SulciVTK[:SulciVTK[LastSlash:].find('.')+LastSlash]# path up to which hemisphere, e.g., /home/data/lh
VrtxNbr = vrtxNbrLst(len(Vertexes), Faces, Hemi)
FcNbr = fcNbrLst(Faces, Hemi)
FcCmpnt, VrtxCmpnt = compnent(Faces, Basin, FcNbr, ".".join([Hemi, SulciMap, str(SulciThld)]))
CmpntLUT = [-1 for i in xrange(len(MapBasin))]
for CmpntID, Cmpnt in enumerate(VrtxCmpnt):
for Vrtx in Cmpnt:
CmpntLUT[Vrtx] = CmpntID
Maps['CmpntID'] = CmpntLUT
if Clouchoux:
Pits = clouchoux_pits(Vertexes, Maps['meancurv'], Maps['gausscurv'])
else: # local minimum approach
MapPits = Maps[SulciMap] # Users will get the option to select pits extraction map in the future.
Pits, Parts, Child = univariate_pits(MapPits, VrtxNbr, VrtxCmpnt, PitsThld)
Maps['hierarchy'] = Parts
else:
print "\t\t Thresholding the surface to get sulci only."
Faces = [map(int,i) for i in Faces]# this is a temporal fix. It won't cause precision problem because sys.maxint is 10^18.
Vertexes = map(list, Vertexes)
write_surface_with_LUTs(SulciVTK, Vertexes, [Faces[i] for i in Basin], Maps)
if Quick:
sys.exit()
write_pits_without_LUTs(PitsVTK, Vertexes, Pits)
# output tree hierarchies of basal components
# print "writing hierarchies of basal components"
# WetFile = PrefixExtract + '.pits.hier'
# WetP = open(WetFile,'w')
# for LowComp, HighComp in Child.iteritems():
# WetP.write(str(LowComp) + '\t' + str(HighComp) + '\n')
# WetP.close()
# end of output tree hierarchies of basal components
# End of Get pits Forrest 2011-05-30 10:16
# a monolithic code output each component
# Dic = {}
# for CID, Cmpnt in enumerate(FcCmpnt):
# Dic[CID] = len(Cmpnt)
#
# #Dic = sorted(Dic.iteritems(), key= lambda (k,v,) : (v,k))
# Counter = 1
# for CID, Size in sorted(Dic.iteritems(), key=lambda (k,v): (v,k)):
## print Size
# Rank = len(FcCmpnt) - Counter +1
# Fp = open(BasinFile + '.' + SurfFile[-1*SurfFile[::-1].find('.'):] + '.' + str(Rank) +'-th.vtk','w')
# Vertex, Face = fileio.readSurf(SurfFile)
# FundiList = FcCmpnt[CID]
# libvtk.wrtFcFtr(Fp, Vertex, Face, FundiList)
# Fp.close()
# Counter += 1
# a monolithic code output each component
#---------------End of function definitions---------------------------------------------------------------
| apache-2.0 | 1,991,005,818,867,061 | 30.546713 | 140 | 0.56976 | false | 3.157949 | false | false | false |
tonioo/modoboa-public-api | modoboa_public_api/models.py | 1 | 1562 | """Modoboa API models."""
from dateutil.relativedelta import relativedelta
from django.db import models
from django.utils import timezone
class ModoboaInstanceManager(models.Manager):
"""Custom manager for ModoboaInstance."""
def active(self):
"""Return active instances (last_request <= 1 month)."""
return self.get_queryset().filter(
last_request__gte=timezone.now() - relativedelta(months=1))
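# Usage sketch (assumed caller code, not part of this module): the custom
# manager lets views count recently seen instances, e.g.
#   ModoboaInstance.objects.active().count()
# which only includes instances whose last_request falls within the past month.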
class ModoboaInstance(models.Model):
"""A model to represent a modoboa instance."""
hostname = models.CharField(max_length=255)
ip_address = models.GenericIPAddressField()
known_version = models.CharField(max_length=30)
created = models.DateTimeField(auto_now_add=True)
last_request = models.DateTimeField(auto_now=True)
# Statistics
domain_counter = models.PositiveIntegerField(default=0)
domain_alias_counter = models.PositiveIntegerField(default=0)
mailbox_counter = models.PositiveIntegerField(default=0)
alias_counter = models.PositiveIntegerField(default=0)
# Used extensions
extensions = models.ManyToManyField("ModoboaExtension", blank=True)
objects = ModoboaInstanceManager()
def __str__(self):
return "[{0}] {1} -> {2}".format(
self.ip_address, self.hostname, self.known_version)
class ModoboaExtension(models.Model):
"""A modoboa extension with its latest version."""
name = models.CharField(max_length=255, unique=True)
version = models.CharField(max_length=30)
def __str__(self):
return self.name
| mit | 6,570,710,811,695,551,000 | 30.24 | 71 | 0.701665 | false | 3.974555 | false | false | false |
vbmendes/meio-pyofc | meiopyofc/series.py | 1 | 6187 | from util import OfcDict
from elements import Colour, ColoursList, LineStyle
from values import ValuesList, ShapePointsList, Value, BarValue
import conf
class SeriesList(list):
colours = conf.colours
value_colours = {
'positive': '#009900',
'negative': '#990000',
'zero': '#000099',
}
def append(self, series):
if series.colorize_series:
series['colour'] = self.colours[len(self)%len(self.colours)]
super(SeriesList, self).append(series)
class Series(OfcDict):
types = {
'type': str,
'alpha': float,
'colour': Colour,
'gradient-fill': str,
'halo-size': int,
'width': int,
'dot-size': int,
'text': str,
'font-size': int,
'values': ValuesList,
'line-style': LineStyle,
'tip': str,
'no-labels': bool,
'loop': bool,
'on-click': str,
}
value_cls = Value
def __init__(self, dictionary, colorize_series=True):
self.colorize_series = colorize_series
super(Series, self).__init__(dictionary)
class OutlineSeries(Series):
types = {
'outline-colour': Colour
}
# TODO! Keys in bar stacks
class Line(Series):
colors = {
'p': '#009900',
'z': '#000099',
'n': '#990000',
}
COLORIZE_NONE, COLORIZE_NEGATIVES, \
COLORIZE_ZEROS, COLORIZE_ZEROS_NEGATIVES, \
COLORIZE_POSITIVES, COLORIZE_POSITIVES_NEGATIVES, \
COLORIZE_PORITIVES_ZEROS, COLORIZE_ALL = range(8)
def __init__(self, dictionary, colorize_values=COLORIZE_ALL, **kwargs):
self.colorize_values = colorize_values
dictionary['type'] = dictionary.get('type', 'line')
super(Line, self).__init__(dictionary, **kwargs)
def _process_values(self, values):
#return values
return self.colorized_values(values)
def colorized_values(self, values, colors=None):
if not colors:
colors = self.colors
colorize_positives = bool(self.colorize_values/4) and 'p' in colors
colorize_zeros = bool((self.colorize_values%4)/2) and 'z' in colors
colorize_negatives = bool(self.colorize_values%2) and 'n' in colors
for k in range(len(values)):
value = values[k]
if isinstance(value, Value):
num_value = float(value['value'])
else:
num_value = float(value)
if num_value < 0:
if colorize_negatives:
values[k] = self.colorize_value(value, colors['n'])
elif num_value > 0:
if colorize_positives:
values[k] = self.colorize_value(value, colors['p'])
else:
values[k] = self.colorize_value(value, colors['z'])
return values
def colorize_value(self, value, color):
if isinstance(value, self.value_cls):
value['colour'] = color
return value
else:
return self.value_cls({'value': value, 'colour': color})
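# Hedged example (hypothetical data): with
#   line = Line({'values': [3, -1, 2]}, colorize_values=Line.COLORIZE_NEGATIVES)
# colorized_values() wraps -1 in a Value carrying the configured negative
# colour ('#990000') and leaves the positive numbers untouched.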
class LineDot(Line):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'line_dot'
super(LineDot, self).__init__(dictionary, **kwargs)
class LineHollow(Line):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'line_hollow'
super(LineHollow, self).__init__(dictionary, **kwargs)
class Bar(Series):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'bar'
super(Bar, self).__init__(dictionary, **kwargs)
class BarFilled(OutlineSeries):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'bar_filled'
super(BarFilled, self).__init__(dictionary, **kwargs)
class BarGlass(Series):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'bar_glass'
kwargs['colorize_series'] = kwargs.get('colorize_series', False)
super(BarGlass, self).__init__(dictionary, **kwargs)
class Bar3d(Series):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'bar_3d'
super(Bar3d, self).__init__(dictionary, **kwargs)
class BarSketch(OutlineSeries):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'bar_sketch'
super(BarSketch, self).__init__(dictionary, **kwargs)
class HBar(Series):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'hbar'
super(HBar, self).__init__(dictionary, **kwargs)
class BarStack(Series):
types = {
'colours': ColoursList,
}
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'bar_stack'
super(BarStack, self).__init__(dictionary, **kwargs)
class AreaLine(Series):
types = {
'fill-alpha': float,
'fill': Colour,
}
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'area_line'
super(AreaLine, self).__init__(dictionary, **kwargs)
class AreaHollow(AreaLine):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'area_hollow'
super(AreaHollow, self).__init__(dictionary, **kwargs)
class Pie(Series):
types = {
'start-angle': int,
'animate': bool,
'colours': ColoursList,
'label-colour': Colour,
}
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'pie'
dictionary['colours'] = conf.colours
super(Pie, self).__init__(dictionary, **kwargs)
class Scatter(Series):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'scatter'
super(Scatter, self).__init__(dictionary, **kwargs)
class ScatterLine(Series):
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'scatter_line'
super(ScatterLine, self).__init__(dictionary, **kwargs)
class Shape(OfcDict):
types = {
'type': str,
'colour': Colour,
'values': ShapePointsList
}
def __init__(self, dictionary, **kwargs):
dictionary['type'] = 'shape'
super(Shape, self).__init__(dictionary, **kwargs)
| bsd-2-clause | 6,318,545,894,399,630,000 | 30.566327 | 75 | 0.56942 | false | 3.700359 | false | false | false |
passy/glashammer-rdrei | examples/notes_with_sqla/notes.py | 1 | 4405 | from os.path import dirname, join
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, \
String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import create_session, scoped_session
from wtforms import Form, TextField, TextAreaField, validators
from glashammer.application import make_app
from glashammer.utils import local, local_manager, get_app, redirect, \
url_for, run_very_simple, render_response, Response
FOLDER = dirname(__file__)
Base = declarative_base()
metadata = Base.metadata
db = scoped_session(lambda: create_session(local.application.sqla_db_engine,
autocommit=False), local_manager.get_ident)
# Table, Class and Mapper - declarative style
class Note(Base):
""" Represents a note """
__tablename__ = 'notes'
id = Column(Integer, primary_key=True)
title = Column(String(150))
note = Column(Text)
importance = Column(String(20))
def __init__(self, title, text, importance=None):
self.title = title
self.note = text
self.importance = importance
# The form
class NotesForm(Form):
"""Add/edit form for notes"""
title = TextField(u'Title:', [validators.length(min=4, max=150)])
note = TextAreaField(u'Note:', [validators.length(min=4, max=500)])
importance = TextField(u'Importance:')
# The views
def index_view(req):
notes = db.query(Note).order_by(Note.id.desc()).all()
form = NotesForm(req.form)
return render_response('notes_index.jinja', notes=notes, form=form)
def add_edit_view(req, nid=None):
if nid is None:
form = NotesForm(req.form)
# Validate form
if req.method == 'POST' and form.validate():
# No validation errors, save note and redirect to success page
note = Note(
req.form.get('title'),
req.form.get('note'),
req.form.get('importance')
)
db.add(note)
db.commit()
return redirect(url_for('example/success'))
return render_response('notes_add.jinja', form=form)
else:
# Find note
note = db.query(Note).get(nid)
# Check if note exists
if note is None:
return Response('Not Found', status=404)
# Form with values
form = NotesForm(req.form,
title = note.title,
note = note.note,
importance = note.importance
)
# Validate form
if req.method == 'POST' and form.validate():
# No validation errors, update note and redirect to success page
note.title = req.form.get('title')
note.note = req.form.get('note')
note.importance = req.form.get('importance')
db.add(note)
db.commit()
return redirect(url_for('example/success'))
return render_response('notes_edit.jinja', note=note, form=form)
def add_success_view(req):
return render_response('notes_success.jinja')
# Setup
def _get_default_db_uri(app):
db_file = join(app.instance_dir, 'gh.sqlite')
return 'sqlite:///' + db_file
def setup(app):
# Setting up our database
app.add_config_var('sqla_db_uri', str, _get_default_db_uri(app))
app.sqla_db_engine = create_engine(app.cfg['sqla_db_uri'],
convert_unicode=True)
metadata.bind = app.sqla_db_engine
# Function to be run during data setup phase
app.add_data_func(init_data)
# Add the template searchpath
app.add_template_searchpath(FOLDER)
# Add bundles
from glashammer.bundles.htmlhelpers import setup_htmlhelpers
app.add_setup(setup_htmlhelpers)
# Urls
app.add_url('/', 'example/index', view=index_view)
app.add_url('/add', 'example/add', view=add_edit_view)
app.add_url('/add/success', 'example/success', view=add_success_view)
app.add_url('/edit/<int:nid>', 'example/edit', view=add_edit_view)
# Static files
app.add_shared('files', join(FOLDER, 'static'))
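# Routing summary (derived from the rules above): '/' lists notes, '/add' and
# '/edit/<int:nid>' share add_edit_view (nid=None means create a new note),
# '/add/success' renders the confirmation page, and 'files' serves the shared
# static directory.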
def init_data(app):
engine = get_app().sqla_db_engine
metadata.create_all(engine)
# Used by gh-admin
def create_app():
return make_app(setup, FOLDER)
if __name__ == '__main__':
app = create_app()
run_very_simple(app)
| mit | -8,471,366,169,608,996,000 | 32.120301 | 76 | 0.610897 | false | 3.686192 | false | false | false |
jerpat/csmake | csmake/Csmake/Settings.py | 2 | 3889 | # <copyright>
# (c) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# </copyright>
class Setting:
def __init__(self, key, default, description, isFlag, short=None):
self.key = key
self.value = default
self.description = description
self.isFlag = isFlag
self.short = short
if short is None:
self.short = description
def keys(self):
if self.value.has_method("keys"):
return self.value.keys()
else:
[]
self.keys = keys
def __getitem__(self, key):
return self.value[key].value
def __setitem__(self, key, value):
self.value[key].value = value
def getObject(self, key):
return self.value[key]
class Settings:
def initSettings(self, root, settingsDict):
# Settings are in the form [default value, help text, isFlag, short text]
root.value = {}
for (key, value) in settingsDict.iteritems():
if value is not type(dict):
if len(value) == 3:
root.value[key] = Setting(
key, value[0], value[1], value[2])
else:
root.value[key] = Setting(
key, value[0], value[1], value[2], value[3])
else:
self.initSetting(root.value[key], value)
def __init__(self, settingsSeed):
self.allsettings = Setting("<root>", {}, "Root", False, "Root")
self.initSettings(self.allsettings, settingsSeed)
def __getitem__(self, key):
return self.lookupSetting(key).value
def __setitem__(self, key, value):
try:
self.lookupSetting(key).value = value
except KeyError:
self.lookupSetting(key[:-1]).value[key] = Setting(key,value,"",False)
def getObject(self, key):
return self.lookupSetting(key)
def keys(self):
return self.allsettings.value.keys()
def lookupSetting(self, key):
if len(key) == 0:
return self.allsettings
keylist = key.split(':')
keypath = keylist[:-1]
keyelem = keylist[-1]
current = self.allsettings
for part in keypath:
current = current.value[part]
return current.value[keyelem]
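# Illustrative lookup (hypothetical keys): settings are addressed with
# colon-separated paths, so lookupSetting('build:flags:verbose') walks
# allsettings.value['build'].value['flags'].value and returns the Setting
# object stored under 'verbose'; settings['build:flags:verbose'] would return
# that Setting's value.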
def getDescription(self, key):
setting = self.lookupSetting(key)
return setting.description
def isFlag(self, key):
setting = self.lookupSetting(key)
return setting.isFlag
def appendSettings(self, key, newSettings):
"""This will add settings for your CLI to use:
newSettings will be in a
dict(-name-, list(-default-, -description-, -flag-))
form
Where:
name is the option from the command line, file, or json input
default is the default value
description is the description of the setting
flag is a True/False boolean
True means that it doesn't expect a parameter on the CLI
False means a parameter is expected
"""
if len(key) == 0:
self.initSettings(self.allsettings, newSettings)
| gpl-3.0 | -3,048,745,625,793,723,000 | 32.525862 | 81 | 0.589612 | false | 4.306755 | false | false | false |
d-grossman/magichour | magichour/api/local/sample/auditd_driver.py | 2 | 13362 | import glob
import operator
import os
from magichour.api.local.modelgen.preprocess import log_cardinality
from magichour.api.local.sample.steps.evalapply import evalapply_step
from magichour.api.local.sample.steps.event import event_step
from magichour.api.local.sample.steps.genapply import genapply_step
from magichour.api.local.sample.steps.genwindow import genwindow_step
from magichour.api.local.sample.steps.preprocess import preprocess_step
from magichour.api.local.sample.steps.template import template_step
from magichour.api.local.util.log import get_logger, log_time
from magichour.api.local.util.namedtuples import strTimedEvent
from magichour.api.local.util.pickl import write_pickle_file
from magichour.validate.datagen.eventgen import auditd
logger = get_logger(__name__)
def get_auditd_templates(auditd_templates_file):
"""
Helper function to read in auditd type to id mapping and return lookup dictionary
"""
auditd_templates = {}
for line in open(auditd_templates_file):
type_field, id = line.rstrip().split(',')
auditd_templates[type_field] = int(id)
return auditd_templates
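# Hypothetical input sketch (the file contents below are an assumption): the
# mapping file is a two-column CSV such as
#   SYSCALL,0
#   EXECVE,1
#   PATH,2
# which this helper turns into {'SYSCALL': 0, 'EXECVE': 1, 'PATH': 2}.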
@log_time
def run_pipeline(options):
read_lines_kwargs = {'transforms_file': options.transforms_file,
'gettime_auditd': options.auditd,
'type_template_auditd': options.auditd_templates_file,
'ts_start_index': options.ts_start_index,
'ts_end_index': options.ts_end_index,
'ts_format': options.ts_format,
'skip_num_chars': options.skip_num_chars,
'mp': options.mp, }
loglines = []
log_files = []
if options.data_file:
log_files.append(options.data_file)
if options.data_dir:
log_files.extend(glob.glob(os.path.join(options.data_dir, '*')))
if not log_files or (not options.data_file and not options.data_dir):
raise RuntimeError('No input specified/available')
for log_file in log_files:
loglines.extend(
preprocess_step(
log_file,
**read_lines_kwargs))
# count cardinality; print unique lines if verbose and there are actually
# transforms to apply
log_cardinality(loglines,
get_item=operator.attrgetter('processed'),
item_title='Transform',
verbose=options.verbose and options.transforms_file)
if options.save_intermediate:
transformed_lines_file = os.path.join(
options.pickle_cache_dir, "transformed_lines.pickle")
write_pickle_file(loglines, transformed_lines_file)
if read_lines_kwargs.get('type_template_auditd'):
# Read in auditd template definitions
templates = get_auditd_templates(options.auditd_templates_file)
else:
# Generate templates
if options.template_gen == 'logcluster':
logcluster_kwargs = {"support": str(options.template_support)}
templates = template_step(
loglines, "logcluster", **logcluster_kwargs)
elif options.template_gen == 'stringmatch':
templates = template_step(loglines, "stringmatch") # WIP
else:
raise NotImplementedError(
'%s Template generation method not implemented' %
options.template_gen)
if options.save_intermediate:
templates_file = os.path.join(
options.pickle_cache_dir, "templates.pickle")
write_pickle_file(templates, templates_file)
log_cardinality(templates,
item_key=operator.attrgetter('id'),
item_title='Template',
verbose=options.verbose)
timed_templates = genapply_step(loglines, templates, **read_lines_kwargs)
if options.save_intermediate:
timed_templates_file = os.path.join(
options.pickle_cache_dir, "timed_templates.pickle")
write_pickle_file(timed_templates, timed_templates_file)
modelgen_windows = genwindow_step(timed_templates,
window_size=options.gwindow_time,
tfidf_threshold=options.gtfidf_threshold)
if options.save_intermediate:
modelgen_windows_file = os.path.join(
options.pickle_cache_dir, "modelgen_windows.pickle")
write_pickle_file(modelgen_windows, modelgen_windows_file)
if options.event_gen == 'fp-growth':
fp_growth_kwargs = {
"min_support": options.min_support,
"iterations": options.iterations,
"tfidf_threshold": options.tfidf_threshold}
gen_events = event_step(
modelgen_windows,
"fp_growth",
**fp_growth_kwargs)
elif options.event_gen == 'paris':
paris_kwargs = {
"r_slack": options.r_slack,
"num_iterations": options.num_iterations,
"tau": options.tau}
gen_events = event_step(
modelgen_windows,
"paris",
**paris_kwargs) # WIP
elif options.event_gen == 'glove':
glove_kwargs = {
'num_components': options.num_components,
'glove_window': options.glove_window,
'epochs': options.epochs}
gen_events = event_step(
modelgen_windows,
"glove",
verbose=options.verbose,
**glove_kwargs)
elif options.event_gen == 'auditd':
# ignore timed_templates and modelgen_window and pass templates to
# auditd-specific event generator
gen_events = auditd.event_gen(templates)
else:
raise NotImplementedError('%s Not implemented' % options.event_gen)
if options.save_intermediate:
events_file = os.path.join(options.pickle_cache_dir, "events.pickle")
write_pickle_file(gen_events, events_file)
logger.info("Discovered events: %d" % len(gen_events))
if options.verbose:
# Print events and their templates
if read_lines_kwargs.get('type_template_auditd'):
template_list = [(templates[template], template)
for template in templates]
else:
template_list = [(template.id, template) for template in templates]
template_d = {
template_id: template for (
template_id,
template) in template_list}
e = []
for event in sorted(gen_events, key=lambda event: event.id):
ts = ["event_id: %s" % event.id]
for template_id in sorted(event.template_ids):
ts.append("%s: %s" % (template_id, template_d[template_id]))
e.append(ts)
from pprint import pformat
logger.info("\n" + pformat(e))
# compute how many times each template was used (i.e. how many events
# each template appears in)
event_templates = (
template_d[template_id] for event in gen_events for template_id in event.template_ids)
log_cardinality(
event_templates,
item_title='EventTemplate',
item_key=operator.attrgetter('id'),
verbose=options.verbose)
timed_events = evalapply_step(
gen_events,
timed_templates,
window_time=options.awindow_time,
mp=options.mp)
if options.save_intermediate:
timed_events_file = os.path.join(
options.pickle_cache_dir, "timed_events.pickle")
write_pickle_file(timed_events, timed_events_file)
logger.info("Timed events: %d" % len(timed_events))
log_cardinality(
timed_events,
item_title='TimedEvent',
get_item=operator.attrgetter('event_id'),
verbose=options.verbose)
if options.verbose > 1:
# Print timed event summary for -vv
# sort timed_templates in ascending time order
for te in timed_events:
te.timed_templates.sort(key=lambda tt: tt.ts)
if options.sort_events_key == 'time':
# sort timed events in ascending time order (of their first
# occurring timed_template)
timed_event_key = lambda te: te.timed_templates[0].ts
else:
# sort timed events by event id, then by time order
timed_event_key = lambda te: (
te.event_id, te.timed_templates[0].ts)
timed_events.sort(key=timed_event_key)
e = []
for event in timed_events:
s = strTimedEvent(event)
e.append(s)
logger.info("\n" + pformat(e))
logger.info("Done!")
def main():
from argparse import ArgumentParser
import sys
logger.info('args: %s', ' '.join(sys.argv[1:]))
# NOTE: parser.add_argument() default value: default=None
parser = ArgumentParser()
parser.add_argument(
'-f',
'--data-file',
dest="data_file",
help="Input log file")
parser.add_argument(
'-d',
'--data-dir',
dest="data_dir",
help="Input log directory")
parser.add_argument(
'-t',
'--transforms-file',
dest="transforms_file",
help="Transforms mapping file")
parser.add_argument(
'--template-gen',
choices=[
'logcluster',
'stringmatch'],
required=True)
parser.add_argument(
'--event-gen',
choices=[
'fp-growth',
'paris',
'glove',
'auditd'],
required=True)
source_args = parser.add_argument_group('Source-specific Parameters')
source_args.add_argument(
'--auditd',
default=False,
help='Input is auditd logs',
action="store_true") # for now, this just means read auditd-timestamps
source_args.add_argument(
'--auditd_templates_file',
dest="auditd_templates_file",
help="CSV Mapping Auditd types to ids (if not specified Templates will be auto-generated)")
source_args.add_argument(
'--skip_num_chars',
default=0,
help='skip characters at beginning of each line')
source_args.add_argument(
'--ts_start_index',
default=0,
help='start of timestamp (after skipping)')
source_args.add_argument(
'--ts_end_index',
default=12,
help='end of timestamp (after skipping)')
source_args.add_argument(
'--ts_format',
help='datetime.strptime() format string')
control_args = parser.add_argument_group('General Control Parameters')
control_args.add_argument(
'-w',
'--gwindow_time',
default=60,
help='Event model generate window size (seconds)')
control_args.add_argument(
'--gtfidf_threshold',
default=None, # default = don't apply tfidf to model generation
help='Event model generation tf_idf threshold')
control_args.add_argument(
'--awindow_time',
default=60,
help='Event application window size (seconds)')
control_args.add_argument(
'--template-support',
default=50,
help='# occurrences required to generate a Template')
control_args.add_argument(
'--mp',
default=False,
help='Turn on multi-processing',
action='store_true')
fp_growth_args = parser.add_argument_group('FP-Growth Control Parameters')
fp_growth_args.add_argument('--min_support', default=0.03, help='?')
fp_growth_args.add_argument(
'--iterations',
default=-1,
help='Number of itemsets to produce (-1 == all)')
fp_growth_args.add_argument('--tfidf_threshold', default=0, help='?')
paris_args = parser.add_argument_group('PARIS Control Parameters')
paris_args.add_argument(
'--r_slack',
default=0,
help='cost function parameter')
paris_args.add_argument('--num_iterations', default=3, help='?')
paris_args.add_argument(
'--tau',
default=1.0,
help='cost function parameter')
glove_args = parser.add_argument_group('Glove Control Parameters')
glove_args.add_argument(
'--num_components',
default=16,
help='?')
glove_args.add_argument(
'--glove_window',
default=10,
help='?')
glove_args.add_argument(
'--epochs',
default=20,
help='?')
optional_args = parser.add_argument_group('Debug Arguments')
optional_args.add_argument(
'-v',
'--verbose',
dest='verbose',
default=False,
action="count",
help="Print definitions")
optional_args.add_argument(
'--sort-events-key',
choices=[
'time',
'event'],
default='time',
help="Sort events by time or event-id.")
optional_args.add_argument(
"--save-intermediate",
dest="save_intermediate",
default=False,
action="store_true",
help="Save intermediate files which may result in large files")
optional_args.add_argument(
'--pickle_dir',
dest="pickle_cache_dir",
help="Directory for intermediate files")
options = parser.parse_args()
run_pipeline(options)
if __name__ == "__main__":
main()
| apache-2.0 | 3,252,611,154,578,878,500 | 34.823056 | 99 | 0.596318 | false | 4.039299 | false | false | false |
stahlnow/stahlnow | website/apps/gallery/models.py | 1 | 1799 | # encoding: utf-8
import os
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django_extensions.db.fields import *
from django.db.models.signals import post_delete, pre_save
from django.dispatch import receiver
from taggit.managers import TaggableManager
from filer.fields.image import FilerImageField
class Image(models.Model):
uuid = UUIDField()
created = CreationDateTimeField()
updated = ModificationDateTimeField()
image = FilerImageField(blank=True, null=True)
caption = models.TextField(_('caption'), blank=True, null=True)
link = models.URLField(_('link'), blank=True, null=True)
gallery = models.ForeignKey('Gallery', related_name='images', null=True, blank=True)
class Meta:
verbose_name = _('image')
verbose_name_plural = _('images')
db_table = 'gallery_images'
ordering = ('-created',)
get_latest_by = 'created'
def __unicode__(self):
return self.image.file.name
def image_tag(self):
return u'<img src="%s" width="400px" />' % (settings.FILES_URL + self.image.file.name)
image_tag.short_description = 'Image'
image_tag.allow_tags = True
class Gallery(models.Model):
uuid = UUIDField()
created = CreationDateTimeField()
updated = ModificationDateTimeField()
name = models.CharField(max_length=1024)
class Meta:
verbose_name = _('gallery')
verbose_name_plural = _('galleries')
db_table = 'gallery_galleries'
ordering = ('-created',)
def __unicode__(self):
return self.name
@property
def images(self):
if self.images.exists():
return self.images.all() | mit | -7,918,202,679,409,547,000 | 28.508197 | 94 | 0.670372 | false | 3.927948 | false | false | false |
EUDAT-B2STAGE/http-api-base | flask_ext/flask_neo4j.py | 1 | 1810 | # -*- coding: utf-8 -*-
""" Neo4j GraphDB flask connector """
import socket
import neo4j
from neomodel import db, config
from flask_ext import BaseExtension, get_logger
from rapydo.utils.logs import re_obscure_pattern
log = get_logger(__name__)
class NeomodelClient():
def __init__(self, db):
self.db = db
def cypher(self, query):
""" Execute normal neo4j queries """
from neomodel import db
try:
results, meta = db.cypher_query(query)
except Exception as e:
raise Exception(
"Failed to execute Cypher Query: %s\n%s" % (query, str(e)))
return False
# log.debug("Graph query.\nResults: %s\nMeta: %s" % (results, meta))
return results
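# Usage sketch (assumed caller code, not part of this module):
#   client = NeomodelClient(db)
#   rows = client.cypher("MATCH (n) RETURN count(n)")
# A failing query raises an Exception above, so the `return False` after the
# raise is effectively unreachable.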
class NeoModel(BaseExtension):
def set_connection_exception(self):
return (
socket.gaierror,
neo4j.bolt.connection.ServiceUnavailable
)
def custom_connection(self, **kwargs):
if len(kwargs) > 0:
variables = kwargs
else:
variables = self.variables
self.uri = "bolt://%s:%s@%s:%s" % \
(
# User:Password
variables.get('user', 'neo4j'),
variables.get('password'),
# Host:Port
variables.get('host'),
variables.get('port'),
)
log.very_verbose("URI IS %s" % re_obscure_pattern(self.uri))
config.DATABASE_URL = self.uri
# Ensure all DateTimes are provided with a timezone
# before being serialised to UTC epoch
config.FORCE_TIMEZONE = True # default False
db.url = self.uri
db.set_connection(self.uri)
client = NeomodelClient(db)
return client
# return db
| mit | -8,237,820,969,986,270,000 | 25.617647 | 76 | 0.556906 | false | 3.926247 | false | false | false |
pombredanne/btnet | grabmakeimage.py | 1 | 4877 | #!/usr/bin/python
import urllib2
import urllib
import re
from BeautifulSoup import BeautifulSoup
import requests
# from stegano import slsb
import random
import os
import json
#gets pic from reddit.com/r/pics and saves as temp.png
def getpic():
num = random.randint(0, 24)
page = BeautifulSoup(urllib2.urlopen("http://www.reddit.com/r/pics/new"))
i = re.findall('(?<=<a class="thumbnail " href=")([^"]*)', str(page))
if i:
url = i[num]
#adds jpg to end of file if need be
if url.find(".jpg") == -1 and url.find(".png") == -1:
url = url + ".jpg"
print url
r = urllib2.urlopen(url)
f = open('images/temp.jpg', 'wb+')
f.write(r.read())
f.close()
else:
print "shits broke yo"
#stegs some stuff into image and saves image
def stegstuffs():
os.system("steghide embed -ef cryptmessage.txt -cf images/temp.jpg -p asd\n 2>errors")
f = open('errors', 'rw')
if "steghide" in f.read():
f.write("")
f.close()
raise Exception("lol error")
#os.system("steghide extract -sf images/temp.jpg -p asd\n")
#message = "(signature) #Internal intergrity check\n(signature of previous instruction)\n(signature of next instructions):(signature):(signature):(signature):....\n(pool index):(pool boolean) #Experimental pool finder and adhoc'r\n\n(Next location):(location):(location):(location):(location):(location):(location):...\n(previous instruction location) #doubly linked list\n(ghost pool) #backup pool also triply linked list :)\n\n(instructions)#one per line\nS #A single Captial S represents beginning of instruction set\n(1instruction)\n(2instruction)\n(3instruction)\n(4instruction)\n ...\nE #A single Captial E represents end of instruction set\n\n(key to decrypt next message) # optional can help obstuficate and make the process more difficult"
#secret = slsb.hide("images/temp.jpg", message)
#secret.save("images/tempsteg.jpg")
#datagram = str(slsb.reveal("images/tempsteg.jpg"))
#uploads image to imgur (need to figure out how imgur.com/upload post stuff works)
class banned(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def uploadpic():
url = 'http://imgur.com/upload'
payload = {'file': open('images/temp.jpg', 'rb')}
headers = {'Host': 'imgur.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'DNT': '1',
'Connection': 'keep-alive'}
r = requests.post(url, files=payload, headers=headers)
if str(r.text).find("Exception_OverUploadLimits") > 1:
raise banned(1)
test = json.loads(r.text)
print test['data']['hash']
def encrypt_RSA(key, message):
'''
param: key Public key contents (PEM string)
param: message String to be encrypted
return base64 encoded encrypted string
'''
from M2Crypto import RSA, BIO
#key = open(public_key_loc, "r").read()
pubkey = str(key).encode('utf8')
bio = BIO.MemoryBuffer(pubkey)
rsa = RSA.load_pub_key_bio(bio)
encrypted = rsa.public_encrypt(message, RSA.pkcs1_oaep_padding)
return encrypted.encode('base64')
def hash_and_encrypt_Mod(message):
"""
Encrypts datagram payload. Also prepends hash of plaintext for integrity checking.
"""
import hashlib
s = hashlib.sha512()
s.update(message)
hash = s.hexdigest()
fo = open("rootkeys/public.txt", "rb")
publickey = fo.read()
fo.close()
crypttext = encrypt_RSA(publickey,message)
fo = open("cryptmessage.txt","wb")
fo.write(hash + crypttext)
fo.close()
return crypttext
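# Flow sketch (assumption about intent, not stated in the original code): the
# SHA-512 hex digest of the plaintext is prepended to the base64 RSA ciphertext
# so a receiving node can verify integrity after decryption; e.g. a short call
# like hash_and_encrypt_Mod('{"pool_num": "1"}') writes 128 hex characters
# followed by the base64 blob to cryptmessage.txt.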
while True:
try:
getpic()
except:
continue
break
while True:
try:
example_datagram = '''{
"prev_sig":"(signature of previous instruction)",
"pool_num":"(pool number)",
"instrs":"c3R1ZmYgPSAnaGVsbG8gd29ybGQnDQppZiBzdHVmZls6Nl0gPT0gJ2hlbGxvJzoNCiAgICBwcmludCAnaXQgaXMgY29vbCcNCmVsc2U6DQogICAgcHJpbnQgJ2dvZGRhbWl0Jw==",
"maint_instrs":"c3R1ZmYgPSAnaGVsbG8gd29ybGQnDQppZiBzdHVmZls6Nl0gPT0gJ2hlbGxvJzoNCiAgICBwcmludCAnaXQgaXMgY29vbCcNCmVsc2U6DQogICAgcHJpbnQgJ2dvZGRhbWl0Jw==",
"next_key":"(keyifweneedit)",
"next_locs":[
[{"check_time":"(EPOCH time)"}, {"loc":"(location)"},{"sig":"(signature)"}],
[{"check_time":"(EPOCH time)"}, {"loc":"(location)"},{"sig":"(signature)"}],
[{"check_time":"(EPOCH time)"}, {"loc":"(location)"},{"sig":"(signature)"}],
[{"check_time":"(EPOCH time)"}, {"loc":"(location)"},{"sig":"(signature)"}]
],
"prev_loc":"(previous location)",
"ghost_pool_locs":[
{"loc":"(location)"},
{"loc":"(location)"}
],
"gen_time":"1395082897"
}
'''
hash_and_encrypt_Mod(example_datagram)
stegstuffs()
except:
continue
break
while True:
try:
uploadpic()
except banned as e:
print "we got banned yo"
except:
continue
break
#cleansup
#os.remove("images/temp.png")
#os.remove("images/tempsteg.png")
| gpl-2.0 | -3,028,814,313,174,884,400 | 31.513333 | 749 | 0.691819 | false | 2.706437 | false | false | false |
fungs/mglex | mglex/models/aggregate.py | 1 | 3541 | # This file is subject to the terms and conditions of the GPLv3 (see file 'LICENSE' as part of this source code package)
u"""
This file holds all the functions and types necessary for the aggregate likelihood model.
"""
__author__ = "[email protected]"
from .. import common, types
import numpy as np
from sys import argv, exit, stdin, stdout, stderr, exit
class AggregateData(list): # TODO: rename CompositeData
def __init__(self, *args, **kwargs):
super(AggregateData, self).__init__(*args, **kwargs)
# try:
# self.sizes = np.asarray(sizes, dtype=self.size_type)
# except TypeError:
# self.sizes = np.fromiter(sizes, dtype=self.size_type)[:, np.newaxis]
def deposit(self, features):
# self.names.append(name)
for d, f in zip(self, features):
# print(d, f)
d.deposit(f)
def prepare(self):
for d in self:
d.prepare()
return self
@property
def num_data(self):
if not super(AggregateData, self).__len__():
return 0
# print(self, file=stderr)
num = self[0].num_data
assert num == len(self[0])
for l in self[1:]:
assert(l.num_data == num)
return num
@property
def num_features(self):
return super(AggregateData, self).__len__()
size_type = types.seqlen_type
class AggregateModel(list): # TODO: rename CompositeModel, implement update() and maximize_likelihood()
def __init__(self, *args, **kw):
super(AggregateModel, self).__init__(*args, **kw)
self.beta_correction = 1.0
@property
def names(self):
if len(self) > 1:
component_names = list(zip(*[m.names for m in self]))
return [",".join(t) for t in component_names]
return self[0].names
@property
def num_components(self): # transitional
if not len(self):
return 0
cluster_num = self[0].num_components
assert np.all(np.equal(cluster_num, [model.num_components for model in self[1:]]))
return cluster_num
def log_likelihood(self, data):
#assert self.weights.size == len(self)
ll_scale = np.asarray([m.stdev if m.stdev > 0.0 else 0.1 for m in self]) # stdev of zero is not allowed, quick workaround!
ll_weights = self.beta_correction*(ll_scale.sum()/ll_scale.size**2)/ll_scale
ll_per_model = np.asarray([w*m.log_likelihood(d) for (m, d, w) in zip(self, data, ll_weights)]) # TODO: reduce memory usage, de-normalize scale
s = np.mean(np.exp(ll_per_model), axis=1) # TODO: remove debug calculations
l = np.sum(ll_per_model, axis=1, dtype=types.large_float_type) # TODO: remove debug calculations
for m, mvec, lvec in zip(self, s, l): # TODO: save memory
stderr.write("LOG %s: average likelihood %s *** %s\n" % (m._short_name, common.pretty_probvector(mvec), common.pretty_probvector(lvec)))
loglike = np.sum(ll_per_model, axis=0, keepdims=False) # TODO: serialize
return loglike
def maximize_likelihood(self, data, responsibilities, weights, cmask=None):
loglikelihood = np.zeros(shape=(data.num_data, self.num_components), dtype=types.logprob_type)
return_value = False
for m, d in zip(self, data):
ret, ll = m.maximize_likelihood(d, responsibilities, weights, cmask)
ll = loglikelihood + ll
return_value = return_value and ret
return return_value, loglikelihood | gpl-3.0 | 3,050,839,692,751,011,000 | 35.895833 | 152 | 0.612821 | false | 3.569556 | false | false | false |