Dataset schema (one row per source file; each row's `content` field spans many lines below):

| column | dtype | values / lengths |
|---|---|---|
| repo_name | string | lengths 5–92 |
| path | string | lengths 4–232 |
| copies | string | 19 classes |
| size | string | lengths 4–7 |
| content | string | lengths 721–1.04M |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 15 to 997 |
| alpha_frac | float64 | 0.25 to 0.97 |
| autogenerated | bool | 1 class |

Each row below is laid out as: repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
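The column summary above appears to follow the Hugging Face dataset-viewer format, so a dump with this schema could be loaded and filtered with the `datasets` library. This is a sketch only; the dataset name below is a placeholder, since the actual source repository is not identified in the dump.

```python
# Sketch only: the dataset name is a placeholder, not the real source of this dump.
from datasets import load_dataset

ds = load_dataset("example-org/github-code-dump", split="train")  # hypothetical name
# Keep hand-written Python files, using the `autogenerated` flag from the schema.
py_rows = ds.filter(lambda r: r["path"].endswith(".py") and not r["autogenerated"])
print(py_rows[0]["repo_name"], py_rows[0]["license"], py_rows[0]["size"])
```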
aamirmajeedkhan/P4-conference-central | conference.py | 1 | 35026 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime,time
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import SessionType,Speaker,Session,SessionForm,SessionForms
from models import SessionQueryForm,SessionQueryForms
from utils import getUserId
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
from models import Conference
from models import ConferenceForm
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
CONF_FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
SESSION_FIELDS = {
'NAME': 'name',
'DURATION': 'duration',
'TYPE_OF_SESSION': 'typeOfSession',
'Date': 'date',
'START_TIME':'startTime',
'SPEAKER':'speaker',
}
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from models import BooleanMessage
from models import ConflictException
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1, required=True),
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1, required=True)
)
SESSION_TYPE_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1, required=True),
sessionType=messages.StringField(2, required=True)
)
SESSION_WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1, required=True)
)
SESSION_REQUIRED_FIELDS = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime')
MEMCACHE_ANNOUNCEMENTS_KEY="LATEST_ANNOUNCEMENT"
MEMCACHE_FEATURED_SPEAKER_KEY="FEATURED_SPEAKER"
from google.appengine.api import memcache
from models import StringMessage
from google.appengine.api import taskqueue
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# retrieve profile from datastore
user_id=getUserId(user)
p_key = ndb.Key(Profile,user_id)
profile = p_key.get()
# create profile if not exist
if not profile:
profile = Profile(
key = p_key, # TODO 1 step 4. replace with the key from step 3
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
# both for data model & outbound Message
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
setattr(request, "seatsAvailable", data["maxAttendees"])
# make Profile Key from user ID
p_key = ndb.Key(Profile, user_id)
# allocate new Conference ID with Profile key as parent
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
# make Conference key from ID
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf,names[conf.organizerUserId]) \
for conf in conferences]
)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# make profile key
p_key = ndb.Key(Profile, getUserId(user))
# create ancestor query for this user
conferences = Conference.query(ancestor=p_key)
# get the user profile and display name
prof = p_key.get()
displayName = getattr(prof, 'displayName')
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, displayName) for conf in conferences]
)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
q = Conference.query()
# simple filter usage:
# q = q.filter(Conference.city == "Paris")
# advanced filter building and usage
field = "city"
operator = "="
value = "London"
f = ndb.query.FilterNode(field, operator, value)
q = q.filter(f)
q=q.order(Conference.maxAttendees)
# filter for conferences that allow more than 6 attendees
q=q.filter(Conference.maxAttendees > 6)
# TODO
# add 2 filters:
# 1: city equals to London
# 2: topic equals "Medical Innovations"
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters,type='Conference'):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
if type == 'Conference':
filtr["field"] = CONF_FIELDS[filtr["field"]]
elif type == 'Session':
filtr["field"] = SESSION_FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
#@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser()
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# retrieve organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf,
names[conf.organizerUserId])\
for conf in conferences]
)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:',
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no almost-sold-out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
if not announcement:
announcement = ""
return StringMessage(data=announcement)
# - - - Conference Session - - - - - - - - - - - - - - - - - - - -
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET',
name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Given a conference, return all sessions"""
# get Conference object from request
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
# Return set of SessionForm belong to Conference
return SessionForms(items=[self._copySessionToForm(session) for session in conf.sessions])
@endpoints.method(SESSION_TYPE_GET_REQUEST,
SessionForms,
path='conference/{websafeConferenceKey}/sessions/type/{sessionType}',
http_method='GET',
name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Given a conference, return all sessions of a specified type (eg lecture, keynote, workshop)"""
# get Conference object from request
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
# filter sessions by session type
sessions = conf.sessions.filter(Session.typeOfSession == str(request.sessionType))
# Return a set of SessionForm objects per session
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
@endpoints.method(SESSION_SPEAKER_GET_REQUEST,
SessionForms,
path='sessions/speaker/{speaker}',
http_method='GET',
name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Given a speaker, return all sessions given by this particular speaker, across all conferences"""
#filter session by speaker
sessions = Session.query(Session.speaker == Speaker(name=request.speaker))
# Return a set of SessionForm objects per session
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
def _createSessionObject(self, sessionForm):
"""Create Session object, return SessionForm."""
# ensure user is authenticated
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get the conference
conf = ndb.Key(urlsafe=sessionForm.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % sessionForm.websafeConferenceKey)
# ensure ownership
if getUserId(user) != conf.organizerUserId:
raise endpoints.ForbiddenException('Only organizer of conference : %s can add sessions.' % conf.name)
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(sessionForm, field.name) for field in sessionForm.all_fields()}
# convert typeOfsession to string
if data['typeOfSession']:
data['typeOfSession']=str(data['typeOfSession'])
else:
data['typeOfSession']=str(SessionType.NOT_SPECIFIED)
del data['websafeKey']
del data['websafeConferenceKey']
# check required fields
for key in SESSION_REQUIRED_FIELDS:
if not data[key]:
raise endpoints.BadRequestException("'%s' field is required to create a session." % key)
# convert date string to a datetime object.
try:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
except (TypeError, ValueError):
raise endpoints.BadRequestException("Invalid date format. Please use 'YYYY-MM-DD'")
# convert time string (HH:MM) to a time object.
try:
data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
except (TypeError, ValueError):
raise endpoints.BadRequestException("Invalid time format. Please use 'HH:MM'")
if data['duration'] <= 0:
raise endpoints.BadRequestException("Duration must be greater than zero")
# session must fall within the conference start and end dates, but only when those
# dates were defined at the time of conference creation
if conf.startDate and conf.endDate :
if data['date'] < conf.startDate or data['date'] > conf.endDate:
raise endpoints.BadRequestException("Session must be within range of conference start and end date")
data['speaker'] = Speaker(name=data['speaker'])
# Datastore to allocate an ID.
s_id = Session.allocate_ids(size=1, parent=conf.key)[0]
# Datastore returns an integer ID that we can use to create a session key
data['key'] = ndb.Key(Session, s_id, parent=conf.key)
# Add session to datastore
session = Session(**data)
session.put()
# Add a task to check and update new featured speaker
taskqueue.add(
params={'websafeConferenceKey': conf.key.urlsafe(), 'speaker': session.speaker.name},
url='/tasks/set_featured_speaker'
)
return self._copySessionToForm(session)
@endpoints.method(SESSION_POST_REQUEST,
SessionForm,
path='conference/sessions/{websafeConferenceKey}',
http_method='POST',
name='createSession')
def createSession(self, request):
"""Creates a session, open to the organizer of the conference"""
return self._createSessionObject(request)
def _copySessionToForm(self,session):
"""Copy fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
# convert Date to date string; just copy others
if field.name.endswith('date'):
setattr(sf, field.name, getattr(session, field.name).strftime('%Y-%m-%d'))
elif field.name.endswith('startTime'):
setattr(sf, field.name, getattr(session, field.name).strftime('%H:%M'))
elif field.name.endswith('speaker'):
setattr(sf, field.name, session.speaker.name)
elif field.name.endswith('typeOfSession'):
setattr(sf, field.name, getattr(SessionType, getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, session.key.urlsafe())
sf.check_initialized()
return sf
@endpoints.method(SESSION_WISHLIST_POST_REQUEST,
BooleanMessage,
path='profile/wishlist/{websafeSessionKey}',
http_method='POST',
name='addSessionToWishlist')
@ndb.transactional(xg=True)
def addSessionToWishlist(self, request):
"""adds the session to the user's list of sessions they are interested in attending"""
# get user Profile
prof = self._getProfileFromUser()
# get session and check if it exists
key = ndb.Key(urlsafe=request.websafeSessionKey)
session = key.get()
if not session:
raise endpoints.BadRequestException("Session with key %s doesn't exist" % request.websafeSessionKey)
# ensure is not already in user's wishlist
if key in prof.wishList:
raise ConflictException("This session is already in user's wishlist")
# add session to user's list
prof.wishList.append(key)
prof.put()
return BooleanMessage(data=True)
@endpoints.method(message_types.VoidMessage,
SessionForms,
path='profile/wishlist/all',
http_method='GET',
name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""query for all the sessions in a conference that the user is interested in"""
# get user Profile
prof = self._getProfileFromUser()
# get all sessions in user's wishlist
sessions = ndb.get_multi(prof.wishList)
# return a set of `SessionForm` objects
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
@endpoints.method(SESSION_WISHLIST_POST_REQUEST,
BooleanMessage,
path='profile/wishlist/{websafeSessionKey}',
http_method='DELETE',
name='deleteSessionInWishlist')
@ndb.transactional()
def deleteSessionInWishlist(self, request):
"""removes the session from the user’s list of sessions they are interested in attending"""
# get user Profile
prof = self._getProfileFromUser()
key = ndb.Key(urlsafe=request.websafeSessionKey)
# build the session key and check that it exists in the user's wishlist
if key not in prof.wishList:
raise endpoints.BadRequestException("Failed to find session in user's wishlist")
# remove session from user's wishlist
prof.wishList.remove(key)
prof.put()
return BooleanMessage(data=True)
#additional query endpoint
@endpoints.method(message_types.VoidMessage,
SessionForms,
path='conference/sessions/hour',
http_method='GET',
name='gethourSessions')
def gethourSessions(self,request):
""" Return all sessions that are of an hour or less """
sessions = Session.query(Session.duration <= 60)
#here duration is specified in minutes
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
def _getSessionQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Session.query()
inequality_filter, filters = self._formatFilters(request.filters,type='Session')
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Session.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Session.name)
for filtr in filters:
if filtr["field"] in ["duration"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
@endpoints.method(SessionQueryForms,
SessionForms,
path='querySessions',
http_method='POST',
name='querySessions')
def querySessions(self, request):
"""Query for sessions."""
# use `SESSION_FIELDS` to construct query.
sessions = self._getSessionQuery(request)
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
#special query problem
@endpoints.method(SESSION_POST_REQUEST,
SessionForms,
path='conference/{websafeConferenceKey}/sessions/typewithtime',
http_method='GET',
name='getTypewithTime')
def getTypewithTime(self, request):
"""Special query that handles a pair of inequality filters."""
wck=request.websafeConferenceKey
# get conference object
confKey=ndb.Key(urlsafe=wck)
if not confKey.get():
raise endpoints.NotFoundException('No conference found with key : %s' % wck)
query=Session.query(ancestor=confKey)
query=query.filter(Session.typeOfSession != str(SessionType.workshop))
query=query.order(Session.typeOfSession)
query=query.order(Session.date)
query=query.order(Session.startTime)
results=[session for session in query if session.startTime < time(19)]
return SessionForms(items=[self._copySessionToForm(session) for session in results])
@endpoints.method(message_types.VoidMessage,
StringMessage,
path='conference/featured_speakers/get',
http_method='GET',
name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Returns featured speaker along with their sessions from memcache"""
return StringMessage(data=memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) or "")
# registers API
api = endpoints.api_server([ConferenceApi])
| apache-2.0 | 3,213,191,701,473,363,000 | 38.264574 | 116 | 0.613094 | false |
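The `_formatFilters()`/`_getQuery()` pair in the file above enforces the Datastore restriction that inequality filters may apply to only one property per query, and orders on that property first. Below is a minimal, framework-free sketch of the same validation idea; the function name and field map are illustrative, not part of the original code.

```python
# Sketch of the single-inequality rule enforced by _formatFilters() above.
# Names are illustrative; only the standard library is used.
OPERATORS = {'EQ': '=', 'GT': '>', 'GTEQ': '>=', 'LT': '<', 'LTEQ': '<=', 'NE': '!='}

def format_filters(filters, field_map):
    """Validate filters; allow inequality operators on at most one field."""
    formatted = []
    inequality_field = None
    for f in filters:
        try:
            field = field_map[f['field']]
            operator = OPERATORS[f['operator']]
        except KeyError:
            raise ValueError('Filter contains invalid field or operator.')
        if operator != '=':  # every operator except "=" is an inequality
            if inequality_field and inequality_field != field:
                raise ValueError('Inequality filter is allowed on only one field.')
            inequality_field = field
        formatted.append({'field': field, 'operator': operator, 'value': f['value']})
    return inequality_field, formatted

# Example: a second inequality on a different field raises ValueError.
# format_filters(
#     [{'field': 'CITY', 'operator': 'NE', 'value': 'London'},
#      {'field': 'MONTH', 'operator': 'GT', 'value': 6}],
#     {'CITY': 'city', 'MONTH': 'month'})
```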
foursquare/fsqio | src/jvm/io/fsq/twofishes/scripts/match-flickr.py | 1 | 2018 | #!/usr/bin/python
import sys
import csv
import urllib
import urllib2
import json
import geojson
output = {}
files = sys.argv[1:]
for f in files:
fdata = open(f).read()
try:
data = geojson.loads(fdata)
except:
print 'failed to parse: ' + fdata
continue
for feature in data['features']:
woeid = str(feature['properties']['woe_id'])
label = feature['properties']['label']
woetype = int(feature['properties']['place_type_id'])
bbox = feature['geometry']['bbox']
url = u"http://localhost:8081/?query=%s&woeHint=%s" % (urllib.quote(label.encode('utf-8')), woetype)
try:
response = urllib2.urlopen(url)
data = response.read()
except:
print url
print "Unexpected error:", sys.exc_info()[0]
continue
jsonData = json.loads(data)
geocodes = False
match = False
for interp in jsonData['interpretations']:
if interp['what']:
break
fwoetype = interp['feature']['woeType']
geocodes = True
center = interp['feature']['geometry']['center']
if (
center['lat'] >= bbox[1] and
center['lat'] <= bbox[3] and
center['lng'] >= bbox[0] and
center['lng'] <= bbox[2]
):
match = True
geonameids = filter(lambda i: i['source'] == 'geonameid', interp['feature']['ids'])
if len(geonameids):
id = geonameids[0]['id']
if ((id not in output) or (output[id][0] == False)):
lowlng = bbox[0]
lowlat = bbox[1]
hilng = bbox[2]
hilat = bbox[3]
output[id] = (fwoetype == woetype, '%s\t%s\t%s\t%s\t%s' % (id, lowlng, lowlat, hilng, hilat))
if not geocodes:
print (u'No geocodes for %s %s' % (woeid, label)).encode('utf-8')
elif not match:
print (u'Geocodes, but no match for %s: %s' % (woeid, label)).encode('utf-8')
print bbox
print '\t' + url
outfile = open('flickr-bbox.tsv', 'w')
for k in output:
outfile.write('%s\n' % output[k][1])
| apache-2.0 | -910,351,895,282,670,600 | 25.552632 | 106 | 0.564916 | false |
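The flickr-matching script above treats a geocode as a match when the interpretation's center point falls inside the WOE feature's bounding box. A standalone sketch of that containment test, assuming the GeoJSON bbox order [min_lng, min_lat, max_lng, max_lat] the script relies on:

```python
# Sketch of the bbox containment test used above; assumes GeoJSON bbox order
# [min_lng, min_lat, max_lng, max_lat].
def center_in_bbox(center, bbox):
    """Return True if a {'lat': ..., 'lng': ...} point lies inside bbox."""
    min_lng, min_lat, max_lng, max_lat = bbox
    return (min_lat <= center['lat'] <= max_lat and
            min_lng <= center['lng'] <= max_lng)

# Example (coordinates are made up):
# center_in_bbox({'lat': 51.5, 'lng': -0.12}, [-0.51, 51.28, 0.33, 51.69])  # True
```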
botswana-harvard/edc-lab | old/lab_clinic_api/migrations/0001_initial.py | 1 | 31522 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 13:52
from __future__ import unicode_literals
import datetime
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import django_revision.revision_field
import edc_base.model.fields.custom_fields
import edc_base.model.fields.hostname_modification_field
import edc_base.model.fields.userfield
import edc_base.model.fields.uuid_auto_field
import edc_base.model.validators.date
class Migration(migrations.Migration):
initial = True
dependencies = [
('edc_registration', '0002_auto_20160503_1604'),
]
operations = [
migrations.CreateModel(
name='Aliquot',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('aliquot_identifier', models.CharField(editable=False, help_text='Aliquot identifier', max_length=25, unique=True, verbose_name='Aliquot Identifier')),
('aliquot_datetime', models.DateTimeField(default=datetime.datetime(2016, 5, 7, 13, 51, 55, 444847), verbose_name='Date and time aliquot created')),
('count', models.IntegerField(editable=False, null=True)),
('medium', models.CharField(choices=[('tube_any', 'Tube'), ('tube_edta', 'Tube EDTA'), ('swab', 'Swab'), ('dbs_card', 'DBS Card')], default='TUBE', max_length=25, verbose_name='Medium')),
('original_measure', models.DecimalField(decimal_places=2, default='5.00', max_digits=10)),
('current_measure', models.DecimalField(decimal_places=2, default='5.00', max_digits=10)),
('measure_units', models.CharField(choices=[('mL', 'mL'), ('uL', 'uL'), ('spots', 'spots'), ('n/a', 'Not Applicable')], default='mL', max_length=25)),
('status', models.CharField(choices=[('available', 'available'), ('consumed', 'consumed')], default='available', max_length=25)),
('comment', models.CharField(blank=True, max_length=50, null=True)),
('subject_identifier', models.CharField(editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('is_packed', models.BooleanField(default=False, verbose_name='packed')),
('receive_identifier', models.CharField(editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('import_datetime', models.DateTimeField(editable=False, null=True)),
],
options={
'ordering': ('receive', 'count'),
},
),
migrations.CreateModel(
name='AliquotCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('name', models.CharField(db_index=True, help_text='(suggest 40 characters max.)', max_length=250, null=True, unique=True, verbose_name='Name')),
('short_name', models.CharField(db_index=True, help_text='This is the stored value, required', max_length=250, null=True, unique=True, verbose_name='Stored value')),
('display_index', models.IntegerField(db_index=True, default=0, help_text='Index to control display order if not alphabetical, not required', verbose_name='display index')),
('field_name', models.CharField(blank=True, editable=False, help_text='Not required', max_length=25, null=True)),
('version', models.CharField(default='1.0', editable=False, max_length=35)),
],
),
migrations.CreateModel(
name='AliquotType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('name', models.CharField(max_length=50, verbose_name='Description')),
('alpha_code', models.CharField(max_length=15, unique=True, validators=[django.core.validators.RegexValidator('^[A-Z]{2,15}$')], verbose_name='Alpha code')),
('numeric_code', models.CharField(max_length=2, unique=True, validators=[django.core.validators.RegexValidator('^[0-9]{2}$')], verbose_name='Numeric code (2-digit)')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Order',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('order_identifier', models.CharField(db_index=True, editable=False, help_text='Allocated internally', max_length=25, unique=True, verbose_name='Order number')),
('order_datetime', models.DateTimeField(db_index=True, validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Order Date')),
('status', models.CharField(choices=[('PENDING', 'Pending'), ('PARTIAL', 'Partial'), ('COMPLETE', 'Complete'), ('ERROR', 'Error'), ('REDRAW', 'Redraw'), ('WITHDRAWN', 'Withdrawn'), ('DUPLICATE', 'Duplicate')], max_length=25, null=True, verbose_name='Status')),
('comment', models.CharField(blank=True, max_length=150, null=True, verbose_name='Comment')),
('import_datetime', models.DateTimeField(null=True)),
('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('aliquot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Aliquot')),
],
options={
'ordering': ['order_identifier'],
},
),
migrations.CreateModel(
name='Panel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('name', models.CharField(db_index=True, max_length=50, unique=True, verbose_name='Panel Name')),
('comment', models.CharField(blank=True, max_length=250, verbose_name='Comment')),
('edc_name', models.CharField(max_length=50, null=True)),
('panel_type', models.CharField(choices=[('TEST', 'Test panel'), ('STORAGE', 'Storage panel')], default='TEST', max_length=15)),
('aliquot_type', models.ManyToManyField(help_text='Choose all that apply', to='lab_clinic_api.AliquotType')),
],
),
migrations.CreateModel(
name='Receive',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('receive_identifier', models.CharField(db_index=True, editable=False, max_length=25, null=True, unique=True, verbose_name='Receiving Identifier')),
('requisition_identifier', models.CharField(blank=True, db_index=True, max_length=25, null=True, verbose_name='Requisition Identifier')),
('drawn_datetime', models.DateTimeField(db_index=True, validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Date and time drawn')),
('receive_datetime', models.DateTimeField(db_index=True, default=datetime.datetime(2016, 5, 7, 13, 51, 55, 407698), validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Date and time received')),
('visit', models.CharField(max_length=25, verbose_name='Visit Code')),
('clinician_initials', edc_base.model.fields.custom_fields.InitialsField(help_text='Type 2-3 letters, all in uppercase and no spaces', max_length=3, verbose_name='Initials')),
('receive_condition', models.CharField(max_length=50, null=True, verbose_name='Condition of primary tube')),
('import_datetime', models.DateTimeField(null=True)),
('registered_subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='edc_registration.RegisteredSubject')),
],
),
migrations.CreateModel(
name='Result',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('result_identifier', models.CharField(db_index=True, editable=False, max_length=25)),
('result_datetime', models.DateTimeField(db_index=True, help_text='Date result added to system.')),
('release_status', models.CharField(choices=[('NEW', 'New'), ('RELEASED', 'Released'), ('AMENDED', 'Amended')], db_index=True, default='NEW', max_length=25)),
('release_datetime', models.DateTimeField(blank=True, db_index=True, help_text='Date result authorized for release. This field will auto-fill if release status is changed', null=True)),
('release_username', models.CharField(blank=True, db_index=True, help_text='Username of person authorizing result for release. This field will auto-fill if release status is changed', max_length=50, null=True, verbose_name='Release username')),
('comment', models.CharField(blank=True, max_length=50, null=True, verbose_name='Comment')),
('dmis_result_guid', models.CharField(blank=True, editable=False, help_text='dmis import value. N/A unless data imported from old system', max_length=36, null=True)),
('import_datetime', models.DateTimeField(null=True)),
('reviewed', models.BooleanField(default=False)),
('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Order')),
],
options={
'ordering': ['result_identifier'],
},
),
migrations.CreateModel(
name='ResultItem',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('result_item_value', models.CharField(db_index=True, max_length=25, verbose_name='Result')),
('result_item_value_as_float', models.FloatField(db_index=True, editable=False, null=True, verbose_name='Numeric result')),
('result_item_quantifier', models.CharField(choices=[('=', '='), ('>', '>'), ('>=', '>='), ('<', '<'), ('<=', '<=')], default='=', max_length=25, verbose_name='Quantifier')),
('result_item_datetime', models.DateTimeField(db_index=True, verbose_name='Assay date and time')),
('result_item_operator', models.CharField(blank=True, db_index=True, max_length=50, null=True, verbose_name='Operator')),
('grade_range', models.CharField(blank=True, max_length=25, null=True)),
('grade_flag', models.CharField(blank=True, max_length=5, null=True)),
('grade_message', models.CharField(blank=True, max_length=50, null=True)),
('grade_warn', models.BooleanField(default=False)),
('reference_flag', models.CharField(blank=True, max_length=5, null=True)),
('reference_range', models.CharField(blank=True, max_length=25, null=True)),
('validation_status', models.CharField(choices=[('P', 'Preliminary'), ('F', 'Final'), ('R', 'Rejected')], db_index=True, default='P', help_text='Default is preliminary', max_length=10, verbose_name='Status')),
('validation_datetime', models.DateTimeField(blank=True, db_index=True, null=True)),
('validation_username', models.CharField(blank=True, db_index=True, max_length=50, null=True, verbose_name='Validation username')),
('validation_reference', models.CharField(blank=True, max_length=50, null=True, verbose_name='Validation reference')),
('comment', models.CharField(blank=True, max_length=50, null=True, verbose_name='Validation Comment')),
('error_code', models.CharField(blank=True, max_length=50, null=True, verbose_name='Error codes')),
('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)),
('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)),
('import_datetime', models.DateTimeField(null=True)),
('subject_type', models.CharField(max_length=25, null=True)),
('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Result')),
],
options={
'ordering': ('-result_item_datetime',),
},
),
migrations.CreateModel(
name='Review',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)),
('title', models.CharField(editable=False, max_length=50)),
('review_datetime', models.DateTimeField(null=True)),
('review_status', models.CharField(choices=[('REQUIRES_REVIEW', 'Requires Review'), ('REVIEWED', 'Reviewed')], max_length=25)),
('comment', models.TextField(blank=True, max_length=500, null=True)),
],
options={
'ordering': ['review_datetime'],
},
),
migrations.CreateModel(
name='TestCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('code', models.CharField(max_length=15, unique=True, validators=[django.core.validators.RegexValidator('^[A-Z0-9\\%\\_\\-]{1,15}$', 'Ensure test code is uppercase alphanumeric ( with _ ,%) and no spaces')], verbose_name='Test Code')),
('name', models.CharField(max_length=50, verbose_name='Test Code Description')),
('units', models.CharField(choices=[('%', '%'), ('10^0/L', '10^0/L'), ('10^3/uL', '10^3/uL'), ('10^6/uL', '10^6/uL'), ('cells/ul', 'cells/ul'), ('copies/ml', 'copies/ml'), ('fL', 'fL'), ('g/dL', 'g/dL'), ('g/L', 'g/L'), ('mg/dL', 'mg/dL'), ('mg/L', 'mg/L'), ('mm^3', 'mm^3'), ('mm/H', 'mm/H'), ('mmol/L', 'mmol/L'), ('ng/ml', 'ng/ml'), ('pg', 'pg'), ('ratio', 'ratio'), ('U/L', 'U/L'), ('umol/L', 'umol/L')], max_length=25, verbose_name='Units')),
('display_decimal_places', models.IntegerField(blank=True, null=True, verbose_name='Decimal places to display')),
('is_absolute', models.CharField(choices=[('absolute', 'Absolute'), ('calculated', 'Calculated')], default='absolute', max_length=15, verbose_name='Is the value absolute or calculated?')),
('formula', models.CharField(blank=True, max_length=50, null=True, verbose_name='If calculated, formula?')),
('edc_code', models.CharField(db_index=True, max_length=25, null=True)),
('edc_name', models.CharField(db_index=True, max_length=50, null=True)),
],
options={
'ordering': ['edc_name'],
},
),
migrations.CreateModel(
name='TestCodeGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')),
('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')),
('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)),
('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)),
('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')),
('code', models.CharField(max_length=15, null=True)),
('name', models.CharField(blank=True, max_length=25, null=True)),
],
options={
'ordering': ['code'],
},
),
migrations.AddField(
model_name='testcode',
name='test_code_group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.TestCodeGroup'),
),
migrations.AddField(
model_name='resultitem',
name='test_code',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='lab_clinic_api.TestCode'),
),
migrations.AddField(
model_name='result',
name='review',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Review'),
),
migrations.AddField(
model_name='panel',
name='test_code',
field=models.ManyToManyField(blank=True, null=True, to='lab_clinic_api.TestCode'),
),
migrations.AddField(
model_name='order',
name='panel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Panel'),
),
migrations.AddField(
model_name='aliquot',
name='aliquot_condition',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.AliquotCondition', verbose_name='Aliquot Condition'),
),
migrations.AddField(
model_name='aliquot',
name='aliquot_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.AliquotType', verbose_name='Aliquot Type'),
),
migrations.AddField(
model_name='aliquot',
name='primary_aliquot',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='primary', to='lab_clinic_api.Aliquot'),
),
migrations.AddField(
model_name='aliquot',
name='receive',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Receive'),
),
migrations.AddField(
model_name='aliquot',
name='source_aliquot',
field=models.ForeignKey(editable=False, help_text='Aliquot from which this aliquot was created, Leave blank if this is the primary tube', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='source', to='lab_clinic_api.Aliquot'),
),
migrations.AlterUniqueTogether(
name='aliquot',
unique_together=set([('receive', 'count')]),
),
]
| gpl-2.0 | 2,280,493,389,148,608,000 | 91.985251 | 463 | 0.645327 | false |
mmasaki/trove | trove/guestagent/module/drivers/module_driver.py | 1 | 2114 | # Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import six
from trove.common import cfg
CONF = cfg.CONF
@six.add_metaclass(abc.ABCMeta)
class ModuleDriver(object):
"""Base class that defines the contract for module drivers.
Note that you don't have to derive from this class to have a valid
driver; it is purely a convenience.
"""
def get_type(self):
"""This is used when setting up a module in Trove, and is here for
code clarity. It just returns the name of the driver.
"""
return self.get_name()
def get_name(self):
"""Attempt to generate a usable name based on the class name. If
overridden, must be in lower-case.
"""
return self.__class__.__name__.lower().replace(
'driver', '').replace(' ', '_')
@abc.abstractmethod
def get_description(self):
"""Description for the driver."""
pass
@abc.abstractmethod
def get_updated(self):
"""Date the driver was last updated."""
pass
@abc.abstractmethod
def apply(self, name, datastore, ds_version, data_file):
"""Apply the data to the guest instance. Return status and message
        as a tuple.
"""
return False, "Not a concrete driver"
@abc.abstractmethod
def remove(self, name, datastore, ds_version, data_file):
"""Remove the data from the guest instance. Return status and message
        as a tuple.
"""
return False, "Not a concrete driver"
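# Illustrative sketch (not part of the original module): a minimal concrete
# driver showing how the ModuleDriver contract above can be satisfied. The
# class name, description text and date below are hypothetical placeholders,
# not a real Trove driver.
class ExampleNoopDriver(ModuleDriver):
    """Example driver that accepts any payload and does nothing with it."""
    def get_description(self):
        return "Example no-op module driver (sketch only)"
    def get_updated(self):
        return "2016-01-01"
    def apply(self, name, datastore, ds_version, data_file):
        # A real driver would act on data_file for the given datastore here.
        return True, "Module %s applied (no-op)" % name
    def remove(self, name, datastore, ds_version, data_file):
        return True, "Module %s removed (no-op)" % name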
| apache-2.0 | -1,046,090,816,335,590,800 | 29.637681 | 78 | 0.648534 | false |
easy-as-pie-labs/tweap | tweap/project_management/tests.py | 1 | 11427 | from django.test import TestCase
from project_management.models import Project, Invitation, Tag
from project_management.tools import invite_users, get_tags
from django.contrib.auth.models import User
import json
from django.http.response import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed
class ModelTest(TestCase):
project_name = "Testproject"
project_description = "Testdescription"
def test_project_model_members_and_leave(self):
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
user2 = User.objects.create_user('testuser2', '[email protected]', 'testpw')
project = Project(name=self.project_name, description=self.project_description)
project.save()
self.assertEqual(str(project), self.project_name)
project.members.add(user)
project.members.add(user2)
# test if users are in project now
self.assertTrue(user in project.members.all())
self.assertTrue(user2 in project.members.all())
project.leave(user2)
project_exists = Project.objects.filter(id=project.id).exists()
# test if user2 is removed from project and project still exists
self.assertTrue(project_exists)
self.assertTrue(user in project.members.all())
self.assertFalse(user2 in project.members.all())
project.leave(user)
project_exists = Project.objects.filter(id=project.id).exists()
# test if leave of last user deletes the project
self.assertFalse(project_exists)
# cleanup
user.delete()
user2.delete()
def test_invitation_model_get_for_users(self):
project = Project(name=self.project_name, description=self.project_description)
project.save()
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
invitation = Invitation(user=user, project=project)
invitation.save()
self.assertEqual(str(invitation), user.username + ' invited to ' + self.project_name)
# test if invitation is returned for the user via the method get_for_user()
self.assertTrue(invitation in Invitation.get_for_user(user))
invitation.delete()
# cleanup
user.delete()
def test_invitation_model_accept(self):
project = Project(name=self.project_name, description=self.project_description)
project.save()
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
invitation = Invitation(user=user, project=project)
invitation.save()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if invitation exists
self.assertTrue(invitation_exists)
invitation.accept()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if user is now member of the project and invitation was deleted
self.assertTrue(user in project.members.all())
self.assertFalse(invitation_exists)
# cleanup
user.delete()
def test_invitation_model_reject(self):
project = Project(name=self.project_name, description=self.project_description)
project.save()
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
invitation = Invitation(user=user, project=project)
invitation.save()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if invitation exists
self.assertTrue(invitation_exists)
invitation.reject()
invitation_exists = Invitation.objects.filter(id=invitation.id).exists()
# test if user is not member of the project and invitation was deleted
self.assertFalse(user in project.members.all())
self.assertFalse(invitation_exists)
# cleanup
user.delete()
def test_has_user(self):
user = User.objects.create_user('testuser', '[email protected]', 'testpw')
user2 = User.objects.create_user('testuser2', '[email protected]', 'testpw')
user3 = User.objects.create_user('testuser3', '[email protected]', 'testpw')
project = Project(name=self.project_name, description=self.project_description)
project.save()
self.assertEqual(str(project), self.project_name)
project.members.add(user)
project.members.add(user2)
# test if users are in project now
self.assertTrue(project.has_user(user))
self.assertTrue(project.has_user(user2))
self.assertFalse(project.has_user(user3))
project.leave(user2)
project_exists = Project.objects.filter(id=project.id).exists()
# test if user2 is removed from project and project still exists
self.assertTrue(project.has_user(user))
self.assertFalse(project.has_user(user2))
self.assertFalse(project.has_user(user3))
project.leave(user)
project_exists = Project.objects.filter(id=project.id).exists()
# test if leave of last user deletes the project
self.assertFalse(project_exists)
# cleanup
user.delete()
user2.delete()
user3.delete()
class ToolsTest(TestCase):
def test_invite_users(self):
project = Project(name="Testprojekt")
project.save()
user1 = User.objects.create_user('user1', '[email protected]', 'testpw')
user2 = User.objects.create_user('user2', '[email protected]', 'testpw')
user3 = User.objects.create_user('user3', '[email protected]', 'testpw')
# test with username and email
user_string = ['user1', '[email protected]', 'test']
user_string = json.dumps(user_string)
invite_users(user_string, project)
        # test if both users are invited
self.assertTrue(Invitation.objects.filter(user=user1, project=project).exists())
self.assertTrue(Invitation.objects.filter(user=user2, project=project).exists())
self.assertFalse(Invitation.objects.filter(user=user3, project=project).exists())
#cleanup
user1.delete()
user2.delete()
user3.delete()
def test_get_tags(self):
project = Project(name="Testprojekt")
project.save()
tag = Tag(name="testtag1", project=project)
tag.save()
#test if only testtag1 exists
self.assertTrue(Tag.objects.filter(project=project, name="testtag1").exists())
self.assertFalse(Tag.objects.filter(project=project, name="testtag2").exists())
self.assertFalse(Tag.objects.filter(project=project, name="testtag3").exists())
        tag_string = ['testtag1', 'testtag2', 'testtag3']
tag_string = json.dumps(tag_string)
tags = get_tags(tag_string, project)
#test if return list contains 3 Tags
self.assertEquals(len(tags), 3)
self.assertIsInstance(tags[0], Tag)
        #test that all 3 testtags exist now
self.assertTrue(Tag.objects.filter(project=project, name="testtag1").exists())
self.assertTrue(Tag.objects.filter(project=project, name="testtag2").exists())
self.assertTrue(Tag.objects.filter(project=project, name="testtag3").exists())
class ViewsTest(TestCase):
def setup_login(self):
User.objects.create_user('user', '[email protected]', 'testpw')
self.client.post('/users/login/', {'username': 'user', 'password': 'testpw'})
def test_project_create_edit(self):
self.setup_login()
# test if page is available
resp = self.client.get('/projects/new/')
self.assertEqual(resp.status_code, 200)
self.assertFalse('error_messages' in resp.context)
# test if validation works
resp = self.client.post('/projects/new/', {})
self.assertEqual(resp.status_code, 200)
self.assertTrue(resp.context['error_messages'])
# test if project with name only can be created
resp = self.client.post('/projects/new/', {'name': 'TestCreateProject', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
project_exist = Project.objects.filter(name='TestCreateProject').exists()
self.assertTrue(project_exist)
# test if project with name and description can be created
resp = self.client.post('/projects/new/', {'name': 'TestCreateProject2', 'description': 'I am a test project', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
project_exist = Project.objects.filter(name='TestCreateProject2').exists()
self.assertTrue(project_exist)
project = Project.objects.get(name='TestCreateProject2')
self.assertEqual(project.description, 'I am a test project')
        # test if a non-existent project returns 404
resp = self.client.get('/projects/edit/9999/')
self.assertEqual(resp.status_code, 404)
# test if an existing project can be edited
resp = self.client.get('/projects/edit/' + str(project.id) + '/')
self.assertEqual(resp.status_code, 200)
# test if changes are saved
resp = self.client.post('/projects/edit/' + str(project.id) + '/', {'name': 'new name', 'description': 'new description', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
project = Project.objects.get(id=project.id)
self.assertEqual(project.name, 'new name')
self.assertEqual(project.description, 'new description')
def test_project_view(self):
self.setup_login()
# test if project with name only can be created
resp = self.client.post('/projects/new/', {'name': 'TestCreateProject', 'icon': 'fa fa-folder-open-o'})
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
project_exists = Project.objects.filter(name='TestCreateProject').exists()
self.assertTrue(project_exists)
project = Project.objects.get(name='TestCreateProject')
        print('test: access own project')
resp = self.client.get('/projects/' + str(project.id))
self.assertEqual(resp.status_code, 200)
self.assertTrue(type(resp) is HttpResponse)
resp = self.client.post('/projects/' + str(project.id))
self.assertTrue(type(resp) is HttpResponseNotAllowed)
print('test non-existent project')
resp = self.client.get('/projects/1337')
self.assertEqual(resp.status_code, 404)
self.client.get('/users/logout/')
print('test: access \'own\' project when not logged in')
resp = self.client.get('/projects/' + str(project.id))
self.assertEqual(resp.status_code, 302)
self.assertTrue(type(resp) is HttpResponseRedirect)
User.objects.create_user('anotheruser', '[email protected]', 'testpw')
self.client.post('/users/login/', {'username': 'anotheruser', 'password': 'testpw'})
print('test: someone else\'s project')
resp = self.client.get('/projects/' + str(project.id))
self.assertEqual(resp.status_code, 404)
def test_view_all(self):
# TODO: renew tests
pass
def test_view_invites(self):
# TODO: renew tests
pass
def test_leave(self):
pass
def test_invitation_handler(self):
pass
| gpl-3.0 | 4,192,250,747,644,782,600 | 40.552727 | 161 | 0.655203 | false |
Wyn10/Cnchi | cnchi/ui/gtk/pages/features.py | 1 | 13668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# features.py
#
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Features screen """
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import subprocess
import logging
import desktop_info
import features_info
import misc.extra as misc
from ui.base_widgets import Page
COL_IMAGE = 0
COL_TITLE = 1
COL_DESCRIPTION = 2
COL_SWITCH = 3
class Features(Page):
""" Features screen class """
def __init__(self, params, prev_page="desktop", next_page="disk_grp", **kwargs):
""" Initializes features ui """
super().__init__(self, params, name="features", prev_page=prev_page,
next_page=next_page, **kwargs)
self.listbox_rows = {}
self.title = _("Features")
self.in_group = True
# Set up list box
self.listbox = self.ui.get_object("listbox")
self.listbox.set_selection_mode(Gtk.SelectionMode.NONE)
self.listbox.set_sort_func(self.listbox_sort_by_name, None)
# self.listbox.set_selection_mode(Gtk.SelectionMode.BROWSE)
# self.listbox.connect("row-selected", self.on_listbox_row_selected)
# This is initialized each time this screen is shown in prepare()
self.features = None
# Only show ufw rules and aur disclaimer info once
self.info_already_shown = {"ufw": False, "aur": False}
# Only load defaults the first time this screen is shown
self.load_defaults = True
@staticmethod
def nvidia_detected():
from hardware.nvidia import Nvidia
if Nvidia().detect():
return True
from hardware.nvidia_340xx import Nvidia340xx
if Nvidia340xx().detect():
return True
from hardware.nvidia_304xx import Nvidia304xx
if Nvidia304xx().detect():
return True
return False
@staticmethod
def amd_detected():
from hardware.catalyst import Catalyst
return Catalyst().detect()
@staticmethod
def on_listbox_row_selected(listbox, listbox_row):
""" Someone selected a different row of the listbox
WARNING: IF LIST LAYOUT IS CHANGED THEN THIS SHOULD BE CHANGED ACCORDINGLY. """
if listbox_row is not None:
for vbox in listbox_row:
switch = vbox.get_children()[2]
if switch:
switch.set_active(not switch.get_active())
def fill_listbox(self):
for listbox_row in self.listbox.get_children():
listbox_row.destroy()
self.listbox_rows = {}
# Only add graphic-driver feature if an AMD or Nvidia is detected
# FIXME: Conflict between lib32-nvidia-libgl and lib32-mesa-libgl
if "graphic_drivers" in self.features:
if not self.amd_detected() and not self.nvidia_detected():
logging.debug("Neither NVidia nor AMD have been detected.")
self.features.remove("graphic_drivers")
#if "graphic_drivers" in self.features:
# self.features.remove("graphic_drivers")
for feature in self.features:
box = Gtk.Box(spacing=20)
box.set_name(feature + "-row")
self.listbox_rows[feature] = []
if feature in features_info.ICON_NAMES:
icon_name = features_info.ICON_NAMES[feature]
else:
logging.debug("No icon found for feature %s", feature)
icon_name = "missing"
object_name = "image_" + feature
image = Gtk.Image.new_from_icon_name(
icon_name,
Gtk.IconSize.DND)
image.set_name(object_name)
image.set_property('margin_start', 10)
self.listbox_rows[feature].append(image)
box.pack_start(image, False, False, 0)
text_box = Gtk.VBox()
object_name = "label_title_" + feature
label_title = Gtk.Label.new()
label_title.set_halign(Gtk.Align.START)
label_title.set_justify(Gtk.Justification.LEFT)
label_title.set_name(object_name)
self.listbox_rows[feature].append(label_title)
text_box.pack_start(label_title, False, False, 0)
object_name = "label_" + feature
label = Gtk.Label.new()
label.set_name(object_name)
self.listbox_rows[feature].append(label)
text_box.pack_start(label, False, False, 0)
box.pack_start(text_box, False, False, 0)
object_name = "switch_" + feature
switch = Gtk.Switch.new()
switch.set_name(object_name)
switch.set_property('margin_top', 10)
switch.set_property('margin_bottom', 10)
switch.set_property('margin_end', 10)
switch.get_style_context().add_class('switch')
switch.set_property('width_request', 200)
self.listbox_rows[feature].append(switch)
box.pack_end(switch, False, False, 0)
# Add row to our gtklist
self.listbox.add(box)
self.listbox.get_style_context().add_class('list_box')
self.listbox.show_all()
@staticmethod
def listbox_sort_by_name(row1, row2, user_data):
""" Sort function for listbox
Returns : < 0 if row1 should be before row2, 0 if they are equal and > 0 otherwise
WARNING: IF LAYOUT IS CHANGED IN fill_listbox THEN THIS SHOULD BE
CHANGED ACCORDINGLY. """
box1 = row1.get_child()
txt_box1 = box1.get_children()[1]
label1 = txt_box1.get_children()[0]
box2 = row2.get_child()
txt_box2 = box2.get_children()[1]
label2 = txt_box2.get_children()[0]
text = [label1.get_text(), label2.get_text()]
# sorted_text = misc.sort_list(text, self.settings.get("locale"))
sorted_text = misc.sort_list(text)
# If strings are already well sorted return < 0
if text[0] == sorted_text[0]:
return -1
        # Strings must be swapped, return > 0
return 1
def set_row_text(self, feature, title, desc, tooltip):
""" Set translated text to our listbox feature row """
if feature in self.listbox_rows:
title = "<span weight='bold' size='large'>{0}</span>".format(title)
desc = "<span size='small'>{0}</span>".format(desc)
row = self.listbox_rows[feature]
row[COL_TITLE].set_markup(title)
row[COL_DESCRIPTION].set_markup(desc)
for widget in row:
widget.set_tooltip_markup(tooltip)
def translate_ui(self):
""" Translates all ui elements """
self.header.set_subtitle(self.title)
for feature in self.features:
if feature == "graphic_drivers":
# Only add this feature if NVIDIA or AMD are detected
if not self.amd_detected() and not self.nvidia_detected():
continue
title = _(features_info.TITLES[feature])
desc = _(features_info.DESCRIPTIONS[feature])
tooltip = _(features_info.TOOLTIPS[feature])
self.set_row_text(feature, title, desc, tooltip)
# Sort listbox items
self.listbox.invalidate_sort()
def switch_defaults_on(self):
""" Enable some features by default """
if 'bluetooth' in self.features:
try:
process1 = subprocess.Popen(["lsusb"], stdout=subprocess.PIPE)
process2 = subprocess.Popen(
["grep", "-i", "bluetooth"],
stdin=process1.stdout,
stdout=subprocess.PIPE)
process1.stdout.close()
out, process_error = process2.communicate()
                if out.decode() != '':
row = self.listbox_rows['bluetooth']
row[COL_SWITCH].set_active(True)
except subprocess.CalledProcessError as err:
logging.warning(
"Error checking bluetooth presence. Command %s failed: %s",
err.cmd,
err.output)
if 'cups' in self.features:
row = self.listbox_rows['cups']
row[COL_SWITCH].set_active(True)
if 'visual' in self.features:
row = self.listbox_rows['visual']
row[COL_SWITCH].set_active(True)
def store_values(self):
""" Get switches values and store them """
for feature in self.features:
row = self.listbox_rows[feature]
is_active = row[COL_SWITCH].get_active()
self.settings.set("feature_" + feature, is_active)
if is_active:
logging.debug("Feature '%s' has been selected", feature)
# Show ufw info message if ufw is selected (show it only once)
if self.settings.get("feature_firewall") and not self.info_already_shown["ufw"]:
self.show_info_dialog("ufw")
self.info_already_shown["ufw"] = True
# Show AUR disclaimer if AUR is selected (show it only once)
if self.settings.get("feature_aur") and not self.info_already_shown["aur"]:
self.show_info_dialog("aur")
self.info_already_shown["aur"] = True
# LAMP: Ask user if he wants Apache or Nginx
if self.settings.get("feature_lamp"):
info = Gtk.MessageDialog(
transient_for=self.get_main_window(),
modal=True,
destroy_with_parent=True,
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.YES_NO)
info.set_markup("LAMP / LEMP")
msg = _("Do you want to install the Nginx server instead of the Apache server?")
info.format_secondary_markup(msg)
response = info.run()
info.destroy()
if response == Gtk.ResponseType.YES:
self.settings.set("feature_lemp", True)
else:
self.settings.set("feature_lemp", False)
self.listbox_rows = {}
return True
def show_info_dialog(self, feature):
""" Some features show an information dialog when this screen is accepted """
if feature == "aur":
# Aur disclaimer
txt1 = _("Arch User Repository - Disclaimer")
txt2 = _("The Arch User Repository is a collection of user-submitted PKGBUILDs\n"
"that supplement software available from the official repositories.\n\n"
"The AUR is community driven and NOT supported by Arch or Antergos.\n")
elif feature == "ufw":
# Ufw rules info
txt1 = _("Uncomplicated Firewall will be installed with these rules:")
toallow = misc.get_network()
txt2 = _("ufw default deny\nufw allow from {0}\nufw allow Transmission\n"
"ufw allow SSH").format(toallow)
else:
# No message
return
txt1 = "<big>{0}</big>".format(txt1)
txt2 = "<i>{0}</i>".format(txt2)
info = Gtk.MessageDialog(
transient_for=self.get_main_window(),
modal=True,
destroy_with_parent=True,
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.CLOSE)
info.set_markup(txt1)
info.format_secondary_markup(txt2)
info.run()
info.destroy()
def prepare(self, direction):
""" Prepare features screen to get ready to show itself """
# Each desktop has its own features
desktop = self.settings.get('desktop')
self.features = list(
set(desktop_info.ALL_FEATURES) -
set(desktop_info.EXCLUDED_FEATURES[desktop]))
self.fill_listbox()
self.translate_ui()
self.show_all()
if self.load_defaults:
self.switch_defaults_on()
# Only load defaults once
self.load_defaults = False
else:
# Load values user has chosen when this screen is shown again
self.load_values()
def load_values(self):
""" Get previous selected switches values """
for feature in self.features:
row = self.listbox_rows[feature]
is_active = self.settings.get("feature_" + feature)
if row[COL_SWITCH] is not None and is_active is not None:
row[COL_SWITCH].set_active(is_active)
# When testing, no _() is available
try:
_("")
except NameError as err:
def _(message):
return message
if __name__ == '__main__':
from test_screen import _, run
run('Features')
| gpl-3.0 | -3,563,286,189,405,870,000 | 36.138587 | 94 | 0.585791 | false |
SVilgelm/CloudFerry | cloudferry/lib/os/storage/plugins/copy_mechanisms.py | 1 | 5969 | # Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import random
from cloudferry.lib.utils import files
from cloudferry.lib.utils import remote_runner
from cloudferry.lib.copy_engines import base
LOG = logging.getLogger(__name__)
class CopyFailed(RuntimeError):
pass
class CopyMechanism(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def copy(self, context, source_object, destination_object):
raise NotImplementedError()
class CopyObject(object):
def __init__(self, host=None, path=None):
self.host = host
self.path = path
def __repr__(self):
return "{host}:{path}".format(host=self.host, path=self.path)
class RemoteFileCopy(CopyMechanism):
"""Uses one of `rsync`, `bbcp` or `scp` to copy volume files across remote
nodes. Primarily used for NFS backend."""
def copy(self, context, source_object, destination_object):
data = {
'host_src': source_object.host,
'path_src': source_object.path,
'host_dst': destination_object.host,
'path_dst': destination_object.path
}
try:
copier = base.get_copier_checked(context.src_cloud,
context.dst_cloud,
data)
copier.transfer(data)
except (base.FileCopyError,
base.CopierCannotBeUsed,
base.CopierNotFound,
base.NotEnoughSpace) as e:
msg = ("Copying file from {src_host}@{src_file} to "
"{dst_host}@{dst_file}, error: {err}").format(
src_host=source_object.host,
src_file=source_object.path,
dst_host=destination_object.host,
dst_file=destination_object.path,
err=e.message)
raise CopyFailed(msg)
class CopyRegularFileToBlockDevice(CopyMechanism):
"""Redirects regular file to stdout and copies over ssh tunnel to calling
node into block device"""
@staticmethod
def _generate_session_name():
return 'copy_{}'.format(random.getrandbits(64))
def copy(self, context, source_object, destination_object):
cfg_src = context.cfg.src
cfg_dst = context.cfg.dst
src_user = cfg_src.ssh_user
dst_user = cfg_dst.ssh_user
src_host = source_object.host
dst_host = destination_object.host
rr_src = remote_runner.RemoteRunner(src_host, src_user, sudo=True,
password=cfg_src.ssh_sudo_password)
rr_dst = remote_runner.RemoteRunner(dst_host, dst_user, sudo=True,
password=cfg_dst.ssh_sudo_password)
ssh_opts = ('-o UserKnownHostsFile=/dev/null '
'-o StrictHostKeyChecking=no')
# Choose auxiliary port for SSH tunnel
aux_port_start, aux_port_end = \
context.cfg.migrate.ssh_transfer_port.split('-')
aux_port = random.randint(int(aux_port_start), int(aux_port_end))
session_name = self._generate_session_name()
try:
progress_view = ""
if files.is_installed(rr_src, "pv"):
src_file_size = files.remote_file_size(rr_src,
source_object.path)
progress_view = "pv --size {size} --progress | ".format(
size=src_file_size)
# First step: prepare netcat listening on aux_port on dst and
# forwarding all the data to block device
rr_dst.run('screen -S {session_name} -d -m /bin/bash -c '
'\'nc -l {aux_port} | dd of={dst_device} bs=64k\'; '
'sleep 1',
session_name=session_name, aux_port=aux_port,
dst_device=destination_object.path)
# Second step: create SSH tunnel between source and destination
rr_src.run('screen -S {session_name} -d -m ssh {ssh_opts} -L'
' {aux_port}:127.0.0.1:{aux_port} '
'{dst_user}@{dst_host}; sleep 1',
session_name=session_name, ssh_opts=ssh_opts,
aux_port=aux_port, dst_user=dst_user,
dst_host=dst_host)
# Third step: push data through the tunnel
rr_src.run('/bin/bash -c \'dd if={src_file} bs=64k | '
'{progress_view} nc 127.0.0.1 {aux_port}\'',
aux_port=aux_port, progress_view=progress_view,
src_file=source_object.path)
except remote_runner.RemoteExecutionError as e:
msg = "Cannot copy {src_object} to {dst_object}: {error}"
msg = msg.format(src_object=source_object,
dst_object=destination_object,
error=e.message)
raise CopyFailed(msg)
finally:
try:
rr_src.run('screen -X -S {session_name} quit || true',
session_name=session_name)
rr_dst.run('screen -X -S {session_name} quit || true',
session_name=session_name)
except remote_runner.RemoteExecutionError:
LOG.error('Failed to close copy sessions', exc_info=True)
| apache-2.0 | -7,871,854,656,423,564,000 | 37.75974 | 79 | 0.565756 | false |
saullocastro/pyNastran | pyNastran/bdf/dev_vectorized/cards/elements/rod/ptube.py | 1 | 8276 | from __future__ import print_function
from six.moves import zip
from numpy import array, zeros, unique, searchsorted, arange, pi
from pyNastran.bdf.dev_vectorized.cards.elements.property import Property
#from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.bdf_interface.assign_type import (integer,
double, double_or_blank)
class PTUBE(Property):
type = 'PTUBE'
def __init__(self, model):
"""
Defines the PTUBE object.
Parameters
----------
model : BDF
the BDF object
"""
Property.__init__(self, model)
def allocate(self, ncards):
self.n = ncards
self.model.log.debug('%s ncards=%s' % (self.type, ncards))
float_fmt = self.model.float_fmt
#: Property ID
self.property_id = zeros(ncards, 'int32')
self.material_id = zeros(ncards, 'int32')
self.OD = zeros((ncards, 2), float_fmt)
self.t = zeros(ncards, float_fmt)
self.nsm = zeros(ncards, float_fmt)
def add_card(self, card, comment=''):
self.model.log.debug('n=%s i=%s' % (self.n, self.i))
i = self.i
self.property_id[i] = integer(card, 1, 'property_id')
self.material_id[i] = integer(card, 2, 'material_id')
OD1 = double(card, 3, 'OD1')
t = double_or_blank(card, 4, 't')
if t is None:
t = OD1 / 2.
self.t[i] = t
self.nsm[i] = double_or_blank(card, 5, 'nsm', 0.0)
OD2 = double_or_blank(card, 6, 'OD2', OD1)
self.OD[i, :] = [OD1, OD2]
assert len(card) <= 7, 'len(PTUBE card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def build(self):
"""
        Sorts the PTUBE cards by property_id and checks for duplicate ids
"""
if self.n:
i = self.property_id.argsort()
self.property_id = self.property_id[i]
self.material_id = self.material_id[i]
self.OD = self.OD[i, :]
self.t = self.t[i]
self.nsm = self.nsm[i]
unique_pids = unique(self.property_id)
if len(unique_pids) != len(self.property_id):
raise RuntimeError('There are duplicate PTUBE IDs...')
self._cards = []
self._comments = []
else:
self.property_id = array([], dtype='int32')
def update(self, maps):
"""
maps = {
'property' : pid_map,
'material' : mid_map,
}
"""
if self.n:
nid_map = maps['node']
mid_map = maps['material']
for i, (pid, mid) in enumerate(zip(self.property_id, self.material_id)):
self.property_id[i] = pid_map[pid]
self.material_id[i] = mid_map[mid]
#=========================================================================
def get_mass_per_length_by_property_id(self, property_id=None):
# L * (A * rho + nsm)
i = self.get_property_index_by_property_id(property_id)
        A = self.get_area_by_property_index(i)
mid = self.material_id[i]
#mat = self.model.materials.get_material(mid)
rho = self.model.materials.get_density_by_material_id(mid)
nsm = self.nsm[i]
return A * rho + nsm
def get_area_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
return self.get_area_by_property_index(i)
def get_area_by_property_index(self, i=None):
area = zeros(len(i), dtype='float64')
for ni, ii in enumerate(i):
A = (self._area1(ii) + self._area2(ii)) / 2.
area[ni] = A
return area
def _area1(self, i):
"""Gets the Area of Section 1 of the CTUBE."""
Dout = self.OD[i, 0]
if self.t[i] == 0:
return pi / 4. * Dout**2
        Din = Dout - 2 * self.t[i]
A1 = pi / 4. * (Dout * Dout - Din * Din)
return A1
def _area2(self, i):
"""Gets the Area of Section 2 of the CTUBE."""
Dout = self.OD[i, 1]
if self.t[i] == 0:
return pi / 4. * Dout**2
        Din = Dout - 2 * self.t[i]
A2 = pi / 4. * (Dout * Dout - Din * Din)
return A2
def get_non_structural_mass_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
nsm = self.nsm[i]
return nsm
#def get_E_by_property_id(self, property_id=None):
#i = self.get_property_index_by_property_id(property_id)
#material_id = self.material_id[i]
#E = self.model.materials.get_E_by_material_id(material_id)
#return E
def get_E_by_property_id(self, property_id=None):
mid = self.get_material_id_by_property_id(property_id)
E = self.model.materials.get_E_by_material_id(mid)
return E
#def get_G_by_property_id(self, property_id=None):
#i = self.get_property_index_by_property_id(property_id)
#material_id = self.material_id[i]
#G = self.model.materials.get_G_by_material_id(material_id)
#return G
def get_G_by_property_id(self, property_id=None):
mid = self.get_material_id_by_property_id(property_id)
G = self.model.materials.get_G_by_material_id(mid)
return G
def get_J_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
return self.get_J_by_property_index(i)
def get_J_by_property_index(self, i=None):
J = []
for ni, ii in enumerate(i):
Ji = self._Ji(ii)
J.append(Ji)
return array(J, dtype='float64')
def _Ji(self, i):
Dout = self.OD[i, 0]
        if self.t[i] == 0.0:
return pi / 8. * Dout**4
Din = Dout - 2 * self.t[i]
        return pi / 8. * (Dout**4 - Din**4)
def get_c_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
c = self.c[i]
return c
def get_material_id_by_property_id(self, property_id=None):
i = self.get_property_index_by_property_id(property_id)
mid = self.material_id[i]
return mid
#=========================================================================
def get_density_by_property_id(self, property_id=None):
mid = self.get_material_id_by_property_id(property_id)
density = self.model.materials.get_density_by_material_id(mid)
return density
#def get_J_by_property_id(self, property_id=None):
#mid = self.get_material_id_by_property_id(property_id)
#J = self.model.materials.get_J_by_material_id(mid)
#return J
#def get_E_by_property_id(self, property_id=None):
#mid = self.get_material_id_by_property_id(property_id)
#E = self.model.materials.get_E_by_material_id(mid)
#return E
#=========================================================================
def write_card(self, bdf_file, size=8, property_id=None):
if self.n:
if self.n:
if property_id is None:
i = arange(self.n)
else:
assert len(unique(property_id)) == len(property_id), unique(property_id)
i = searchsorted(self.property_id, property_id)
for (pid, mid, (OD1, OD2), t, nsm) in zip(
                self.property_id[i], self.material_id[i], self.OD[i, :], self.t[i], self.nsm[i]):
#t = set_blank_if_default(t, OD1 / 2.)
#nsm = set_blank_if_default(nsm, 0.0)
#OD2 = set_blank_if_default(OD2, OD1)
card = ['PTUBE', pid, mid, OD1, t, nsm, OD2]
bdf_file.write(print_card_8(card))
def slice_by_index(self, i):
i = self._validate_slice(i)
obj = PTUBE(self.model)
n = len(i)
obj.n = n
obj.i = n
#obj._cards = self._cards[i]
#obj._comments = obj._comments[i]
#obj.comments = obj.comments[i]
obj.property_id = self.property_id[i]
obj.material_id = self.material_id[i]
obj.OD = self.OD[i, :]
obj.t = self.t[i]
obj.nsm = self.nsm[i]
return obj
| lgpl-3.0 | -8,218,476,148,353,684,000 | 33.773109 | 95 | 0.531658 | false |
calebbrown/calebcc | feedgenerator/feeds.py | 1 | 8744 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title=u"Poynter E-Media Tidbits",
... link=u"http://www.poynter.org/column.asp?id=31",
... description=u"A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language=u"en",
... )
>>> feed.add_item(
... title="Hello",
... link=u"http://www.holovaty.com/test/",
... description="Testing."
... )
>>> fp = open('test.rss', 'w')
>>> feed.write(fp, 'utf-8')
>>> fp.close()
For definitions of the different versions of RSS, see:
http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from xmlutils import SimplerXMLGenerator
from utils import rfc2822_date, rfc3339_date, get_tag_uri
from base import SyndicationFeed, Enclosure
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u"rss", self.rss_attributes())
handler.startElement(u"channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement(u"rss")
def rss_attributes(self):
return {u"version": self._version,
u"xmlns:atom": u"http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement(u'item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"item")
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", self.feed['link'])
handler.addQuickElement(u"description", self.feed['description'])
handler.addQuickElement(u"atom:link", None, {u"rel": u"self", u"href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement(u"language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"copyright", self.feed['feed_copyright'])
handler.addQuickElement(u"lastBuildDate", rfc2822_date(self.latest_post_date()).decode('utf-8'))
if self.feed['ttl'] is not None:
handler.addQuickElement(u"ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement(u"channel")
class RssUserland091Feed(RssFeed):
_version = u"0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = u"2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement(u"author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement(u"author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement(u"dc:creator", item["author_name"], {u"xmlns:dc": u"http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement(u"pubDate", rfc2822_date(item['pubdate']).decode('utf-8'))
if item['comments'] is not None:
handler.addQuickElement(u"comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement(u"guid", item['unique_id'])
if item['ttl'] is not None:
handler.addQuickElement(u"ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"enclosure", '',
{u"url": item['enclosure'].url, u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf8'
ns = u"http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u'feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement(u"feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {u"xmlns": self.ns, u"xml:lang": self.feed['language']}
else:
return {u"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", "", {u"rel": u"alternate", u"href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement(u"link", "", {u"rel": u"self", u"href": self.feed['feed_url']})
handler.addQuickElement(u"id", self.feed['id'])
handler.addQuickElement(u"updated", rfc3339_date(self.latest_post_date()).decode('utf-8'))
if self.feed['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement(u"email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement(u"uri", self.feed['author_link'])
handler.endElement(u"author")
if self.feed['subtitle'] is not None:
handler.addQuickElement(u"subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", "", {u"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement(u"entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"entry")
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", u"", {u"href": item['link'], u"rel": u"alternate"})
if item['pubdate'] is not None:
handler.addQuickElement(u"updated", rfc3339_date(item['pubdate']).decode('utf-8'))
# Author information.
if item['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement(u"email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement(u"uri", item['author_link'])
handler.endElement(u"author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement(u"id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement(u"summary", item['description'], {u"type": u"html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"link", '',
{u"rel": u"enclosure",
u"href": item['enclosure'].url,
u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", u"", {u"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement(u"rights", item['item_copyright'])
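# Hedged usage sketch (not part of the original module): Atom1Feed is driven
# the same way as the Rss201rev2Feed example in the module docstring. The
# titles, URLs and output file name below are illustrative placeholders, and
# the constructor arguments assume base.SyndicationFeed mirrors Django's.
#
# feed = Atom1Feed(
#     title=u"Example feed",
#     link=u"http://example.com/",
#     description=u"Example description",
#     language=u"en",
# )
# feed.add_item(
#     title=u"Hello",
#     link=u"http://example.com/test/",
#     description=u"Testing.",
# )
# fp = open('test.atom', 'w')
# feed.write(fp, 'utf-8')
# fp.close()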
| bsd-3-clause | -3,889,700,314,081,301,500 | 41.653659 | 123 | 0.613564 | false |
mathslinux/ceilometer | ceilometer/opts.py | 1 | 5518 | # Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import ceilometer.agent.manager
import ceilometer.alarm.notifier.rest
import ceilometer.alarm.rpc
import ceilometer.alarm.service
import ceilometer.api
import ceilometer.api.app
import ceilometer.api.controllers.v2.alarms
import ceilometer.cmd.eventlet.polling
import ceilometer.collector
import ceilometer.compute.discovery
import ceilometer.compute.notifications
import ceilometer.compute.util
import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.inspector
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
import ceilometer.coordination
import ceilometer.dispatcher
import ceilometer.dispatcher.file
import ceilometer.energy.kwapi
import ceilometer.event.converter
import ceilometer.hardware.discovery
import ceilometer.image.glance
import ceilometer.ipmi.notifications.ironic
import ceilometer.ipmi.platform.intel_node_manager
import ceilometer.ipmi.pollsters
import ceilometer.middleware
import ceilometer.network.notifications
import ceilometer.neutron_client
import ceilometer.notification
import ceilometer.nova_client
import ceilometer.objectstore.rgw
import ceilometer.objectstore.swift
import ceilometer.pipeline
import ceilometer.profiler.notifications
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
import ceilometer.service
import ceilometer.storage
import ceilometer.utils
def list_opts():
return [
('DEFAULT',
itertools.chain(ceilometer.agent.base.OPTS,
ceilometer.api.app.OPTS,
ceilometer.cmd.eventlet.polling.CLI_OPTS,
ceilometer.compute.notifications.OPTS,
ceilometer.compute.util.OPTS,
ceilometer.compute.virt.inspector.OPTS,
ceilometer.compute.virt.libvirt.inspector.OPTS,
ceilometer.dispatcher.OPTS,
ceilometer.image.glance.OPTS,
ceilometer.ipmi.notifications.ironic.OPTS,
ceilometer.middleware.OPTS,
ceilometer.network.notifications.OPTS,
ceilometer.nova_client.OPTS,
ceilometer.objectstore.swift.OPTS,
ceilometer.pipeline.OPTS,
ceilometer.profiler.notifications.OPTS,
ceilometer.sample.OPTS,
ceilometer.service.OPTS,
ceilometer.storage.OLD_OPTS,
ceilometer.utils.OPTS,)),
('alarm',
itertools.chain(ceilometer.alarm.notifier.rest.OPTS,
ceilometer.alarm.service.OPTS,
ceilometer.alarm.rpc.OPTS,
ceilometer.alarm.evaluator.gnocchi.OPTS,
ceilometer.api.controllers.v2.alarms.ALARM_API_OPTS)),
('api',
itertools.chain(ceilometer.api.OPTS,
ceilometer.api.app.API_OPTS,
[ceilometer.service.API_OPT])),
# deprecated path, new one is 'polling'
('central', ceilometer.agent.manager.OPTS),
('collector',
itertools.chain(ceilometer.collector.OPTS,
[ceilometer.service.COLL_OPT])),
('compute', ceilometer.compute.discovery.OPTS),
('coordination', ceilometer.coordination.OPTS),
('database', ceilometer.storage.OPTS),
('dispatcher_file', ceilometer.dispatcher.file.OPTS),
('event', ceilometer.event.converter.OPTS),
('hardware', ceilometer.hardware.discovery.OPTS),
('ipmi',
itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,
ceilometer.ipmi.pollsters.OPTS)),
('notification',
itertools.chain(ceilometer.notification.OPTS,
[ceilometer.service.NOTI_OPT])),
('polling', ceilometer.agent.manager.OPTS),
('publisher', ceilometer.publisher.utils.OPTS),
('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS),
('publisher_rpc', ceilometer.publisher.messaging.RPC_OPTS),
('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS),
('service_credentials', ceilometer.service.CLI_OPTS),
('service_types',
itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS,
ceilometer.image.glance.SERVICE_OPTS,
ceilometer.neutron_client.SERVICE_OPTS,
ceilometer.nova_client.SERVICE_OPTS,
ceilometer.objectstore.rgw.SERVICE_OPTS,
ceilometer.objectstore.swift.SERVICE_OPTS,)),
('vmware', ceilometer.compute.virt.vmware.inspector.OPTS),
('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS),
]
| apache-2.0 | -4,121,054,771,720,722,400 | 43.5 | 79 | 0.660747 | false |
google/j2cl | dev/test_all.py | 1 | 1081 | # Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs various tests in the repository."""
import argparse
import subprocess
def main(argv):
root = "//third_party/java_src/j2cl/"
tests = [root + t + "/..." for t in argv.test_pattern] or [root + "..."]
subprocess.call(["blaze", "test"] + tests)
def add_arguments(parser):
parser.add_argument(
"test_pattern",
metavar="<root>",
nargs="*",
help="test root(s). e.g. transpiler jre")
def run_for_presubmit():
argv = argparse.Namespace(test_pattern=[])
main(argv)
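# Hedged usage sketch (not part of the original script); the wiring below just
# reuses add_arguments()/main() from above, and the target root is the one
# hard-coded in main(). The pattern names are illustrative placeholders.
#
# parser = argparse.ArgumentParser()
# add_arguments(parser)
# main(parser.parse_args(["transpiler", "jre"]))
# # -> blaze test //third_party/java_src/j2cl/transpiler/...
# #               //third_party/java_src/j2cl/jre/...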
| apache-2.0 | 8,376,689,724,084,247,000 | 29.027778 | 74 | 0.695652 | false |
riklaunim/django-custom-multisite | django/contrib/auth/models.py | 1 | 17160 | import urllib
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
# UNUSABLE_PASSWORD is still imported here for backwards compatibility
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable, UNUSABLE_PASSWORD)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from audioapp.apps import multisite
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save()
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label,
model),
)
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
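# Hedged illustration (not part of the original module): the three default
# permissions described in the Permission docstring are checked with
# "<app_label>.<codename>" strings; the app and model names here are
# hypothetical placeholders.
#
# user.has_perm('news.add_story')     # "add" permission on news.Story
# user.has_perm('news.change_story')  # "change" permission
# user.has_perm('news.delete_story')  # "delete" permission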
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
def get_by_natural_key(self, name):
return self.get(name=name)
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission,
verbose_name=_('permissions'), blank=True)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
def natural_key(self):
return (self.name,)
class UserManager(multisite.CurrentSiteManager, models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the address by lowercasing the domain part of the email
address.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
def create_user(self, username, email=None, password=None):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = UserManager.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=False, is_active=True, is_superuser=False,
last_login=now, date_joined=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(username=username)
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
if obj is not None:
permissions.update(backend.get_all_permissions(user, obj))
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if anon or active or backend.supports_inactive_user:
if hasattr(backend, "has_perm"):
if obj is not None:
if backend.has_perm(user, perm, obj):
return True
else:
if backend.has_perm(user, perm):
return True
return False
def _user_has_module_perms(user, app_label):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if anon or active or backend.supports_inactive_user:
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class User(multisite.MultiSitesMixin, multisite.SiteFieldMixin,
models.Model):
"""
Users within the Django authentication system are represented by this
model.
Username and password are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'))
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
password = models.CharField(_('password'), max_length=128)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
last_login = models.DateTimeField(_('last login'), default=timezone.now)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'))
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text='Specific permissions for this user.')
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
unique_together = ('username', 'site')
def __unicode__(self):
return self.username
def natural_key(self):
return (self.username,)
def get_absolute_url(self):
return "/users/%s/" % urllib.quote(smart_str(self.username))
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = u'%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save()
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
if obj is not None:
permissions.update(backend.get_group_permissions(self,
obj))
else:
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable(
'You need to set AUTH_PROFILE_MODULE in your project '
'settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable(
'app_label and model_name should be separated by a dot in '
'the AUTH_PROFILE_MODULE setting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable(
'Unable to load the profile model, check '
'AUTH_PROFILE_MODULE in your project settings')
self._profile_cache = model._default_manager.using(
self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class AnonymousUser(object):
id = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager()
_user_permissions = EmptyManager()
def __init__(self):
pass
def __unicode__(self):
return 'AnonymousUser'
def __str__(self):
return unicode(self).encode('utf-8')
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
| bsd-3-clause | 6,168,093,177,801,387,000 | 34.824635 | 79 | 0.617599 | false |
Azure/azure-sdk-for-python | sdk/digitaltwins/azure-digitaltwins-core/tests/_preparer.py | 1 | 5928 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from collections import namedtuple
import functools
import os
import uuid
from devtools_testutils import AzureMgmtPreparer
from azure_devtools.scenario_tests.exceptions import AzureTestError
from devtools_testutils import ResourceGroupPreparer
FakeResource = namedtuple(
'FakeResource',
['name', 'id', 'host_name']
)
class DigitalTwinsRGPreparer(ResourceGroupPreparer):
def create_resource(self, name, **kwargs):
if self.is_live and 'AZURE_DIGITAL_TWINS_HOSTNAME' in os.environ:
self.resource = self.resource or FakeResource(
name=name,
id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/"+name,
host_name=None
)
return {
self.parameter_name: self.resource,
self.parameter_name_for_location: self.location,
}
return super(DigitalTwinsRGPreparer, self).create_resource(name, **kwargs)
def remove_resource(self, name, **kwargs):
if 'AZURE_DIGITAL_TWINS_HOSTNAME' not in os.environ:
return super(DigitalTwinsRGPreparer, self).remove_resource(name, **kwargs)
class DigitalTwinsPreparer(AzureMgmtPreparer):
def __init__(self, name_prefix='',
use_cache=False,
random_name_length=50,
location='westcentralus',
parameter_name='digitaltwin',
role_assignment_name='Azure Digital Twins Data Owner',
resource_group_parameter_name='resource_group',
disable_recording=True,
playback_fake_resource=None,
client_kwargs=None,
random_name_enabled=True):
super(DigitalTwinsPreparer, self).__init__(
name_prefix,
random_name_length,
playback_fake_resource=playback_fake_resource,
disable_recording=disable_recording,
client_kwargs=client_kwargs,
random_name_enabled=random_name_enabled
)
self.location = location
self.resource_group_parameter_name = resource_group_parameter_name
self.parameter_name = parameter_name
self.resource_moniker = self.name_prefix
self.use_cache = use_cache
self.role_name = role_assignment_name
if random_name_enabled:
self.resource_moniker += "digitaltwinsname"
self.set_cache(use_cache, None, location)
def create_resource(self, name, **kwargs):
if self.is_live:
if os.environ.get('AZURE_DIGITAL_TWINS_HOSTNAME'):
host_name=os.environ['AZURE_DIGITAL_TWINS_HOSTNAME']
name = host_name.split('.')[0]
self.resource = FakeResource(name=name, id=name, host_name=host_name)
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
# We have to import here due to a bug in the mgmt SDK
from azure.mgmt.digitaltwins import AzureDigitalTwinsManagementClient
self.client = self.create_mgmt_client(AzureDigitalTwinsManagementClient)
group = self._get_resource_group(**kwargs)
result = self.client.digital_twins.create_or_update(group.name, name, self.location)
self.resource = result.result()
self.id = self.resource.id
self._add_role_assignment(group)
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
self.resource = FakeResource(
name=name,
id=name,
host_name= self.resource_moniker + ".api.wcus.digitaltwins.azure.net")
return {self.parameter_name: self.resource}
def remove_resource(self, name, **kwargs):
if self.is_live and 'AZURE_DIGITAL_TWINS_HOSTNAME' not in os.environ:
group = self._get_resource_group(**kwargs)
self.client.digital_twins.delete(group.name, name, polling=False)
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a Digital Twin, a resource group is required. Please add ' \
'decorator @{} in front of this preparer.'
raise AzureTestError(template.format(ResourceGroupPreparer.__name__))
def _add_role_assignment(self, resource_group):
from azure.mgmt.authorization import AuthorizationManagementClient
role_client = self.create_mgmt_client(AuthorizationManagementClient)
sp_id = os.environ.get('AZURE_CLIENT_ID')
if not sp_id:
raise ValueError("Cannot assign role to DigitalTwins with AZURE_CLIENT_ID.")
roles = list(role_client.role_definitions.list(
resource_group.id,
filter="roleName eq '{}'".format(self.role_name)
))
assert len(roles) == 1
dt_role = roles[0]
role_client.role_assignments.create(
self.id,
uuid.uuid4(), # Role assignment random name
{
'role_definition_id': dt_role.id,
'principal_id': sp_id
}
)
CachedDigitalTwinsRGPreparer = functools.partial(DigitalTwinsRGPreparer, use_cache=True)
CachedDigitalTwinsPreparer = functools.partial(DigitalTwinsPreparer, use_cache=True)
| mit | -5,432,270,477,441,370,000 | 40.746479 | 100 | 0.593455 | false |
caktus/django-opendebates | opendebates/tests/test_flatpage_metadata_override.py | 1 | 2796 | from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase
from django.utils.html import escape
from opendebates.models import FlatPageMetadataOverride
from opendebates import site_defaults
from .factories import SiteFactory, DebateFactory
class FlatPageTest(TestCase):
def setUp(self):
self.site = SiteFactory()
self.debate = DebateFactory(site=self.site)
self.page1_content = 'About the site'
self.page1 = FlatPage(url='/{}/about/'.format(self.debate.prefix),
title='About',
content=self.page1_content)
self.page1.save()
self.page1.sites.add(self.site)
self.page2_content = '[An embedded video]'
self.page2 = FlatPage(url='/{}/watch/'.format(self.debate.prefix),
title='Watch Now!',
content=self.page2_content)
self.page2.save()
self.page2.sites.add(self.site)
FlatPageMetadataOverride(page=self.page2).save()
def tearDown(self):
Site.objects.clear_cache()
def test_metadata_not_overridden(self):
rsp = self.client.get(self.page1.url)
self.assertContains(rsp, self.page1_content)
self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE))
self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION))
self.assertContains(rsp, escape(site_defaults.FACEBOOK_IMAGE))
def test_default_metadata_overrides(self):
rsp = self.client.get(self.page2.url)
self.assertContains(rsp, self.page2_content)
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE))
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION))
self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_IMAGE))
self.assertNotContains(rsp, escape(site_defaults.TWITTER_IMAGE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_TITLE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_DESCRIPTION))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_IMAGE))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE))
def test_custom_metadata_overrides(self):
FlatPageMetadataOverride(
page=self.page1,
facebook_title='Foo! Foo! Foo!',
twitter_description='lorem ipsum dolor sit amet').save()
rsp = self.client.get(self.page1.url)
self.assertContains(rsp, escape('Foo! Foo! Foo!'))
self.assertContains(rsp, escape('lorem ipsum dolor sit amet'))
self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE))
| apache-2.0 | -2,610,047,757,008,014,300 | 44.836066 | 85 | 0.675966 | false |
patrickm/chromium.src | chrome/common/extensions/docs/server2/future.py | 1 | 1238 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
_no_value = object()
def Collect(futures):
'''Creates a Future which returns a list of results from each Future in
|futures|.
'''
return Future(callback=lambda: [f.Get() for f in futures])
class Future(object):
'''Stores a value, error, or callback to be used later.
'''
def __init__(self, value=_no_value, callback=None, exc_info=None):
self._value = value
self._callback = callback
self._exc_info = exc_info
if (self._value is _no_value and
self._callback is None and
self._exc_info is None):
raise ValueError('Must have either a value, error, or callback.')
def Get(self):
'''Gets the stored value, error, or callback contents.
'''
if self._value is not _no_value:
return self._value
if self._exc_info is not None:
self._Raise()
try:
self._value = self._callback()
return self._value
except:
self._exc_info = sys.exc_info()
self._Raise()
def _Raise(self):
exc_info = self._exc_info
raise exc_info[0], exc_info[1], exc_info[2]
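# Illustrative usage sketch (not part of the original module): a Future defers
# its callback until Get() is first called, and Collect() gathers several
# Futures into a single Future of a list.
if __name__ == '__main__':
  squares = Collect([Future(callback=lambda i=i: i * i) for i in range(3)])
  print squares.Get()           # [0, 1, 4]
  print Future(value=42).Get()  # 42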
| bsd-3-clause | -2,560,667,322,118,759,400 | 26.511111 | 73 | 0.638934 | false |
cliali/py2 | a.py | 1 | 1578 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Simple Bot to reply to Telegram messages. This is built on the API wrapper, see
# echobot2.py to see the same example built on the telegram.ext bot framework.
# This program is dedicated to the public domain under the CC0 license.
import logging
import telegram
from telegram.error import NetworkError, Unauthorized
from time import sleep
update_id = None
def main():
global update_id
# Telegram Bot Authorization Token
bot = telegram.Bot('277679081:AAGk3IXlId9PKUn3n_5wrfrUIR_mgsUVCeE')
# get the first pending update_id, this is so we can skip over it in case
# we get an "Unauthorized" exception.
try:
update_id = bot.getUpdates()[0].update_id
except IndexError:
update_id = None
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
while True:
try:
echo(bot)
except NetworkError:
sleep(1)
except Unauthorized:
# The user has removed or blocked the bot.
update_id += 1
def echo(bot):
global update_id
# Request updates after the last update_id
for update in bot.getUpdates(offset=update_id, timeout=10):
# chat_id is required to reply to any message
chat_id = update.message.chat_id
update_id = update.update_id + 1
if update.message: # your bot can receive updates without messages
# Reply to the message
update.message.reply_text(update.message.text)
if __name__ == '__main__':
main()
| apache-2.0 | 2,133,397,267,722,731,300 | 28.773585 | 86 | 0.653359 | false |
flavoi/diventi | diventi/landing/views.py | 1 | 3927 | from itertools import chain
from django.shortcuts import render, redirect, resolve_url
from django.views.generic.detail import DetailView
from django.views.generic import ListView, TemplateView
from django.views.generic.edit import CreateView
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.http import HttpResponseNotFound
from diventi.accounts.models import DiventiUser
from diventi.accounts.forms import DiventiUserInitForm
from diventi.products.models import Product
from diventi.blog.models import Article
from diventi.feedbacks.models import Survey, Answer
from diventi.core.views import StaffRequiredMixin
from .models import (
Section,
AboutArticle,
PolicyArticle,
)
class LandingSearchView(ListView):
""" Search for every content in the project. """
template_name = "landing/search_results_quick.html"
context_object_name = 'results'
model = Section
def get_queryset(self):
results = super(LandingSearchView, self).get_queryset()
query = self.request.GET.get('q')
if query:
articles = Article.search(self, query)
products = Product.search(self, query)
users = DiventiUser.search(self, query)
results = list(chain(products, articles, users))
else:
results = None
return results
def get_context_data(self, **kwargs):
context = super(LandingSearchView, self).get_context_data(**kwargs)
context['search_query'] = self.request.GET.get('q')
return context
class DashboardView(StaffRequiredMixin, ListView):
""" Report relevant piece of contents of any supported app. """
template_name = "landing/analytics_quick.html"
context_object_name = 'results'
model = Section
def get_queryset(self):
results = super(DashboardView, self).get_queryset()
articles = Article.reporting(self)
products = Product.reporting(self)
users = DiventiUser.reporting(self)
results = list(chain(users,articles, products, ))
return results
def get_context_data(self, **kwargs):
context = super(DashboardView, self).get_context_data(**kwargs)
featured_section = Section.objects.featured()
context['featured_section'] = featured_section
return context
def get_landing_context(request):
sections = Section.objects.not_featured()
featured_section = Section.objects.featured()
if featured_section:
pass
elif sections.exists():
featured_section = sections.first()
sections = sections.exclude(id=featured_section.id)
else:
return HttpResponseNotFound(_('This page is not available yet.'))
context = {
'sections': sections,
'featured_section': featured_section,
}
return context
class LandingTemplateView(TemplateView):
""" Renders the landing page with all necessary context. """
template_name = "landing/landing_quick.html"
def get_context_data(self, **kwargs):
context = super(LandingTemplateView, self).get_context_data(**kwargs)
landing_context = get_landing_context(self.request)
context = {**context, **landing_context} # Merge the two dictionaries
return context
class AboutArticleDetailView(DetailView):
""" Renders the 'about us' article and the content related to it. """
model = AboutArticle
template_name = "landing/about_article_quick.html"
class PolicyArticleDetailView(DetailView):
""" Renders the policy article and the content related to it. """
model = PolicyArticle
template_name = "landing/about_article_quick.html"
| apache-2.0 | 2,472,953,982,382,659,600 | 30.725 | 77 | 0.675325 | false |
Uberi/botty-bot-bot-bot | src/plugins/timezones.py | 1 | 5164 | #!/usr/bin/env python3
import re
from datetime import datetime, date
import pytz
from .utilities import BasePlugin
from .utilities import clockify, untag_word
timezone_abbreviations = {
"est": pytz.timezone("Canada/Eastern"),
"edt": pytz.timezone("Canada/Eastern"),
"atlantic": pytz.timezone("Canada/Eastern"),
"eastern": pytz.timezone("Canada/Eastern"),
"toronto": pytz.timezone("Canada/Eastern"),
"waterloo": pytz.timezone("Canada/Eastern"),
"ontario": pytz.timezone("Canada/Eastern"),
"ny": pytz.timezone("US/Eastern"),
"pst": pytz.timezone("Canada/Pacific"),
"vancouver": pytz.timezone("Canada/Pacific"),
"pacific": pytz.timezone("US/Pacific-New"),
"sf": pytz.timezone("US/Pacific-New"),
"la": pytz.timezone("US/Pacific-New"),
"california": pytz.timezone("US/Pacific-New"),
}
other_timezones = (
("toronto", pytz.timezone("Canada/Eastern")),
("vancouver", pytz.timezone("Canada/Pacific")),
("utc", pytz.utc),
)
class TimezonesPlugin(BasePlugin):
"""
Timezone conversion plugin for Botty.
Example invocations:
#general | Me: 4pm local
#general | Botty: *EASTERN DAYLIGHT TIME* (Μe's local time) :clock4: 16:00 :point_right: *TORONTO* :clock4: 16:00 - *VANCOUVER* :clock1: 13:00 - *UTC* :clock8: 20:00
#general | Me: 6:23pm pst
#general | Botty: *PST* :clock630: 18:23 :point_right: *TORONTO* :clock930: 21:23 - *VANCOUVER* :clock630: 18:23 - *UTC* :clock130: 1:23 (tomorrow)
#general | Me: 6:23 here
#general | Botty: *EASTERN DAYLIGHT TIME* (Μe's local time) :clock630: 6:23 :point_right: *TORONTO* :clock630: 6:23 - *VANCOUVER* :clock330: 3:23 - *UTC* :clock1030: 10:23
#general | Me: 8pm toronto
#general | Botty: *TORONTO* :clock8: 20:00 :point_right: *TORONTO* :clock8: 20:00 - *VANCOUVER* :clock5: 17:00 - *UTC* :clock12: 0:00 (tomorrow)
"""
def __init__(self, bot):
super().__init__(bot)
def on_message(self, m):
if not m.is_user_text_message: return False
match = re.search(r"\b(\d\d?)(?::(\d\d))?(?:\s*(am|pm))?\s+(\w+)", m.text, re.IGNORECASE)
if not match: return False
# get time of day
if not match.group(2) and not match.group(3): return False # ignore plain numbers like "4 potato"
hour = int(match.group(1))
minute = 0 if match.group(2) is None else int(match.group(2))
if not (0 <= hour <= 23) or not (0 <= minute <= 59): return False
if match.group(3) is not None and match.group(3).lower() == "pm":
if not (1 <= hour <= 12): return False
hour = (hour % 12) + 12
today = date.today()
naive_timestamp = datetime(today.year, today.month, today.day, hour, minute)
timezone_name = match.group(4)
# get timezone and localized timestamp
if timezone_name.lower() in timezone_abbreviations: # use the specified timezone
timezone = timezone_abbreviations[timezone_name.lower()]
timezone_is_from_user_info = False
elif timezone_name.lower() in {"local", "here"}: # use the user's local timezone, specified in their profile
user_info = self.get_user_info_by_id(m.user_id)
try:
timezone = pytz.timezone(user_info.get("tz"))
except: # user does not have a valid timezone
return False
timezone_name = user_info.get("tz_label")
timezone_is_from_user_info = True
else:
return False
timestamp = timezone.localize(naive_timestamp)
# perform timezone conversions
timezone_conversions = []
for other_timezone_name, other_timezone in other_timezones:
converted_timestamp = timestamp.astimezone(other_timezone)
if converted_timestamp.date() > timestamp.date():
timezone_conversions.append("*{}* :{}: {}:{:>02} (tomorrow)".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute))
elif converted_timestamp.date() < timestamp.date():
timezone_conversions.append("*{}* :{}: {}:{:>02} (yesterday)".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute))
else:
timezone_conversions.append("*{}* :{}: {}:{:>02}".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute))
if timezone_is_from_user_info:
selected_time = "(timezone from {}'s profile) *{}* :{}: {}:{:>02}".format(untag_word(self.get_user_name_by_id(m.user_id)), timezone_name.upper(), clockify(timestamp), timestamp.hour, timestamp.minute)
else:
selected_time = "*{}* :{}: {}:{:>02}".format(timezone_name.upper(), clockify(timestamp), timestamp.hour, timestamp.minute)
self.respond_raw("{} :point_right: {}".format(selected_time, " - ".join(timezone_conversions)))
return True
| mit | 1,056,715,963,307,125,400 | 49.607843 | 212 | 0.610616 | false |
muttiopenbts/fusion | fusion_level02_4.py | 1 | 1932 | #!/usr/bin/python
'''
Simple script to interact with fusion level 02 challenge network daemon,
#!/usr/bin/python
[email protected]
'''
from pwn import *
import sys
#Use this hexdump lib because pwntools hexdump is too slow
from hexdump import *
def doMode(mode): # Either E or Q
print 'Sending mode call: {}'.format(mode)
#Specify encryption function
io.send(mode)
def doEncryption(message, fake_message_size=None):
doMode('E')
if fake_message_size is not None:
message_size = fake_message_size
else:
message_size = len(message)
#Specify message size as little endian 8(d) = \x08\x00\x00\x00
encryption_size_bytes = p32(message_size) #Use p32, p64, or pack
print 'Sending message size as bytes\n{}'.format(encryption_size_bytes.encode('hex'))
print 'Sending message size as bytes\n{}'.format(unpack(encryption_size_bytes))
#Specify size of message to be encrypted
io.send(encryption_size_bytes)
#Generate message and send
print 'Sending message\n{}'.format(hexdump(message))
io.send(message)
data = io.recvregex('your file --]\n')
log.info(data)
#Server sends message size as 4 bytes little endian
data = io.recvn(4)
log.info('Received encrypted message size as bytes\n{}'.format(data.encode('hex')))
log.info('Size in integer\n{}'.format(unpack(data)))
encrypted_message = io.recvn(message_size)
log.info('Received encrypted message\n{}'.format(hexdump(encrypted_message)))
return encrypted_message
if __name__ == "__main__":
host = sys.argv[1]
port = sys.argv[2]
io = remote(host,int(port))
#size = 32*4096 # No crash
# xor key is 32*4 = 128 bytes
message_size = 32*4096+100 # crash
message = cyclic(message_size) #Generate unique string to help determin payload register overwrite
xor_message = doEncryption(message)
message = doEncryption(xor_message)
doMode('Q')
| gpl-3.0 | 536,043,605,942,736,000 | 36.153846 | 102 | 0.683747 | false |
xiaonanln/myleetcode-python | src/Sudoku Solver.py | 1 | 2697 | class Solution:
# @param board, a 9x9 2D array
# Solve the Sudoku by modifying the input board in-place.
# Do not return any value.
def solveSudoku(self, board):
rowUsable = [set(xrange(1, 10)) for i in xrange(9)]
colUsable = [set(xrange(1, 10)) for i in xrange(9)]
blockUsable = [set(xrange(1, 10)) for i in xrange(9)]
__board = board
board = [ [ int(c) if c != '.' else None for c in row ] for row in board]
for row in xrange(9):
boardrow = board[row]
for col in xrange(9):
n = boardrow[col]
if n is None: continue
rowUsable[row].remove(n)
colUsable[col].remove(n)
blockindex = (row // 3) * 3 + (col // 3)
blockUsable[blockindex].remove(n)
self.rowUsable = rowUsable
self.colUsable = colUsable
self.blockUsable = blockUsable
r, c = 0, 0
self.solve(board, r, c)
for i, row in enumerate(board):
__board[i] = ''.join( str(n) for n in row)
def solve(self, board, r, c):
if c == 9:
c = 0
r += 1
if r == 9:
return True
if board[r][c] is None:
bi = (r // 3) * 3 + (c // 3)
usable = self.rowUsable[r] & self.colUsable[c] & self.blockUsable[bi]
# if r == 1: print self.rowUsable[1], usable
for n in usable:
# if r == 1: print 'using', n
board[r][c] = n
self.rowUsable[r].remove(n)
self.colUsable[c].remove(n)
self.blockUsable[bi].remove(n)
if self.solve(board, r, c+1): return True
board[r][c] = None
self.rowUsable[r].add(n)
self.colUsable[c].add(n)
self.blockUsable[bi].add(n)
return False
else:
return self.solve(board, r, c + 1)
E = '.'
# board = [
# [5, 3, E, E, 7, E, E, E, E],
# [6, E, E, 1, 9, 5, E, E, E],
# [E, 9, 8, E, E, E, E, 6, E],
# [8, E, E, E, 6, E, E, E, 3],
# [4, E, E, 8, E, 3, E, E, 1],
# [7, E, E, E, 2, E, E, E, 6],
# [E, 6, E, E, E, E, 2, 8, E],
# [E, E, E, 4, 1, 9, E, E, 5],
# [E, E, E, E, 8, E, E, 7, 9],
# ]
board = ["..9748...","7........",".2.1.9...","..7...24.",".64.1.59.",".98...3..","...8.3.2.","........6","...2759.."]
Solution().solveSudoku(board)
print '\n'.join(board) | apache-2.0 | 8,919,941,869,076,930,000 | 31.119048 | 118 | 0.411568 | false |
kennethreitz/pipenv | pipenv/patched/notpip/_internal/network/download.py | 1 | 6458 | """Download files with progress indicators.
"""
import cgi
import logging
import mimetypes
import os
from pipenv.patched.notpip._vendor import requests
from pipenv.patched.notpip._vendor.requests.models import CONTENT_CHUNK_SIZE
from pipenv.patched.notpip._internal.models.index import PyPI
from pipenv.patched.notpip._internal.network.cache import is_from_cache
from pipenv.patched.notpip._internal.network.utils import response_chunks
from pipenv.patched.notpip._internal.utils.misc import (
format_size,
redact_auth_from_url,
splitext,
)
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
from pipenv.patched.notpip._internal.utils.ui import DownloadProgressProvider
if MYPY_CHECK_RUNNING:
from typing import Iterable, Optional
from pipenv.patched.notpip._vendor.requests.models import Response
from pipenv.patched.notpip._internal.models.link import Link
from pipenv.patched.notpip._internal.network.session import PipSession
logger = logging.getLogger(__name__)
def _get_http_response_size(resp):
# type: (Response) -> Optional[int]
try:
return int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
return None
def _prepare_download(
resp, # type: Response
link, # type: Link
progress_bar # type: str
):
# type: (...) -> Iterable[bytes]
total_length = _get_http_response_size(resp)
if link.netloc == PyPI.file_storage_domain:
url = link.show_url
else:
url = link.url_without_fragment
logged_url = redact_auth_from_url(url)
if total_length:
logged_url = '{} ({})'.format(logged_url, format_size(total_length))
if is_from_cache(resp):
logger.info("Using cached %s", logged_url)
else:
logger.info("Downloading %s", logged_url)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif is_from_cache(resp):
show_progress = False
elif not total_length:
show_progress = True
elif total_length > (40 * 1000):
show_progress = True
else:
show_progress = False
chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)
if not show_progress:
return chunks
return DownloadProgressProvider(
progress_bar, max=total_length
)(chunks)
def sanitize_content_filename(filename):
# type: (str) -> str
"""
Sanitize the "filename" value from a Content-Disposition header.
"""
return os.path.basename(filename)
def parse_content_disposition(content_disposition, default_filename):
# type: (str, str) -> str
"""
Parse the "filename" value from a Content-Disposition header, and
return the default filename if the result is empty.
"""
_type, params = cgi.parse_header(content_disposition)
filename = params.get('filename')
if filename:
# We need to sanitize the filename to prevent directory traversal
# in case the filename contains ".." path parts.
filename = sanitize_content_filename(filename)
return filename or default_filename
def _get_http_response_filename(resp, link):
# type: (Response, Link) -> str
"""Get an ideal filename from the given HTTP response, falling back to
the link filename if not provided.
"""
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
filename = parse_content_disposition(content_disposition, filename)
ext = splitext(filename)[1] # type: Optional[str]
if not ext:
ext = mimetypes.guess_extension(
resp.headers.get('content-type', '')
)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
return filename
def _http_get_download(session, link):
# type: (PipSession, Link) -> Response
target_url = link.url.split('#', 1)[0]
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding We're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
return resp
class Download(object):
def __init__(
self,
response, # type: Response
filename, # type: str
chunks, # type: Iterable[bytes]
):
# type: (...) -> None
self.response = response
self.filename = filename
self.chunks = chunks
class Downloader(object):
def __init__(
self,
session, # type: PipSession
progress_bar, # type: str
):
# type: (...) -> None
self._session = session
self._progress_bar = progress_bar
def __call__(self, link):
# type: (Link) -> Download
try:
resp = _http_get_download(self._session, link)
except requests.HTTPError as e:
logger.critical(
"HTTP error %s while getting %s", e.response.status_code, link
)
raise
return Download(
resp,
_get_http_response_filename(resp, link),
_prepare_download(resp, link, self._progress_bar),
)
| mit | -2,368,371,102,283,550,700 | 31.29 | 78 | 0.644162 | false |
stefanopanella/xapi-storage-plugins | libs/losetup.py | 1 | 1260 | import os.path
from xapi.storage.common import call
# Use Linux "losetup" to create block devices from files
class Loop:
"""An active loop device"""
def __init__(self, path, loop):
self.path = path
self.loop = loop
def destroy(self, dbg):
call(dbg, ["losetup", "-d", self.loop])
def block_device(self):
return self.loop
def find(dbg, path):
"""Return the active loop device associated with the given path"""
# The kernel loop driver will transparently follow symlinks, so
# we must too.
path = os.path.realpath(path)
for line in call(dbg, ["losetup", "-a"]).split("\n"):
line = line.strip()
if line != "":
bits = line.split()
loop = bits[0][0:-1]
open_bracket = line.find('(')
close_bracket = line.find(')')
this_path = line[open_bracket + 1:close_bracket]
if this_path == path:
return Loop(path, loop)
return None
def create(dbg, path):
"""Creates a new loop device backed by the given file"""
# losetup will resolve paths and 'find' needs to use string equality
path = os.path.realpath(path)
call(dbg, ["losetup", "-f", path])
return find(dbg, path)
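# Illustrative sketch (not part of the original module; it needs root and an
# existing backing file at an assumed path): attach a file to a loop device,
# print the resulting block device, then detach it.
if __name__ == "__main__":
    loop = create("debug", "/tmp/disk.img")
    print(loop.block_device())
    loop.destroy("debug")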
| lgpl-2.1 | -1,819,758,216,933,129,700 | 26.391304 | 72 | 0.584127 | false |
mosarg/gestione_scuola | backend/migrations/0001_initial.py | 1 | 2074 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Backend'
db.create_table(u'backend_backend', (
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('backendId', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('kind', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('serverIp', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39)),
('serverFqdn', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'backend', ['Backend'])
def backwards(self, orm):
# Deleting model 'Backend'
db.delete_table(u'backend_backend')
models = {
u'backend.backend': {
'Meta': {'ordering': "('-modified', '-created')", 'object_name': 'Backend'},
'backendId': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'kind': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'serverFqdn': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'serverIp': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'})
}
}
complete_apps = ['backend'] | gpl-3.0 | -7,381,446,622,908,163,000 | 48.404762 | 125 | 0.607522 | false |
oarriaga/spatial_transformer_networks | src/models/layers.py | 1 | 5049 | from keras import backend as K
from keras.engine.topology import Layer
if K.backend() == 'tensorflow':
import tensorflow as tf
def K_meshgrid(x, y):
return tf.meshgrid(x, y)
def K_linspace(start, stop, num):
return tf.linspace(start, stop, num)
else:
raise Exception("Only 'tensorflow' is supported as backend")
class BilinearInterpolation(Layer):
"""Performs bilinear interpolation as a keras layer
References
----------
[1] Spatial Transformer Networks, Max Jaderberg, et al.
[2] https://github.com/skaae/transformer_network
[3] https://github.com/EderSantana/seya
"""
def __init__(self, output_size, **kwargs):
self.output_size = output_size
super(BilinearInterpolation, self).__init__(**kwargs)
def get_config(self):
return {
'output_size': self.output_size,
}
def compute_output_shape(self, input_shapes):
height, width = self.output_size
num_channels = input_shapes[0][-1]
return (None, height, width, num_channels)
def call(self, tensors, mask=None):
X, transformation = tensors
output = self._transform(X, transformation, self.output_size)
return output
def _interpolate(self, image, sampled_grids, output_size):
batch_size = K.shape(image)[0]
height = K.shape(image)[1]
width = K.shape(image)[2]
num_channels = K.shape(image)[3]
x = K.cast(K.flatten(sampled_grids[:, 0:1, :]), dtype='float32')
y = K.cast(K.flatten(sampled_grids[:, 1:2, :]), dtype='float32')
x = .5 * (x + 1.0) * K.cast(width, dtype='float32')
y = .5 * (y + 1.0) * K.cast(height, dtype='float32')
x0 = K.cast(x, 'int32')
x1 = x0 + 1
y0 = K.cast(y, 'int32')
y1 = y0 + 1
max_x = int(K.int_shape(image)[2] - 1)
max_y = int(K.int_shape(image)[1] - 1)
x0 = K.clip(x0, 0, max_x)
x1 = K.clip(x1, 0, max_x)
y0 = K.clip(y0, 0, max_y)
y1 = K.clip(y1, 0, max_y)
pixels_batch = K.arange(0, batch_size) * (height * width)
pixels_batch = K.expand_dims(pixels_batch, axis=-1)
flat_output_size = output_size[0] * output_size[1]
base = K.repeat_elements(pixels_batch, flat_output_size, axis=1)
base = K.flatten(base)
# base_y0 = base + (y0 * width)
base_y0 = y0 * width
base_y0 = base + base_y0
# base_y1 = base + (y1 * width)
base_y1 = y1 * width
base_y1 = base_y1 + base
indices_a = base_y0 + x0
indices_b = base_y1 + x0
indices_c = base_y0 + x1
indices_d = base_y1 + x1
flat_image = K.reshape(image, shape=(-1, num_channels))
flat_image = K.cast(flat_image, dtype='float32')
pixel_values_a = K.gather(flat_image, indices_a)
pixel_values_b = K.gather(flat_image, indices_b)
pixel_values_c = K.gather(flat_image, indices_c)
pixel_values_d = K.gather(flat_image, indices_d)
x0 = K.cast(x0, 'float32')
x1 = K.cast(x1, 'float32')
y0 = K.cast(y0, 'float32')
y1 = K.cast(y1, 'float32')
area_a = K.expand_dims(((x1 - x) * (y1 - y)), 1)
area_b = K.expand_dims(((x1 - x) * (y - y0)), 1)
area_c = K.expand_dims(((x - x0) * (y1 - y)), 1)
area_d = K.expand_dims(((x - x0) * (y - y0)), 1)
values_a = area_a * pixel_values_a
values_b = area_b * pixel_values_b
values_c = area_c * pixel_values_c
values_d = area_d * pixel_values_d
return values_a + values_b + values_c + values_d
def _make_regular_grids(self, batch_size, height, width):
# making a single regular grid
x_linspace = K_linspace(-1., 1., width)
y_linspace = K_linspace(-1., 1., height)
x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace)
x_coordinates = K.flatten(x_coordinates)
y_coordinates = K.flatten(y_coordinates)
ones = K.ones_like(x_coordinates)
grid = K.concatenate([x_coordinates, y_coordinates, ones], 0)
# repeating grids for each batch
grid = K.flatten(grid)
grids = K.tile(grid, K.stack([batch_size]))
return K.reshape(grids, (batch_size, 3, height * width))
def _transform(self, X, affine_transformation, output_size):
batch_size, num_channels = K.shape(X)[0], K.shape(X)[3]
transformations = K.reshape(affine_transformation,
shape=(batch_size, 2, 3))
# transformations = K.cast(affine_transformation[:, 0:2, :], 'float32')
regular_grids = self._make_regular_grids(batch_size, *output_size)
sampled_grids = K.batch_dot(transformations, regular_grids)
interpolated_image = self._interpolate(X, sampled_grids, output_size)
new_shape = (batch_size, output_size[0], output_size[1], num_channels)
interpolated_image = K.reshape(interpolated_image, new_shape)
return interpolated_image
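# Minimal usage sketch (illustrative only; the input shape and layer sizes are
# assumptions, not part of the original module): a small localisation network
# regresses the 6 parameters of a 2x3 affine transform, which this layer then
# uses to resample the input image.
if __name__ == '__main__':
    from keras.layers import Input, Flatten, Dense
    from keras.models import Model
    image = Input(shape=(60, 60, 1))
    locnet = Flatten()(image)
    locnet = Dense(50, activation='relu')(locnet)
    # 6 outputs = flattened 2x3 affine matrix; in practice these weights are
    # usually initialised so training starts from the identity transform.
    theta = Dense(6)(locnet)
    warped = BilinearInterpolation(output_size=(30, 30))([image, theta])
    Model(inputs=image, outputs=warped).summary()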
| mit | -3,533,227,027,551,988,700 | 35.854015 | 79 | 0.575956 | false |
mattgemmell/DOT-MGTextEntry | mgtext.py | 1 | 12149 | #! /usr/bin/python
"""
MGText
Text-entry plugin for Pimoroni's menu system for the Raspberry Pi Display-O-Tron.
Code and info: https://github.com/mattgemmell/DOT-MGTextEntry
By: Matt Gemmell
http://mattgemmell.com/
http://twitter.com/mattgemmell
"""
from dot3k.menu import MenuOption
_UP = 0
_DOWN = 1
_LEFT = 2
_RIGHT = 3
class MGText(MenuOption):
def __init__(self):
self.cols = 16
self.initialized = False
self.scroll_up_icon = chr(0)
self.scroll_down_icon = chr(1)
self.abbreviation_icon = chr(2)
self.placeholder_icon = chr(3)
self.caps_on = True
self.symbols_mode = False
self.cancel_aborts = False # by default, Cancel button acts as Delete
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
self.uppercase_letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
self.lowercase_letters = list('abcdefghijklmnopqrstuvwxyz')
self.space_symbol = 'Spc'
self.line_break = '\n' # for layout only; can't be entered
self.numbers = list('0123456789')
self.quick_punctuation = list('./:@')
self.symbols = list('./:@\'"~+-=_!?,;()[]<>{}\\^|&*$%#`')
self.caps_command = "Caps"
self.symbols_command = "More"
self.delete_command = "Del"
self.cancel_command = "Cancel"
self.commit_command = "Accept"
self.commands = [self.caps_command, self.symbols_command, self.delete_command, self.cancel_command, self.commit_command]
self.uppercase_set = self.uppercase_letters
self.uppercase_set.append(self.space_symbol)
self.uppercase_set.extend(self.numbers)
self.uppercase_set.extend(self.quick_punctuation)
self.uppercase_set.extend(self.commands)
self.lowercase_set = self.lowercase_letters
self.lowercase_set.append(self.space_symbol)
self.lowercase_set.extend(self.numbers)
self.lowercase_set.extend(self.quick_punctuation)
self.lowercase_set.extend(self.commands)
self.symbols_set = self.symbols
self.symbols_set.append(self.line_break)
self.symbols_set.extend(self.commands)
self.confirm_accept = "Yes"
self.confirm_cancel = "No"
self.confirm_quit = "Quit"
self.confirm_set = [self.confirm_accept, self.confirm_cancel, self.confirm_quit]
self.display_map = [] # 2D array of options
self.display_ranges = [] # 2D array of range-arrays with option extents
self.entered_text = ''
self.confirming = False
MenuOption.__init__(self)
self.is_setup = False
def set_value(self, value):
self.entered_text = value
def get_value(self):
return self.entered_text
def begin(self):
self.initialized = False
self.confirming = False
self.symbols_mode = False
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
self.set_value('')
self.update_display_map()
def setup(self, config):
MenuOption.setup(self, config)
def cleanup(self):
self.entered_text = ''
self.display_map = []
self.display_ranges = []
def update_display_map(self):
"""
Builds two datastructures:
- display_map is an array of rows of the display, with each entry being an array of that row's options as strings.
- display_ranges is similar, but each row-array contains dictionaries that are ranges of where the corresponding option renders on the display.
"""
self.display_map = []
self.display_ranges = []
options_set = self.uppercase_set if self.caps_on else self.lowercase_set
if self.symbols_mode:
options_set = self.symbols_set
if self.confirming:
options_set = self.confirm_set
row_len = 0
self.display_map.append([])
self.display_ranges.append([])
for opt in options_set:
if (opt == self.line_break) or ((len(opt) + row_len + 2) > (self.cols - 1)):
# Start a new row
self.display_map.append([])
self.display_ranges.append([])
row_len = 0
if opt == self.line_break:
# We don't actually include line-breaks as options
continue
# Add to latest row
self.display_map[-1].append(opt)
opt_len = len(opt) + 1 # to account for the leading space
self.display_ranges[-1].append({'start': row_len, 'len': opt_len})
row_len += opt_len
def index_of_range_containing(self, row, col):
"""
        This allows us to move the cursor spatially when going to a different row.
        For example, consider moving from a row with only two lengthy options to a
        row with seven single-character options. If option 2 of 2 was selected on
        the former row, we wouldn't just want option 2 to be selected on the latter
        row after the move, because the cursor would seem to jump way over to the
        left. What we really want is to "move to whatever option is directly
        above/below the one I already had selected", which is what this method (and
        the display_ranges structure) allows.
"""
if row >= 0 and row < len(self.display_ranges) and col >= 0 and col < self.cols:
row_ranges = self.display_ranges[row]
index = len(row_ranges) - 1
for range in reversed(row_ranges):
if col >= range['start']:
break
index -= 1
return index
def move_cursor(self, direction):
# Move cursor appropriately using ranges
sel_row = self.selection['row']
sel_opt = self.selection['option']
sel_orig_row = sel_row
sel_orig_col = self.display_ranges[sel_row][sel_opt]['start']
if direction == _UP:
self.selection['row'] = (sel_row - 1) % len(self.display_map)
self.selection['option'] = self.index_of_range_containing(self.selection['row'], sel_orig_col)
elif direction == _DOWN:
self.selection['row'] = (sel_row + 1) % len(self.display_map)
self.selection['option'] = self.index_of_range_containing(self.selection['row'], sel_orig_col)
elif direction == _LEFT:
# We wrap back onto the previous row when appropriate
self.selection['option'] = (sel_opt - 1) % len(self.display_map[sel_row])
# Check to see if we wrapped around
if self.selection['option'] > sel_opt or len(self.display_map[sel_row]) == 1:
# Wrap to previous row
self.selection['row'] = (sel_row - 1) % len(self.display_map)
self.selection['option'] = len(self.display_map[self.selection['row']]) - 1
elif direction == _RIGHT:
# We wrap forward onto the next row when appropriate
self.selection['option'] = (sel_opt + 1) % len(self.display_map[sel_row])
# Check to see if we wrapped around
if self.selection['option'] < sel_opt or len(self.display_map[sel_row]) == 1:
# Wrap to next row
self.selection['row'] = (sel_row + 1) % len(self.display_map)
self.selection['option'] = 0
# Sanitise new selection
self.selection['option'] = max(0, self.selection['option'])
self.selection['option'] = min(len(self.display_map[self.selection['row']]) - 1, self.selection['option'])
# Update first_displayed_row appropriately
sel_row = self.selection['row']
if sel_row < self.first_displayed_row:
self.first_displayed_row = sel_row
elif sel_row > self.first_displayed_row + 1:
self.first_displayed_row = sel_row - 1
def render_row(self, row):
# Returns the actual rendered full text of a row, with all annotations
result = ""
if row >= 0 and row < len(self.display_map):
row_opts = self.display_map[row]
row_selected = (self.selection['row'] == row)
selected_option = self.selection['option']
for index, opt in enumerate(row_opts):
# Selection markers
if row_selected:
if selected_option == index:
result += "["
elif selected_option == (index - 1):
result += "]"
else:
result += " "
else:
result += " "
# Option text
if opt == self.caps_command:
if self.caps_on:
result += "lowr"
else:
result += "UPPR"
elif opt == self.symbols_command:
if self.symbols_mode:
if self.caps_on:
result += "ABC1"
else:
result += "abc1"
else:
result += "#+=$"
else:
result += opt
# Special case for end of row
if index == len(row_opts) - 1:
# Selection markers
if row_selected and selected_option == index:
result += "]"
else:
result += " "
# Add any end-of-row padding required
result += (" " * (self.cols - (len(result) + 1)))
# Scroll indicators
if row == self.first_displayed_row and row > 0:
result += self.scroll_up_icon
elif row == (self.first_displayed_row + 1) and row < (len(self.display_map) - 1):
result += self.scroll_down_icon
else:
result += " "
return result
def delete(self):
# Delete last character entered
if (not self.confirming) and len(self.entered_text) > 0:
self.entered_text = self.entered_text[:-1]
def left(self):
self.move_cursor(_LEFT)
return True
def right(self):
self.move_cursor(_RIGHT)
return True
def up(self):
self.move_cursor(_UP)
return True
def down(self):
self.move_cursor(_DOWN)
return True
def cancel(self):
if self.cancel_aborts:
# Confirm quit if we have text
            if len(self.entered_text) > 0:
self.confirming = True
self.update_display_map()
self.selection = {'row': 0, 'option': 1}
self.first_displayed_row = 0
return False
else:
return True
# Delete last character entered
self.delete()
return False
def select(self):
# Handle all the selectable options and commands
opt = self.display_map[self.selection['row']][self.selection['option']]
if opt == self.space_symbol:
self.entered_text += " "
elif opt == self.caps_command:
self.caps_on = not (self.caps_on)
self.symbols_mode = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
elif opt == self.symbols_command:
self.symbols_mode = not (self.symbols_mode)
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
elif opt == self.delete_command:
self.delete()
elif opt == self.cancel_command:
self.confirming = True
self.update_display_map()
self.selection = {'row': 0, 'option': 1}
self.first_displayed_row = 0
elif opt == self.commit_command:
self.confirming = True
self.update_display_map()
self.selection = {'row': 0, 'option': 1}
self.first_displayed_row = 0
elif opt == self.confirm_accept:
self.confirming = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
return True
elif opt == self.confirm_cancel:
self.confirming = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
elif opt == self.confirm_quit:
self.confirming = False
self.update_display_map()
self.selection = {'row': 0, 'option': 0}
self.first_displayed_row = 0
self.cancel_input = True
return True
else:
self.entered_text += opt
return False
def redraw(self, menu):
if not self.initialized:
menu.lcd.create_char(0, [0, 0, 4, 14, 31, 0, 0, 0]) # scroll up icon
menu.lcd.create_char(1, [0, 0, 0, 31, 14, 4, 0, 0]) # scroll down icon
menu.lcd.create_char(2, [0, 0, 0, 0, 0, 0, 21, 0]) # abbreviation icon
menu.lcd.create_char(3, [0, 0, 0, 0, 0, 0, 0, 28]) # placeholder icon
self.initialized = True
if not self.confirming:
# Output the editing row
text_len = len(self.entered_text)
if text_len > self.cols:
menu.write_row(0, self.abbreviation_icon + self.entered_text[text_len - self.cols + 1:])
else:
menu.write_row(0, self.entered_text + (self.placeholder_icon * (self.cols - text_len)))
# Output relevant two rows
if self.first_displayed_row < len(self.display_map):
menu.write_row(1, self.render_row(self.first_displayed_row))
else:
menu.clear_row(1)
if self.first_displayed_row + 1 < len(self.display_map):
menu.write_row(2, self.render_row(self.first_displayed_row + 1))
else:
menu.clear_row(2)
else:
# Handle the confirmation screen
if len(self.entered_text) > self.cols:
menu.write_option(0, self.entered_text, scroll=True, scroll_repeat=2000)
else:
menu.write_row(0, self.entered_text + (" " * (self.cols - len(self.entered_text))))
menu.write_row(1, 'Confirm?')
menu.write_row(2, self.render_row(self.first_displayed_row))
| mit | -2,095,706,729,113,539,300 | 30.720627 | 559 | 0.658408 | false |
saebyn/nwgui | nwgui/gui.py | 1 | 2263 |
import pygame
from nwgui.container import AbsoluteContainer
class AbstractGUI(object):
def __init__(self, game):
raise NotImplementedError
def getGameObject(self):
raise NotImplementedError
def get(self, widgetName):
raise NotImplementedError
def setName(self, name, widget):
raise NotImplementedError
def updateLayers(self):
raise NotImplementedError
def getLayer(self):
raise NotImplementedError
def addSprite(self, widget):
raise NotImplementedError
def setActive(self, widget):
raise NotImplementedError
def setInactive(self, widget):
raise NotImplementedError
def isControlledPosition(self, position):
raise NotImplementedError
class GUI(AbsoluteContainer, AbstractGUI):
def __init__(self, game):
self._game = game
AbsoluteContainer.__init__(self, game.screen.get_width(),
game.screen.get_height(),
self, root=self)
self.image = pygame.Surface((0, 0))
self.active = None
self.names = {}
def getGameObject(self):
return self._game
def get(self, widgetName):
return self.names[widgetName]
def handleEvent(self, event):
AbsoluteContainer.handleEvent(self, event)
def updateLayers(self):
for widget in self.widgets:
widget.updateLayer()
def setParent(self, parent):
raise NotImplementedError
def isActive(self):
return self.active is not None
def setActive(self, widget):
if self.active is not None:
self.active.setInactive()
self.active = widget
def setInactive(self, widget=None):
if self.active == widget or widget is None:
self.active = None
def addSprite(self, sprite):
self._game.addGUISprite(sprite)
def setName(self, name, widget):
self.names[name] = widget
def isControlledPosition(self, position):
for widget in self._game.guiSprites.sprites():
if widget is self:
continue
if widget.rect.collidepoint(position):
return True
return False
| gpl-3.0 | -5,101,451,680,991,540,000 | 23.597826 | 66 | 0.613787 | false |
algorhythms/LeetCode | 673 Number of Longest Increasing Subsequence.py | 1 | 1796 | #!/usr/bin/python3
"""
Given an unsorted array of integers, find the number of longest increasing
subsequence.
Example 1:
Input: [1,3,5,4,7]
Output: 2
Explanation: The two longest increasing subsequence are [1, 3, 4, 7] and
[1, 3, 5, 7].
Example 2:
Input: [2,2,2,2,2]
Output: 5
Explanation: The length of longest continuous increasing subsequence is 1, and
there are 5 subsequences' length is 1, so output 5.
Note: Length of the given array will be not exceed 2000 and the answer is
guaranteed to be fit in 32-bit signed int.
"""
from typing import List
class LenCnt:
def __init__(self, l, c):
self.l = l
self.c = c
def __repr__(self):
return repr((self.l, self.c))
class Solution:
def findNumberOfLIS(self, A: List[int]) -> int:
"""
Two pass - 1st pass find the LIS, 2nd pass find the number
Let F[i] be the length of LIS ended at A[i]
"""
if not A:
return 0
n = len(A)
F = [LenCnt(l=1, c=1) for _ in A]
mx = LenCnt(l=1, c=1)
for i in range(1, n):
for j in range(i):
if A[i] > A[j]:
if F[i].l < F[j].l + 1:
F[i].l = F[j].l + 1
F[i].c = F[j].c
elif F[i].l == F[j].l + 1:
F[i].c += F[j].c
if F[i].l > mx.l:
# mx = F[i] error, need deep copy
mx.l = F[i].l
mx.c = F[i].c
elif F[i].l == mx.l:
mx.c += F[i].c
return mx.c
if __name__ == "__main__":
assert Solution().findNumberOfLIS([1,1,1,2,2,2,3,3,3]) == 27
assert Solution().findNumberOfLIS([1, 3, 5, 4, 7]) == 2
assert Solution().findNumberOfLIS([2, 2, 2, 2, 2]) == 5
| mit | 3,544,294,024,520,078,000 | 26.630769 | 78 | 0.50167 | false |
pearu/sympycore | sympycore/functions/algebra.py | 1 | 2063 | """ Implements functions ring support.
"""
#
# Author: Pearu Peterson
# Created: April, 2008
#
__all__ = ['FunctionRing']
from ..core import classes, objects, init_module
from ..basealgebra import Verbatim, Algebra
from ..ring import CommutativeRing
init_module.import_heads()
class FunctionRing(CommutativeRing):
""" Base class to functions ring classes.
Use ``Function`` function to construct instances.
"""
argument_algebras = None
nargs = None
@classmethod
def get_value_algebra(cls):
return CommutativeRing
def get_argument_algebra(self, index):
return self.get_value_algebra()
@classmethod
def get_function_algebra(cls):
return classes.OperatorRing
@classmethod
def get_differential_algebra(cls):
return classes.DifferentialRing
@classmethod
def get_predefined_symbols(cls, name):
if name=='D': return D
return
@classmethod
def convert(cls, obj, typeerror=True):
tobj = type(obj)
if tobj is cls:
return obj
if isinstance(obj, cls.get_value_algebra()):
return cls(NUMBER, obj)
return super(CommutativeRing, cls).convert(obj, typeerror=typeerror)
def as_algebra(self, cls, typeerror=True):
if cls is classes.Verbatim:
return self.as_verbatim()
if type(self) is cls:
return self
#if isinstance(self, cls):
# return self.as_verbatim().as_algebra(cls)
if typeerror:
raise TypeError('Cannot convert %s to %s instance' % (type(self).__name__, cls.__name__))
return NotImplemented
def __call__(self, *args, **options):
cls = self.get_value_algebra()
#cls = classes.Calculus
evaluate = options.get('evaluate', True)
if evaluate:
result = self.head.apply(cls, self.data, self, args)
if result is not NotImplemented:
return result
return cls(APPLY, (self, args))
classes.FunctionRing = FunctionRing
| bsd-3-clause | 7,460,060,148,131,743,000 | 25.792208 | 101 | 0.62191 | false |
NEONScience/NEON-Data-Skills | tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.py | 1 | 20510 | #!/usr/bin/env python
# coding: utf-8
# ---
# syncID: e6ccf19a4b454ca594388eeaa88ebe12
# title: "Calculate Vegetation Biomass from LiDAR Data in Python"
# description: "Learn to calculate the biomass of standing vegetation using a canopy height model data product."
# dateCreated: 2017-06-21
# authors: Tristan Goulden
# contributors: Donal O'Leary
# estimatedTime: 1 hour
# packagesLibraries: numpy, gdal, matplotlib, matplotlib.pyplot, os
# topics: lidar,remote-sensing
# languagesTool: python
# dataProduct: DP1.10098.001, DP3.30015.001,
# code1: https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.ipynb
# tutorialSeries: intro-lidar-py-series
# urlTitle: calc-biomass-py
# ---
# <div id="ds-objectives" markdown="1">
#
# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation. This tutorial will calculate biomass for individual
# trees in the forest.
#
# ### Objectives
# After completing this tutorial, you will be able to:
#
# * Learn how to apply a Gaussian smoothing kernel for high-frequency spatial filtering
# * Apply a watershed segmentation algorithm for delineating tree crowns
# * Calculate biomass predictor variables from a CHM
# * Setup training data for Biomass predictions
# * Apply a Random Forest machine learning approach to calculate biomass
#
#
# ### Install Python Packages
#
# * **numpy**
# * **gdal**
# * **matplotlib**
# * **matplotlib.pyplot**
# * **os**
#
#
# ### Download Data
#
# If you have already downloaded the data set for the Data Institute, you have the
# data for this tutorial within the SJER directory. If you would like to just
# download the data for this tutorial use the following link.
#
# <a href="https://neondata.sharefile.com/d-s58db39240bf49ac8" class="link--button link--arrow">
# Download the Biomass Calculation teaching data subset</a>
#
# </div>
# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation. This tutorial will calculate biomass for individual
# trees in the forest.
#
# The calculation of biomass consists of four primary steps:
#
# 1. Delineating individual tree crowns
# 2. Calculating predictor variables for all individuals
# 3. Collecting training data
# 4. Applying a regression model to estimate biomass from predictors
#
# In this tutorial we will use a watershed segmentation algorithm for delineating
# tree crowns (step 1) and a Random Forest (RF) machine learning algorithm for
# relating the predictor variables to biomass (part 4). The predictor variables were
# selected following suggestions by Gleason et al. (2012) and biomass estimates were
# determined from DBH (diameter at breast height) measurements following relationships
# given in Jenkins et al. (2003).
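# As a rough illustration of the Jenkins et al. (2003) form (the coefficients below are
# placeholders, not the ones used to build the training file): the relationships are
# allometric equations of the form `biomass_kg = exp(b0 + b1 * ln(DBH_cm))`, so a 30 cm
# DBH tree with hypothetical coefficients b0 = -2.48 and b1 = 2.48 would be estimated as
# `np.exp(-2.48 + 2.48 * np.log(30.0))`, roughly 385 kg.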
#
# ## Get Started
#
# First, we need to specify the directory where we will find and save the data needed for this tutorial. You will need to change this line to suit your local machine. I have decided to save my data in the following directory:
# In[1]:
data_path = '/Users/olearyd/Git/data/'
# Next, we will import several of the typical libraries.
# In[2]:
import numpy as np
import os
import gdal, osr
import matplotlib.pyplot as plt
import sys
from scipy import ndimage as ndi
get_ipython().run_line_magic('matplotlib', 'inline')
# Next, we will add libraries from skilearn which will help with the watershed delination, determination of predictor variables and random forest algorithm
# In[3]:
#Import biomass specific libraries
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from sklearn.ensemble import RandomForestRegressor
# ## Define functions
#
# Now we will define a few functions that allow us to more easily work with the NEON data.
#
# * `plot_band_array`: function to plot NEON spatial data.
# In[4]:
#Define plot band array function
def plot_band_array(band_array,image_extent,title,cmap_title,colormap,colormap_limits):
plt.imshow(band_array,extent=image_extent)
cbar = plt.colorbar(); plt.set_cmap(colormap); plt.clim(colormap_limits)
cbar.set_label(cmap_title,rotation=270,labelpad=20)
plt.title(title); ax = plt.gca()
ax.ticklabel_format(useOffset=False, style='plain')
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90)
# * `array2raster`: function to output geotiff files.
# In[5]:
def array2raster(newRasterfn,rasterOrigin,pixelWidth,pixelHeight,array,epsg):
cols = array.shape[1]
rows = array.shape[0]
originX = rasterOrigin[0]
originY = rasterOrigin[1]
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
outband = outRaster.GetRasterBand(1)
outband.WriteArray(array)
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromEPSG(epsg)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
# * `raster2array`: function to convert a raster to an array.
# In[6]:
def raster2array(geotif_file):
metadata = {}
dataset = gdal.Open(geotif_file)
metadata['array_rows'] = dataset.RasterYSize
metadata['array_cols'] = dataset.RasterXSize
metadata['bands'] = dataset.RasterCount
metadata['driver'] = dataset.GetDriver().LongName
metadata['projection'] = dataset.GetProjection()
metadata['geotransform'] = dataset.GetGeoTransform()
mapinfo = dataset.GetGeoTransform()
metadata['pixelWidth'] = mapinfo[1]
metadata['pixelHeight'] = mapinfo[5]
metadata['ext_dict'] = {}
metadata['ext_dict']['xMin'] = mapinfo[0]
    metadata['ext_dict']['xMax'] = mapinfo[0] + dataset.RasterXSize*mapinfo[1]
    metadata['ext_dict']['yMin'] = mapinfo[3] + dataset.RasterYSize*mapinfo[5]
metadata['ext_dict']['yMax'] = mapinfo[3]
metadata['extent'] = (metadata['ext_dict']['xMin'],metadata['ext_dict']['xMax'],
metadata['ext_dict']['yMin'],metadata['ext_dict']['yMax'])
if metadata['bands'] == 1:
raster = dataset.GetRasterBand(1)
metadata['noDataValue'] = raster.GetNoDataValue()
metadata['scaleFactor'] = raster.GetScale()
# band statistics
metadata['bandstats'] = {} # make a nested dictionary to store band stats in same
stats = raster.GetStatistics(True,True)
metadata['bandstats']['min'] = round(stats[0],2)
metadata['bandstats']['max'] = round(stats[1],2)
metadata['bandstats']['mean'] = round(stats[2],2)
metadata['bandstats']['stdev'] = round(stats[3],2)
array = dataset.GetRasterBand(1).ReadAsArray(0,0,
metadata['array_cols'],
metadata['array_rows']).astype(np.float)
array[array==int(metadata['noDataValue'])]=np.nan
array = array/metadata['scaleFactor']
return array, metadata
elif metadata['bands'] > 1:
print('More than one band ... need to modify function for case of multiple bands')
# * `crown_geometric_volume_pth`: function to get the tree crown volume.
# In[7]:
def crown_geometric_volume_pth(tree_data,min_tree_height,pth):
p = np.percentile(tree_data, pth)
tree_data_pth = [v if v < p else p for v in tree_data]
crown_geometric_volume_pth = np.sum(tree_data_pth - min_tree_height)
return crown_geometric_volume_pth, p
# * `get_predictors`: function to compute the predictor variables for each tree crown.
# In[8]:
def get_predictors(tree,chm_array, labels):
indexes_of_tree = np.asarray(np.where(labels==tree.label)).T
tree_crown_heights = chm_array[indexes_of_tree[:,0],indexes_of_tree[:,1]]
full_crown = np.sum(tree_crown_heights - np.min(tree_crown_heights))
crown50, p50 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,50)
crown60, p60 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,60)
crown70, p70 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,70)
return [tree.label,
np.float(tree.area),
tree.major_axis_length,
tree.max_intensity,
tree.min_intensity,
p50, p60, p70,
full_crown, crown50, crown60, crown70]
# ## Canopy Height Data
#
# With everything set up, we can now start working with our data by defining the file path to our CHM file. Note that you will need to change this and subsequent filepaths according to your local machine.
# In[9]:
chm_file = data_path+'NEON_D17_SJER_DP3_256000_4106000_CHM.tif'
# When we output the results, we will want to include the same file information as the input, so we will gather the file name information.
# In[10]:
#Get info from chm file for outputting results
just_chm_file = os.path.basename(chm_file)
just_chm_file_split = just_chm_file.split(sep="_")
# Now we will get the CHM data...
# In[11]:
chm_array, chm_array_metadata = raster2array(chm_file)
# ..., plot it, and save the figure.
# In[12]:
#Plot the original CHM
plt.figure(1)
#Plot the CHM figure
plot_band_array(chm_array,chm_array_metadata['extent'],
'Canopy height Model',
'Canopy height (m)',
'Greens',[0, 9])
plt.savefig(data_path+just_chm_file[0:-4]+'_CHM.png',dpi=300,orientation='landscape',
bbox_inches='tight',
pad_inches=0.1)
# It looks like SJER primarily has low vegetation with scattered taller trees.
#
# ## Create Filtered CHM
#
# Now we will use a Gaussian smoothing kernel (convolution) across the data set to remove spurious high vegetation points. This will help ensure we are finding the treetops properly before running the watershed segmentation algorithm.
#
# For different forest types it may be necessary to change the input parameters. Information on the function can be found in the <a href="https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html" target="_blank">SciPy documentation</a>.
#
# Of most importance are the second and fifth inputs. The second input defines the standard deviation of the Gaussian smoothing kernel. Too large a value will apply too much smoothing, too small and some spurious high points may be left behind. The fifth, the truncate value, controls after how many standard deviations the Gaussian kernel will get cut off (since it theoretically goes to infinity).
# In[13]:
#Smooth the CHM using a gaussian filter to remove spurious points
chm_array_smooth = ndi.gaussian_filter(chm_array,2,
mode='constant',cval=0,truncate=2.0)
chm_array_smooth[chm_array==0] = 0
# Now save a copy of filtered CHM. We will later use this in our code, so we'll output it into our data directory.
# In[14]:
#Save the smoothed CHM
array2raster(data_path+'chm_filter.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,
np.array(chm_array_smooth,dtype=float),
32611)
# ## Determine local maximums
#
# Now we will run an algorithm to determine local maximums within the image. Setting indices to 'False' returns a raster of the maximum points, as opposed to a list of coordinates. The footprint parameter is an area where only a single peak can be found. This should be approximately the size of the smallest tree. Information on more sophisticated methods to define the window can be found in Chen (2006).
# In[15]:
#Calculate local maximum points in the smoothed CHM
local_maxi = peak_local_max(chm_array_smooth,indices=False, footprint=np.ones((5, 5)))
# Our new object `local_maxi` is an array of boolean values where each pixel is identified as either being the local maximum (`True`) or not being the local maximum (`False`).
# In[16]:
local_maxi
# This is very helpful, but it can be difficult to visualize boolean values using our typical numeric plotting procedures as defined in the `plot_band_array` function above. Therefore, we will need to convert this boolean array to a numeric format to use this function. Booleans convert easily to integers with values of `False=0` and `True=1` using the `.astype(int)` method.
# In[17]:
local_maxi.astype(int)
# Next, we can plot the raster of local maximums by coercing the boolean array into an array of integers inline. The following figure shows the difference in finding local maximums for a filtered vs. non-filtered CHM.
#
# We will save the graphics (.png) in an outputs folder sister to our working directory and data outputs (.tif) to our data directory.
# In[18]:
#Plot the local maximums
plt.figure(2)
plot_band_array(local_maxi.astype(int),chm_array_metadata['extent'],
'Maximum',
'Maxi',
'Greys',
[0, 1])
plt.savefig(data_path+just_chm_file[0:-4]+ '_Maximums.png',
dpi=300,orientation='landscape',
bbox_inches='tight',pad_inches=0.1)
array2raster(data_path+'maximum.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(local_maxi,dtype=np.float32),32611)
# If we were to look at the overlap between the tree crowns and the local maxima from each method, it would appear a bit like this raster.
#
# <figure>
# <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg">
# <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg"></a>
# <figcaption> The difference in finding local maximums for a filtered vs.
# non-filtered CHM.
# Source: National Ecological Observatory Network (NEON)
# </figcaption>
# </figure>
#
#
# Apply labels to all of the local maximum points
# In[19]:
#Identify all the maximum points
markers = ndi.label(local_maxi)[0]
# Next we will create a mask layer of all of the vegetation points so that the watershed segmentation will only occur on the trees and not extend into the surrounding ground points. Since 0 represents ground points in the CHM, setting the mask to 1 where the CHM is not zero will define the mask.
# In[20]:
#Create a CHM mask so the segmentation will only occur on the trees
chm_mask = chm_array_smooth
chm_mask[chm_array_smooth != 0] = 1
# ## Watershed segmentation
#
# As in a river system, a watershed is divided by a ridge that divides areas. Here our watersheds are the individual tree canopies and the ridges are the delineations between them.
#
# <figure>
# <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png">
# <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png"></a>
# <figcaption> A raster classified based on watershed segmentation.
# Source: National Ecological Observatory Network (NEON)
# </figcaption>
# </figure>
#
# Next, we will perform the watershed segmentation which produces a raster of labels.
# In[21]:
#Perform watershed segmentation
labels = watershed(chm_array_smooth, markers, mask=chm_mask)
labels_for_plot = labels.copy()
labels_for_plot = np.array(labels_for_plot,dtype = np.float32)
labels_for_plot[labels_for_plot==0] = np.nan
max_labels = np.max(labels)
# In[22]:
#Plot the segments
plot_band_array(labels_for_plot,chm_array_metadata['extent'],
'Crown Segmentation','Tree Crown Number',
'Spectral',[0, max_labels])
plt.savefig(data_path+just_chm_file[0:-4]+'_Segmentation.png',
dpi=300,orientation='landscape',
bbox_inches='tight',pad_inches=0.1)
array2raster(data_path+'labels.tif',
(chm_array_metadata['ext_dict']['xMin'],
chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(labels,dtype=float),32611)
# Now we will get several properties of the individual trees that will be used as predictor variables.
# In[23]:
#Get the properties of each segment
tree_properties = regionprops(labels,chm_array)
# Now we will get the predictor variables to match the (soon to be loaded) training data using the function defined above. The first column will be segment IDs, the rest will be the predictor variables.
# In[24]:
predictors_chm = np.array([get_predictors(tree, chm_array, labels) for tree in tree_properties])
X = predictors_chm[:,1:]
tree_ids = predictors_chm[:,0]
# ## Training data
#
# We now bring in the training data file which is a simple CSV file with no header. The first column is biomass, and the remaining columns are the same predictor variables defined above. The tree diameter and max height are defined in the NEON vegetation structure data along with the tree DBH. The field-validated values are used for training, while the others were determined from the CHM and camera images by manually delineating the tree crowns and pulling out the relevant information from the CHM.
#
# Biomass was calculated from DBH according to the formulas in Jenkins et al. (2003).
#
# If you didn't download this training dataset above, you can <a href="https://neondata.sharefile.com/share/view/cdc8242e24ad4517/fobd4959-4cf0-44ab-acc6-0695a04a1afc" target="_blank">Download the training dataset CSV here</a>.
# In[25]:
#Define the file of training data
training_data_file = data_path+'SJER_Biomass_Training.csv'
#Read in the training data from a CSV file
training_data = np.genfromtxt(training_data_file,delimiter=',')
#Grab the biomass (Y) from the first line
biomass = training_data[:,0]
#Grab the biomass predictors from the remaining lines
biomass_predictors = training_data[:,1:12]
# ## Random Forest classifiers
#
# We can then define parameters of the Random Forest classifier and fit the predictor variables from the training data to the biomass estimates.
# In[26]:
#Define parameters for Random Forest regressor
max_depth = 30
#Define regressor rules
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
#Fit the biomass to regressor variables
regr_rf.fit(biomass_predictors,biomass)
# We now apply the Random Forest model to the predictor variables to retrieve biomass
# In[27]:
#Apply the model to the predictors
estimated_biomass = regr_rf.predict(X)
# For outputting a raster, copy the labels raster to a biomass raster, then cycle through the segments and assign the biomass estimate to each individual tree segment.
# In[28]:
#Set an out raster with the same size as the labels
biomass_map = np.array((labels),dtype=float)
#Assign the appropriate biomass to the labels
biomass_map[biomass_map==0] = np.nan
for tree_id, biomass_of_tree_id in zip(tree_ids, estimated_biomass):
biomass_map[biomass_map == tree_id] = biomass_of_tree_id
# ## Calc Biomass
# Collect some of the biomass statistics and then plot the results and save an output geotiff.
# In[29]:
#Get biomass stats for plotting
mean_biomass = np.mean(estimated_biomass)
std_biomass = np.std(estimated_biomass)
min_biomass = np.min(estimated_biomass)
sum_biomass = np.sum(estimated_biomass)
print('Sum of biomass is ',sum_biomass,' kg')
#Plot the biomass!
plt.figure(5)
plot_band_array(biomass_map,chm_array_metadata['extent'],
'Biomass (kg)','Biomass (kg)',
'winter',
[min_biomass+std_biomass, mean_biomass+std_biomass*3])
plt.savefig(data_path+just_chm_file_split[0]+'_'+just_chm_file_split[1]+'_'+just_chm_file_split[2]+'_'+just_chm_file_split[3]+'_'+just_chm_file_split[4]+'_'+just_chm_file_split[5]+'_'+'Biomass.png',
dpi=300,orientation='landscape',
bbox_inches='tight',
pad_inches=0.1)
array2raster(data_path+'biomass.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(biomass_map,dtype=float),32611)
# In[ ]:
| agpl-3.0 | -6,897,848,424,968,655,000 | 35.17284 | 503 | 0.709654 | false |
abstractfactory/openmetadata-mk1 | transaction.py | 1 | 5900 | """Convenience module for the end-user
The goal of this module is to provide utilities that are as
high-level as possible, so that users need as little knowledge
of Open Folder as possible.
Target audience leans towards Technical Directors or
fellow scripters in any DCC.
"""
from __future__ import absolute_import
import os
# import sys
import errno
import logging
import shutil
import collections
from openmetadata import domain
log = logging.getLogger('openmetadata.transaction')
def write(path, channel=None, key=None, data=None):
"""Convenience method for writing metadata"""
container = domain.Folder(path)
if key and not channel:
raise ValueError("Argument `key` must be specified in "
"conjunction with `channel`")
if channel and not key:
if not isinstance(data, dict):
raise ValueError("Data passed to object of type "
"<Channel> must be of type <dict>")
container = domain.Channel(channel, container)
if channel and key:
channel = domain.Channel(channel, container)
key = domain.Key(key, channel)
container = key
container.data = data
# container.write()
print "%r = %r" % (container.path, container.data)
def update(path, channel=None, key=None, data=None):
"""Convenience method for updating metadata"""
raise NotImplementedError
def read(path, channel=None, key=None):
"""Convenience method for reading metadata
Parameters
path (str) : Path to meta folder
channel (str) : (optional) Name of individual channel
key (str) : (optional) Name of individual file
Returns
dict() : {'obj.name': content}
Calling this method with only `path` specified is identical
to calling Folder.read().data directly.
"""
if key and not channel:
raise ValueError("Must supply `channel` with `key` argument")
if not os.path.exists(path):
return {}
try:
obj = domain.Factory.create(path)
except WindowsError as e:
# Temporary fix. An error occurs when trying to
# read junctions pointing to invalid targets.
if e.errno == errno.ENOENT:
print e
return {}
raise e
assert isinstance(obj, domain.Folder)
if channel:
obj = obj.child(channel)
if not obj:
return {}
if key:
obj = obj.child(key)
if not obj:
return None
return obj.read().data
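# Illustrative usage of `read` (the path, channel and key names below are hypothetical):
#
#     read(r'c:\projects\spiderman')                         # -> all metadata as a dict
#     read(r'c:\projects\spiderman', channel='resolution')   # -> that channel's dict
#     read(r'c:\projects\spiderman', 'resolution', 'width')  # -> a single value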
def exists(path, channel=None, key=None):
pass
def cascade(path, channel, key=None):
"""Merge metadata of each channel matching `term` up-wards through hierarchy"""
folder = domain.Folder(path)
hierarchy = _findchannels(folder, channel)
hierarchy.reverse()
# An implementation of the Property-Pattern as discussed here:
# http://steve-yegge.blogspot.co.uk/2008/10/universal-design-pattern.html
metadata_hierarchy = []
for _channel in hierarchy:
_channel.read()
_data = _channel.data or {}
metadata_hierarchy.append(_data)
# The following algorithm is based on this answer:
# http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
def update(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
metadata = {}
for _metadata in metadata_hierarchy:
update(metadata, _metadata)
return metadata
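# A minimal sketch of how the nested `update` above merges dictionaries
# (hypothetical values, not read from disk):
#
#     parent_meta = {'size': {'x': 1}, 'name': 'root'}
#     child_meta = {'size': {'y': 2}}
#     update(parent_meta, child_meta)
#     # parent_meta is now {'size': {'x': 1, 'y': 2}, 'name': 'root'}
#
# i.e. channels closer to `path` override inherited values key by key instead of
# replacing whole nested dictionaries.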
def delete(path, channel=None, key=None, max_retries=10):
assert os.path.exists(path)
retries = 0
while True:
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
break
except WindowsError as e:
# Sometimes, Dropbox can bother this operation;
# creating files in the midst of deleting a folder.
#
# If this happens, try again in a short while.
retries += 1
if retries > max_retries:
log.error(e)
break
import time
time.sleep(0.1)
log.info("Retired %i time(s) for %s" % (retries, path))
log.info("Removed %s" % path)
def _findchannels(folder, term, result=None):
"""Return channels matching `term` up-wards through hierarchy"""
assert isinstance(folder, domain.Folder)
result = result or []
# Note: We can only cascade channels of type .kvs
current_channel = None
# Look for `term` within folder
for _channel in folder:
if _channel.name == term and _channel.extension == '.kvs':
result.append(_channel)
current_channel = _channel
# Recurse
parent = folder.parent
if parent:
        # Before we recurse, ensure this is not a root.
isroot = False
# TODO
# Find a way to optimize this. Channel is being read here
# to find the isRoot property which is used solely to
# determine whether or not to continue searching.
# This is an expensive operation, and whats worse,
# the channel is being re-read in `cascade`.
if current_channel:
data = current_channel.read().data or {}
if data.get('isRoot') is True:
isroot = True
if not isroot:
return _findchannels(parent, term, result)
return result
# def cascade(folder, term):
if __name__ == '__main__':
import openmetadata as om
package = os.getcwd()
path = os.path.join(package, 'test', 'persist')
path = om.Folder(r's:\content\jobs\test\content\shots')
# print cascade(path, 'properties')
| mit | 4,959,071,983,263,927,000 | 25.818182 | 101 | 0.605593 | false |
mchels/FolderBrowser | plotcontrols.py | 1 | 6028 | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QSizePolicy
class PlotControls(QtWidgets.QWidget):
"""
Control bar for controlling how plots are shown.
Parameters
----------
cmap_names : list
List of colormap names to show in the colormap dropdown menu.
plot_2D_types : list
List of plot_2D_type names.
"""
def __init__(self, cmap_names, plot_2D_types):
super().__init__()
self.layout = QtWidgets.QHBoxLayout()
self.num_col_boxes = 3
self.num_lim_boxes = 3
self.cmap_names = cmap_names
self.plot_2D_types = plot_2D_types
self.init_col_sel_boxes()
self.init_cmap_sel()
self.init_plot_2D_type_sel()
self.init_lim_boxes()
self.init_aspect_box()
self.setLayout(self.layout)
def reset_col_boxes(self, array_of_text_items):
"""
Reset column selector boxes.
"""
assert len(array_of_text_items) == self.num_col_boxes
for i, box in enumerate(self.col_boxes):
box.list_of_text_items = array_of_text_items[i]
prev_text = box.currentText()
box.clear()
box.addItems(array_of_text_items[i])
idx = box.findText(prev_text)
box.setCurrentIndex(idx)
min_width = len(max(box.list_of_text_items, key=len)) * 8
box.view().setMinimumWidth(min_width)
# All indices must be set in the loop above before we can start
# assigning lowest unoccupied texts. Otherwise we don't know which
# texts are unoccupied.
for box in self.col_boxes:
if box.currentIndex() == -1:
self.select_lowest_unoccupied(box)
def init_col_sel_boxes(self):
"""
Initialize column selector boxes.
"""
self.col_boxes = [None] * self.num_col_boxes
for i in range(self.num_col_boxes):
box = QtWidgets.QComboBox()
box.setMaxVisibleItems(80)
policy_horiz = QSizePolicy.MinimumExpanding
policy_vert = QSizePolicy.Maximum
box.setSizePolicy(policy_horiz, policy_vert)
box.setMinimumWidth(40)
self.layout.addWidget(box)
self.col_boxes[i] = box
def init_cmap_sel(self):
"""
Initialize colormap selector.
"""
cmap_sel = QtWidgets.QComboBox()
cmap_sel.addItems(self.cmap_names)
policy_horiz = QSizePolicy.MinimumExpanding
policy_vert = QSizePolicy.Maximum
cmap_sel.setSizePolicy(policy_horiz, policy_vert)
cmap_sel.setMinimumWidth(40)
min_width = len(max(self.cmap_names, key=len)) * 8
cmap_sel.view().setMinimumWidth(min_width)
self.layout.addWidget(cmap_sel)
self.cmap_sel = cmap_sel
def init_plot_2D_type_sel(self):
plot_2D_type_sel = QtWidgets.QComboBox()
plot_2D_type_sel.addItems(self.plot_2D_types)
policy_horiz = QSizePolicy.MinimumExpanding
policy_vert = QSizePolicy.Maximum
plot_2D_type_sel.setSizePolicy(policy_horiz, policy_vert)
plot_2D_type_sel.setMinimumWidth(40)
min_width = len(max(self.plot_2D_types, key=len)) * 8
plot_2D_type_sel.view().setMinimumWidth(min_width)
self.layout.addWidget(plot_2D_type_sel)
self.plot_2D_type_sel = plot_2D_type_sel
def init_lim_boxes(self):
self.lim_boxes = [None] * self.num_lim_boxes
dim_names = ['x', 'y', 'z']
for i in range(self.num_lim_boxes):
lim_box = QtWidgets.QLineEdit()
tooltip = ('Limit for {}. Use <number>:<number> where both numbers '
'can be empty').format(dim_names[i])
lim_box.setToolTip(tooltip)
self.layout.addWidget(lim_box)
self.lim_boxes[i] = lim_box
def init_aspect_box(self):
aspect_box = QtWidgets.QLineEdit()
aspect_box.setToolTip('Aspect ratio, use <number> or <number:number>')
self.layout.addWidget(aspect_box)
self.aspect_box = aspect_box
def get_sel_cols(self):
sel_texts = [box.currentText() for box in self.col_boxes]
return sel_texts
def get_sel_2D_type(self):
sel_str = self.plot_2D_type_sel.currentText()
return sel_str
def get_lims(self):
lims = [None] * self.num_lim_boxes
for i, lim_box in enumerate(self.lim_boxes):
lims[i] = self.parse_lims(lim_box.text())
return lims
def get_aspect(self):
text = self.aspect_box.text()
return self.parse_aspect(text)
def select_lowest_unoccupied(self, box):
"""
Sets the text on box to the text with the lowest index in
box.list_of_text_items which is not already selected in another box in
self.col_boxes.
"""
sel_texts = self.get_sel_cols()
for i, text in enumerate(box.list_of_text_items):
if text not in sel_texts:
box.setCurrentIndex(i)
return
def set_text_on_box(self, box_idx, text):
"""
Potential infinite loop if sel_col_func calls this function.
"""
box = self.col_boxes[box_idx]
idx = box.findText(text)
box.setCurrentIndex(idx)
def parse_lims(self, text):
lims = text.split(':')
if len(lims) != 2:
return (None, None)
lower_lim = self.conv_to_float_or_None(lims[0])
upper_lim = self.conv_to_float_or_None(lims[1])
return (lower_lim, upper_lim)
def parse_aspect(self, text):
try: return float(text)
except ValueError: pass
parts = text.split(':')
try:
num = float(parts[0])
den = float(parts[1])
except (ValueError, IndexError):
return 'auto'
return num / den
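    # Illustrative behaviour of the two parsers above (the input strings are
    # assumptions, not taken from real user input):
    #   parse_lims('1:10')   -> (1.0, 10.0)
    #   parse_lims(':5')     -> (None, 5.0)   # open lower limit
    #   parse_lims('bad')    -> (None, None)  # anything unparseable becomes None
    #   parse_aspect('2')    -> 2.0
    #   parse_aspect('16:9') -> 16/9
    #   parse_aspect('')     -> 'auto'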
@staticmethod
def conv_to_float_or_None(str):
try:
return float(str)
except ValueError:
return None
| mit | -5,390,035,938,519,185,000 | 34.251462 | 80 | 0.584439 | false |
AlexProfi/django-cms | cms/cms_menus.py | 1 | 16191 | # -*- coding: utf-8 -*-
from django.utils.translation import get_language
from cms import constants
from cms.apphook_pool import apphook_pool
from cms.utils.permissions import load_view_restrictions, has_global_page_permission
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import current_site
from cms.utils.i18n import get_fallback_languages, hide_untranslated
from cms.utils.page_resolver import get_page_queryset
from cms.utils.moderator import get_title_queryset, use_draft
from menus.base import Menu, NavigationNode, Modifier
from menus.menu_pool import menu_pool
def get_visible_page_objects(request, pages, site=None):
"""
This code is basically a many-pages-at-once version of
Page.has_view_permission.
pages contains all published pages
check if there is ANY restriction
that needs a permission page visibility calculation
"""
public_for = get_cms_setting('PUBLIC_FOR')
can_see_unrestricted = public_for == 'all' or (
public_for == 'staff' and request.user.is_staff)
is_auth_user = request.user.is_authenticated()
restricted_pages = load_view_restrictions(request, pages)
if not restricted_pages:
if can_see_unrestricted:
return pages
elif not is_auth_user:
return [] # Unauth user can't acquire global or user perm to see pages
if get_cms_setting('PERMISSION') and not site:
site = current_site(request) # avoid one extra query when possible
if has_global_page_permission(request, site, can_view=True):
return pages
def has_global_perm():
if has_global_perm.cache < 0:
if request.user.has_perm('cms.view_page'):
has_global_perm.cache = 1
else:
has_global_perm.cache = 0
return bool(has_global_perm.cache)
has_global_perm.cache = -1
def has_permission_membership(page_id):
"""
PagePermission user group membership tests
"""
user_pk = request.user.pk
for perm in restricted_pages[page_id]:
if perm.user_id == user_pk:
return True
if not perm.group_id:
continue
if has_permission_membership.user_groups is None:
has_permission_membership.user_groups = request.user.groups.all().values_list(
'pk', flat=True)
if perm.group_id in has_permission_membership.user_groups:
return True
return False
has_permission_membership.user_groups = None
visible_pages = []
for page in pages:
to_add = False
page_id = page.pk
is_restricted = page_id in restricted_pages
# restricted_pages contains as key any page.pk that is
# affected by a permission grant_on
if not is_restricted and can_see_unrestricted:
to_add = True
elif is_auth_user:
# setting based handling of unrestricted pages
# check group and user memberships to restricted pages
if is_restricted and has_permission_membership(page_id):
to_add = True
elif has_global_perm():
to_add = True
if to_add:
visible_pages.append(page)
return visible_pages
def get_visible_pages(request, pages, site=None):
"""Returns the IDs of all visible pages"""
pages = get_visible_page_objects(request, pages, site)
return [page.pk for page in pages]
def page_to_node(page, home, cut):
"""
Transform a CMS page into a navigation node.
:param page: the page you wish to transform
:param home: a reference to the "home" page (the page with path="0001")
:param cut: Should we cut page from its parent pages? This means the node will not
have a parent anymore.
"""
# Theses are simple to port over, since they are not calculated.
# Other attributes will be added conditionnally later.
attr = {
'soft_root': page.soft_root,
'auth_required': page.login_required,
'reverse_id': page.reverse_id,
}
parent_id = page.parent_id
# Should we cut the Node from its parents?
if home and page.parent_id == home.pk and cut:
parent_id = None
# possible fix for a possible problem
# if parent_id and not page.parent.get_calculated_status():
# parent_id = None # ????
if page.limit_visibility_in_menu is constants.VISIBILITY_ALL:
attr['visible_for_authenticated'] = True
attr['visible_for_anonymous'] = True
else:
attr['visible_for_authenticated'] = page.limit_visibility_in_menu == constants.VISIBILITY_USERS
attr['visible_for_anonymous'] = page.limit_visibility_in_menu == constants.VISIBILITY_ANONYMOUS
attr['is_home'] = page.is_home
# Extenders can be either navigation extenders or from apphooks.
extenders = []
if page.navigation_extenders:
if page.navigation_extenders in menu_pool.menus:
extenders.append(page.navigation_extenders)
elif "{0}:{1}".format(page.navigation_extenders, page.pk) in menu_pool.menus:
extenders.append("{0}:{1}".format(page.navigation_extenders, page.pk))
# Is this page an apphook? If so, we need to handle the apphooks's nodes
lang = get_language()
# Only run this if we have a translation in the requested language for this
# object. The title cache should have been prepopulated in CMSMenu.get_nodes
# but otherwise, just request the title normally
if not hasattr(page, 'title_cache') or lang in page.title_cache:
app_name = page.get_application_urls(fallback=False)
if app_name: # it means it is an apphook
app = apphook_pool.get_apphook(app_name)
extenders += app.menus
exts = []
for ext in extenders:
if hasattr(ext, "get_instances"):
# CMSAttachMenus are treated a bit differently to allow them to be
# able to be attached to multiple points in the navigation.
exts.append("{0}:{1}".format(ext.__name__, page.pk))
elif hasattr(ext, '__name__'):
exts.append(ext.__name__)
else:
exts.append(ext)
if exts:
attr['navigation_extenders'] = exts
# Do we have a redirectURL?
attr['redirect_url'] = page.get_redirect() # save redirect URL if any
attr['slug'] = page.get_slug() #save page slug
# Now finally, build the NavigationNode object and return it.
ret_node = NavigationNode(
page.get_menu_title(),
page.get_absolute_url(),
page.pk,
parent_id,
attr=attr,
visible=page.in_navigation,
)
return ret_node
class CMSMenu(Menu):
def get_nodes(self, request):
page_queryset = get_page_queryset(request)
site = current_site(request)
lang = get_language_from_request(request)
filters = {
'site': site,
}
if hide_untranslated(lang, site.pk):
filters['title_set__language'] = lang
if not use_draft(request):
filters['title_set__published'] = True
if not use_draft(request):
page_queryset = page_queryset.published()
pages = page_queryset.filter(**filters).order_by("path")
ids = {}
nodes = []
first = True
home_cut = False
home_children = []
home = None
actual_pages = []
# cache view perms
visible_pages = get_visible_pages(request, pages, site)
for page in pages:
# Pages are ordered by path, therefore the first page is the root
# of the page tree (a.k.a "home")
if page.pk not in visible_pages:
# Don't include pages the user doesn't have access to
continue
if not home:
home = page
if first and page.pk != home.pk:
home_cut = True
if (home_cut and (page.parent_id == home.pk or
page.parent_id in home_children)):
home_children.append(page.pk)
if ((page.pk == home.pk and home.in_navigation)
or page.pk != home.pk):
first = False
ids[page.id] = page
actual_pages.append(page)
page.title_cache = {}
langs = [lang]
if not hide_untranslated(lang):
langs.extend(get_fallback_languages(lang))
titles = list(get_title_queryset(request).filter(
page__in=ids, language__in=langs))
for title in titles: # add the title and slugs and some meta data
page = ids[title.page_id]
page.title_cache[title.language] = title
for page in actual_pages:
if page.title_cache:
nodes.append(page_to_node(page, home, home_cut))
return nodes
menu_pool.register_menu(CMSMenu)
class NavExtender(Modifier):
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
if post_cut:
return nodes
# rearrange the parent relations
# Find home
home = next((n for n in nodes if n.attr.get("is_home", False)), None)
# Find nodes with NavExtenders
exts = []
for node in nodes:
extenders = node.attr.get("navigation_extenders", None)
if extenders:
for ext in extenders:
if ext not in exts:
exts.append(ext)
# Link the nodes
for extnode in nodes:
if extnode.namespace == ext and not extnode.parent_id:
# if home has nav extenders but home is not visible
if node == home and not node.visible:
# extnode.parent_id = None
extnode.parent_namespace = None
extnode.parent = None
else:
extnode.parent_id = node.id
extnode.parent_namespace = node.namespace
extnode.parent = node
node.children.append(extnode)
removed = []
# find all not assigned nodes
for menu in menu_pool.menus.items():
if (hasattr(menu[1], 'cms_enabled')
and menu[1].cms_enabled and not menu[0] in exts):
for node in nodes:
if node.namespace == menu[0]:
removed.append(node)
if breadcrumb:
# if breadcrumb and home not in navigation add node
if breadcrumb and home and not home.visible:
home.visible = True
if request.path_info == home.get_absolute_url():
home.selected = True
else:
home.selected = False
# remove all nodes that are nav_extenders and not assigned
for node in removed:
nodes.remove(node)
return nodes
menu_pool.register_modifier(NavExtender)
class SoftRootCutter(Modifier):
"""
Ask evildmp/superdmp if you don't understand softroots!
Softroot description from the docs:
A soft root is a page that acts as the root for a menu navigation tree.
Typically, this will be a page that is the root of a significant new
section on your site.
When the soft root feature is enabled, the navigation menu for any page
will start at the nearest soft root, rather than at the real root of
the site’s page hierarchy.
This feature is useful when your site has deep page hierarchies (and
therefore multiple levels in its navigation trees). In such a case, you
usually don’t want to present site visitors with deep menus of nested
items.
    For example, you’re on the page "Introduction to Bleeding", so the menu
might look like this:
School of Medicine
Medical Education
Departments
Department of Lorem Ipsum
Department of Donec Imperdiet
Department of Cras Eros
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <this is the current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
Department of Curabitur a Purus
Department of Sed Accumsan
Department of Etiam
Research
Administration
Contact us
Impressum
which is frankly overwhelming.
    By making "Department of Mediaeval Surgery" a soft root, the menu
becomes much more manageable:
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
"""
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
# only apply this modifier if we're pre-cut (since what we do is cut)
# or if no id argument is provided, indicating {% show_menu_below_id %}
if post_cut or root_id:
return nodes
selected = None
root_nodes = []
# find the selected node as well as all the root nodes
for node in nodes:
if node.selected:
selected = node
if not node.parent:
root_nodes.append(node)
# if we found a selected ...
if selected:
# and the selected is a softroot
if selected.attr.get("soft_root", False):
# get it's descendants
nodes = selected.get_descendants()
# remove the link to parent
selected.parent = None
# make the selected page the root in the menu
nodes = [selected] + nodes
else:
# if it's not a soft root, walk ancestors (upwards!)
nodes = self.find_ancestors_and_remove_children(selected, nodes)
return nodes
def find_and_remove_children(self, node, nodes):
for child in node.children:
if child.attr.get("soft_root", False):
self.remove_children(child, nodes)
return nodes
def remove_children(self, node, nodes):
for child in node.children:
nodes.remove(child)
self.remove_children(child, nodes)
node.children = []
def find_ancestors_and_remove_children(self, node, nodes):
"""
Check ancestors of node for soft roots
"""
if node.parent:
if node.parent.attr.get("soft_root", False):
nodes = node.parent.get_descendants()
node.parent.parent = None
nodes = [node.parent] + nodes
else:
nodes = self.find_ancestors_and_remove_children(
node.parent, nodes)
else:
for newnode in nodes:
if newnode != node and not newnode.parent:
self.find_and_remove_children(newnode, nodes)
for child in node.children:
if child != node:
self.find_and_remove_children(child, nodes)
return nodes
menu_pool.register_modifier(SoftRootCutter)
| bsd-3-clause | 7,715,891,116,846,316,000 | 36.992958 | 103 | 0.571517 | false |
hadware/lexicographer | epub-parser/src/epub_to_json/epub_to_json.py | 1 | 5101 | import sys
from epub import open_epub
import simplejson as json
from bs4 import BeautifulSoup, Tag
class SimpleChapter(object):
def __init__(self, name, text):
self.name = name
self.text = text
class Parser(object):
def __init__(self, epub_path):
self.epub_file = open_epub(epub_path, 'r')
# current item used for navigation
self.current_item = None
# soup for the current item
self.item_data_soup = None
def _get_metadata_(self, metadata):
dict = {}
# get metadata
dict['titles'] = [x for x in metadata.titles[0] if x]
dict['creators'] = [x for x in metadata.creators[0] if x]
dict['subjects'] = [x for x in metadata.subjects if x]
dict['identifiers'] = [x for x in metadata.identifiers[0] if x]
dict['dates'] = [x for x in metadata.dates[0] if x]
dict['right'] = metadata.right
# return filled dict
return dict
def _get_text_chapter_(self, current_tag, next_tag=None, first_item=False):
if first_item:
chapter_text = current_tag.get_text()
else:
chapter_text = ''
for elem in current_tag.next_siblings:
# if next tag
if next_tag is not None and isinstance(elem, Tag) and elem == next_tag:
break
# else, append text
elif isinstance(elem, Tag):
text = elem.get_text()
# if end of ebook
if "Project Gutenberg" in text:
break
else:
chapter_text += text
# sanitize text
chapter_text = chapter_text.replace('\n', ' ').replace('*', '').replace('"', ' ')
chapter_text = chapter_text.strip()
return chapter_text
def _switch_item_(self, item):
# if new file or first read
if self.current_item != item or self.item_data_soup is None:
# we change the current item
self.current_item = item
# we read the file
self.item_data_soup = BeautifulSoup(self.epub_file.read_item(item), 'lxml')
def _iterate_chapter_(self, chapters, current_nav, next_nav):
# get chapter name
chapter_name = current_nav.labels[0][0]
# get chapter id & file
split_src = current_nav.src.rsplit('#', 1)
item = self.epub_file.get_item_by_href(split_src[0])
self._switch_item_(item)
# get tag by id
current_tag = self.item_data_soup.find(id=split_src[1])
# determine which tag is next
if current_nav.nav_point:
direct_next = current_nav.nav_point[0]
else:
if next_nav is not None:
direct_next = next_nav
else:
direct_next = None
if direct_next is not None:
next_split = direct_next.src.rsplit('#', 1)
# if next is on same file
if split_src[0] == next_split[0]:
next_tag = self.item_data_soup.find(id=next_split[1])
chapter_text = self._get_text_chapter_(current_tag, next_tag)
else:
# get text remaining on current page
chapter_text = self._get_text_chapter_(current_tag)
# get next item
item = self.epub_file.get_item_by_href(next_split[0])
self._switch_item_(item)
current_tag = self.item_data_soup.body.contents[0]
next_tag = self.item_data_soup.find(id=next_split[1])
chapter_text += self._get_text_chapter_(current_tag, next_tag, True)
else:
chapter_text = self._get_text_chapter_(current_tag)
# add chapter to array if not empty
if chapter_text != '' and "CONTENT" not in chapter_name.upper() and "CHAPTERS" not in chapter_name.upper():
chapters.append(SimpleChapter(chapter_name, chapter_text).__dict__)
# if nav point has subchild
if current_nav.nav_point:
it = iter(current_nav.nav_point)
current_nav = next(it)
for child in it:
self._iterate_chapter_(chapters, current_nav, child)
current_nav = child
self._iterate_chapter_(chapters, current_nav, next_nav)
def epub_to_json(self):
epub = {}
chapters = []
it = iter(self.epub_file.toc.nav_map.nav_point)
current_nav = next(it)
for next_nav in it:
self._iterate_chapter_(chapters, current_nav, next_nav)
current_nav = next_nav
self._iterate_chapter_(chapters, current_nav, None)
# assemble parts
epub['metadatas'] = self._get_metadata_(self.epub_file.opf.metadata)
epub['chapters'] = chapters
# create json object
json_obj = json.dumps(epub, separators=(',', ':'), ensure_ascii=False)
self.epub_file.close()
return json_obj
if __name__ == '__main__':
# need one argument
parser = Parser(sys.argv[1])
parser.epub_to_json()
| gpl-2.0 | 7,686,731,611,143,497,000 | 32.781457 | 115 | 0.554401 | false |
datamade/yournextmp-popit | candidates/tests/test_person_view.py | 1 | 2933 | # Smoke tests for viewing a candidate's page
from datetime import date, timedelta
import re
from django.conf import settings
from django.test.utils import override_settings
from django_webtest import WebTest
from .factories import (
AreaTypeFactory, ElectionFactory, CandidacyExtraFactory,
ParliamentaryChamberFactory, PartyFactory, PartyExtraFactory,
PersonExtraFactory, PostExtraFactory
)
election_date_before = lambda r: {'DATE_TODAY': date.today()}
election_date_after = lambda r: {'DATE_TODAY': date.today() + timedelta(days=28)}
processors = settings.TEMPLATE_CONTEXT_PROCESSORS
processors_before = processors + ("candidates.tests.test_person_view.election_date_before",)
processors_after = processors + ("candidates.tests.test_person_view.election_date_after",)
class TestPersonView(WebTest):
def setUp(self):
wmc_area_type = AreaTypeFactory.create()
election = ElectionFactory.create(
slug='2015',
name='2015 General Election',
area_types=(wmc_area_type,)
)
commons = ParliamentaryChamberFactory.create()
post_extra = PostExtraFactory.create(
elections=(election,),
base__organization=commons,
slug='65808',
base__label='Member of Parliament for Dulwich and West Norwood'
)
person_extra = PersonExtraFactory.create(
base__id='2009',
base__name='Tessa Jowell'
)
PartyFactory.reset_sequence()
party_extra = PartyExtraFactory.create()
CandidacyExtraFactory.create(
election=election,
base__person=person_extra.base,
base__post=post_extra.base,
base__on_behalf_of=party_extra.base
)
def test_get_tessa_jowell(self):
response = self.app.get('/person/2009/tessa-jowell')
self.assertTrue(
re.search(
r'''(?msx)
<h1>Tessa\s+Jowell</h1>\s*
<p>Candidate\s+for\s+
<a\s+href="/election/2015/post/65808/dulwich-and-west-norwood">Dulwich\s+
and\s+West\s+Norwood</a>\s+in\ 2015\s+General\s+Election\s*</p>''',
unicode(response)
)
)
@override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_before)
def test_get_tessa_jowell_before_election(self):
response = self.app.get('/person/2009/tessa-jowell')
self.assertContains(response, 'Contesting in the 2015 General Election')
@override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_after)
def test_get_tessa_jowell_after_election(self):
response = self.app.get('/person/2009/tessa-jowell')
self.assertContains(response, 'Contested in the 2015 General Election')
def test_get_non_existent(self):
response = self.app.get(
'/person/987654/imaginary-person',
expect_errors=True
)
self.assertEqual(response.status_code, 404)
| agpl-3.0 | 2,693,899,126,005,377,500 | 35.6625 | 92 | 0.656325 | false |
antoinecarme/pyaf | setup.py | 1 | 1126 | from setuptools import setup
from setuptools import find_packages
with open("README.md", "r") as fh:
pyaf_long_description = fh.read()
setup(name='pyaf',
version='3.0-RC1',
description='Python Automatic Forecasting',
long_description=pyaf_long_description,
long_description_content_type="text/markdown",
author='Antoine CARME',
author_email='[email protected]',
url='https://github.com/antoinecarme/pyaf',
license='BSD 3-clause',
packages=find_packages(include=['pyaf', 'pyaf.*']),
python_requires='>=3',
classifiers=['Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3'],
keywords='arx automatic-forecasting autoregressive benchmark cycle decomposition exogenous forecasting heroku hierarchical-forecasting horizon jupyter pandas python scikit-learn seasonal time-series transformation trend web-service',
install_requires=[
'scipy',
'pandas',
'sklearn',
'matplotlib',
'pydot',
'dill',
'sqlalchemy'
])
| bsd-3-clause | 8,461,214,734,102,750,000 | 37.827586 | 239 | 0.64476 | false |
vlegoff/tsunami | src/primaires/pnj/contextes/__init__.py | 1 | 1640 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant les contexes de PNJ."""
from . import controler
| bsd-3-clause | 2,335,497,463,469,652,000 | 48.69697 | 79 | 0.777439 | false |
miguelalonso/pywws | src/doc/conf.py | 1 | 9030 | # -*- coding: utf-8 -*-
#
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-15 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# pywws documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 30 08:05:58 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# kludge to allow documentation to be compiled without installing dependencies
class Dummy(object):
def __getattr__(self, name):
if name in ('__file__',):
return None
return Dummy
for mod_name in ('hid', 'oauth2', 'twitter', 'usb', 'usb.core', 'usb.util',
'libusb1', 'usb1', 'daemon', 'daemon.runner'):
sys.modules[mod_name] = Dummy()
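# With these entries in sys.modules, statements such as `import usb.core` inside the
# pywws modules succeed and simply return the Dummy placeholder, so autodoc can import
# those modules on machines (e.g. Read the Docs) that lack the real dependencies.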
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.viewcode']
autosummary_generate = True
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'undoc-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
rst_epilog = """
----
Comments or questions? Please subscribe to the pywws mailing list
http://groups.google.com/group/pywws and let us know.
"""
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pywws'
copyright = u'2008-15, pywws contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version =
# The full version, including alpha/beta/rc tags.
#release =
from pywws import __version__ as release
version = release[:release.rfind('.')]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
if not on_rtd and 'LANG' in os.environ:
language = os.environ['LANG'].split('_')[0]
locale_dirs = ['../pywws/lang']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
html_logo = 'pywws_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
html_favicon = 'pywws_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pywwsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pywws.tex', u'pywws Documentation',
u'Jim Easterbrook', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pywws', u'pywws Documentation',
[u'Jim Easterbrook'], 1)
]
| gpl-2.0 | 7,874,652,060,301,545,000 | 31.956204 | 81 | 0.706645 | false |
PaddlePaddle/Paddle | python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py | 1 | 50054 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from functools import reduce
import collections
import math
import os
import warnings
import logging
import six
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.core import CommContext
import paddle.fluid.framework as framework
from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
from paddle.fluid.incubate.fleet.parameter_server.ir import vars_metatools
from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundRobin, PSDispatcher
from paddle.fluid.transpiler.details.program_utils import delete_ops
OP_NAME_SCOPE = "op_namescope"
CLIP_OP_NAME_SCOPE = "gradient_clip"
STEP_COUNTER = "@PS_STEP_COUNTER@"
LEARNING_RATE_DECAY_COUNTER = "@LR_DECAY_COUNTER@"
OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
RPC_OP_ROLE_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleAttrName()
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched
OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize
SPARSE_OP_LIST = ["lookup_table", "lookup_table_v2"]
SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"}
def _get_lr_ops(program):
lr_ops = []
for index, op in enumerate(program.global_block().ops):
role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME))
if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \
role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \
int(OPT_OP_ROLE_ATTR_VALUE):
lr_ops.append(op)
return lr_ops
def _has_global_step(lr_ops):
if len(lr_ops) > 0:
for idx, op in enumerate(lr_ops):
if op.type != 'increment':
continue
counter = op.input("X")[0]
if counter == LEARNING_RATE_DECAY_COUNTER:
return True
return False
def is_sparse_op(op):
if op.type in SPARSE_OP_LIST and op.attr('is_sparse') is True and op.attr(
'is_distributed') is False:
return True
if op.type == "distributed_lookup_table" and op.attr(
'is_distributed') is False:
return True
return False
def is_distributed_sparse_op(op):
if op.type in SPARSE_OP_LIST and op.attr('is_distributed') is True:
return True
if op.type == "distributed_lookup_table" and op.attr(
'is_distributed') is True:
return True
return False
def get_sparse_tablename(op):
return op.input("W")[0]
def get_sparse_tablenames(program, is_distributed):
tablenames = set()
if is_distributed:
for op in program.global_block().ops:
if is_distributed_sparse_op(op):
tablenames.add(get_sparse_tablename(op))
else:
for op in program.global_block().ops:
if is_sparse_op(op):
tablenames.add(get_sparse_tablename(op))
return list(tablenames)
class MergedVariable:
def __init__(self, merged, ordered, offsets):
self.merged_var = merged
self.ordered_vars = ordered
self.offsets = offsets
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
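# Note: the Singleton decorator above caches one instance per decorated class,
# so repeated constructions of CompileTimeStrategy return the same object for
# the lifetime of the process (arguments of later calls are ignored).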
@Singleton
class CompileTimeStrategy(object):
def __init__(self, main_program, startup_program, strategy, role_maker):
self.min_block_size = 81920
self.origin_main_program = main_program
self.origin_startup_program = startup_program
self.origin_ps_main_program = main_program
self.origin_ps_startup_program = startup_program
self.strategy = strategy
self.role_maker = role_maker
self.use_ps_gpu = False
try:
self.is_heter_ps_mode = role_maker._is_heter_parameter_server_mode
except:
warnings.warn(
"Using paddle.distributed.fleet instead of paddle.fluid.incubate.fleet"
)
self.is_heter_ps_mode = False
self.origin_sparse_pairs = []
self.origin_dense_pairs = []
self.merged_variables_pairs = []
self.merged_dense_pairs = []
self.merged_sparse_pairs = []
self.merged_variable_map = {}
self.param_name_to_grad_name = {}
self.grad_name_to_param_name = {}
self.param_grad_ep_mapping = collections.OrderedDict()
self.grad_param_mapping = collections.OrderedDict()
self._build_var_distributed()
self.tensor_table_dict = {}
# for heter-ps save variables
self.origin_merged_variables_pairs = list(self.merged_variables_pairs)
self.origin_merged_dense_pairs = list(self.merged_dense_pairs)
self.origin_merged_sparse_pairs = list(self.merged_sparse_pairs)
def get_distributed_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode
def is_sync_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.SYNC
def is_geo_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.GEO
def is_async_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.ASYNC
def get_role_id(self):
try:
return self.role_maker._role_id()
except Exception:
return self.role_maker.role_id()
def get_trainers(self):
try:
return self.role_maker._worker_num()
except Exception:
return self.role_maker.worker_num()
def get_ps_endpoint(self):
try:
return self.role_maker._get_pserver_endpoints()[self.get_role_id()]
except Exception:
return self.role_maker.get_pserver_endpoints()[self.get_role_id()]
def get_ps_endpoints(self):
try:
return self.role_maker._get_pserver_endpoints()
except Exception:
return self.role_maker.get_pserver_endpoints()
def get_heter_worker_endpoints(self):
try:
return self.role_maker._get_heter_worker_endpoints()
except Exception:
return self.role_maker.get_heter_worker_endpoints()
def get_heter_worker_endpoint(self):
try:
return self.role_maker._get_heter_worker_endpoint()
except Exception:
return self.role_maker.get_heter_worker_endpoint()
def get_origin_programs(self):
return self.origin_main_program, self.origin_startup_program
def get_origin_main_program(self):
return self.origin_main_program
def get_origin_startup_program(self):
return self.origin_startup_program
def set_origin_ps_main_program(self, program):
self.origin_ps_main_program = program
def set_origin_ps_startup_program(self, program):
self.origin_ps_startup_program = program
def get_origin_ps_main_program(self):
return self.origin_ps_main_program
def get_origin_ps_startup_program(self):
return self.origin_ps_startup_program
def add_tensor_table(self,
feed_var_name,
fetch_var_name="",
startup_program=None,
main_program=None,
tensor_table_class=""):
self.tensor_table_dict[feed_var_name] = {}
self.tensor_table_dict[feed_var_name]["feed_var_name"] = feed_var_name
self.tensor_table_dict[feed_var_name]["fetch_var_name"] = fetch_var_name
self.tensor_table_dict[feed_var_name][
"startup_program"] = startup_program
self.tensor_table_dict[feed_var_name]["main_program"] = main_program
self.tensor_table_dict[feed_var_name][
"tensor_table_class"] = tensor_table_class
def get_tensor_table_dict(self):
return self.tensor_table_dict
def get_sparse_varname_on_ps(self, is_distributed, endpoint=None):
if not endpoint:
endpoint = self.get_ps_endpoint()
varnames = get_sparse_tablenames(self.get_origin_main_program(),
is_distributed)
ps_sparse_varnames = []
for varname in varnames:
tables = self.get_var_distributed(varname, True)
for i in range(len(tables)):
table, ep, _ = tables[i]
if ep == endpoint:
ps_sparse_varnames.append(table)
return ps_sparse_varnames
def get_optimize_varname_on_ps(self, param_name):
origin_param_name, _, _ = _get_varname_parts(param_name)
optimize_var_names = []
for op in self.get_origin_main_program().global_block().ops:
# check all optimizer op
if int(op.all_attrs()["op_role"]) == 2:
# check param name
if op.input("Param")[0] != origin_param_name:
continue
# check all input
for key in op.input_names:
if key in [
"Param", "Grad", "LearningRate", "Beta1Tensor",
"Beta2Tensor"
]:
continue
                    # check variable shape related param, e.g. Moment1
optimize_var_names += self._get_optimizer_param_related_var_name(
op, op.type, key)
return optimize_var_names
def _get_optimizer_param_related_var_name(self, op, op_type, varkey):
"""
        Return the names of the optimizer input variables that need to be loaded.
"""
related_var_names = []
if op_type == "adam":
if varkey in ["Moment1", "Moment2"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "adagrad":
if varkey == "Moment":
related_var_names.append(op.input(varkey)[0])
elif op_type in ["momentum", "lars_momentum"]:
if varkey == "Velocity":
related_var_names.append(op.input(varkey)[0])
elif op_type == "rmsprop":
if varkey in ["Moment", "MeanSquare"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "ftrl":
if varkey in ["SquaredAccumulator", "LinearAccumulator"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "sgd":
pass
else:
raise ValueError(
"Not supported optimizer for distributed training: %s" %
op_type)
return related_var_names
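    # Illustrative example (variable names are made up): for an "adam" optimizer
    # op the related variables are its Moment1 and Moment2 accumulators, so a
    # call with varkey "Moment1" might return ["fc_0.w_0_moment1_0"]; the exact
    # names depend on how the optimizer was constructed.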
def build_ctx(self,
vars,
mapping,
is_grad,
is_sparse,
is_send,
is_distributed=False):
def get_grad_var_ep(slices):
names = []
eps = []
sections = []
for slice in slices:
if self.is_geo_mode():
if is_send:
names.append("{}.delta".format(slice.name))
else:
names.append(slice.name)
elif is_grad and self.is_sync_mode() and self.get_trainers(
) > 1:
names.append("{}.trainer_{}".format(slice.name,
self.get_role_id()))
else:
names.append(slice.name)
sections.append(slice.shape[0])
for ep, pairs in self.param_grad_ep_mapping.items():
params, grads = pairs["params"], pairs["grads"]
for var in params + grads:
if slice.name == var.name:
eps.append(ep)
break
return names, eps, sections
if isinstance(vars, MergedVariable):
name = vars.merged_var.name
slices = mapping[name]
names, eps, sections = get_grad_var_ep(slices)
origin_varnames = [var.name for var in vars.ordered_vars]
else:
name = vars.name
slices = mapping[name]
names, eps, sections = get_grad_var_ep(slices)
origin_varnames = [vars.name]
trainer_id = self.get_role_id()
aggregate = True
ctx = CommContext(name, names, eps, sections, origin_varnames,
trainer_id, aggregate, is_sparse, is_distributed)
return ctx
def get_trainer_send_context(self):
send_ctx = {}
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
if not self.is_geo_mode():
for merged in self.merged_dense_pairs:
grad = merged[1]
ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
True)
send_ctx[ctx.var_name()] = ctx
for merged in self.merged_sparse_pairs:
param = merged[0]
grad = merged[1]
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
if self.is_async_mode():
name, ctx = self._step_ctx()
send_ctx[name] = ctx
else:
for pairs in self.origin_sparse_pairs:
param, grad = pairs
param_name = param.name
is_distributed = True if param_name in distibuted_varnames else False
param_ctx = self.build_ctx(param, self.param_var_mapping, False,
True, True, is_distributed)
grad_ctx = self.build_ctx(grad, self.grad_var_mapping, True,
True, True, is_distributed)
ctx = CommContext(param_ctx.var_name(),
param_ctx.split_varnames(),
param_ctx.split_endpoints(),
param_ctx.sections(),
grad_ctx.origin_varnames(),
param_ctx.trainer_id(),
param_ctx.aggregate(),
param_ctx.is_sparse(),
param_ctx.is_distributed())
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
return send_ctx
def get_communicator_send_context(self):
send_ctx = {}
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
if self.is_geo_mode():
for pairs in self.merged_dense_pairs:
param = pairs[0]
ctx = self.build_ctx(param, self.param_var_mapping, False,
False, True)
send_ctx[ctx.var_name()] = ctx
for pairs in self.merged_sparse_pairs:
param = pairs[0]
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
else:
for merged in self.merged_dense_pairs:
grad = merged[1]
ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
True)
send_ctx[ctx.var_name()] = ctx
for merged in self.merged_sparse_pairs:
param, grad = merged
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
return send_ctx
def get_communicator_recv_context(self,
recv_type=1,
use_origin_program=False):
# recv_type
# 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
sparse_varnames = []
for pairs in self.origin_sparse_pairs:
param, grad = pairs
sparse_varnames.append(param.name)
dense_recv_ctx = {}
sparse_recv_ctx = {}
distributed_recv_ctx = {}
variables_pairs = self.merged_variables_pairs if not use_origin_program else self.origin_merged_variables_pairs
for merged in variables_pairs:
params = merged[0]
if params.merged_var.name in sparse_varnames:
continue
ctx = self.build_ctx(params, self.param_var_mapping, False, False,
False, False)
dense_recv_ctx[ctx.var_name()] = ctx
for pairs in self.origin_sparse_pairs:
param, grad = pairs
if param.name in distibuted_varnames:
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
False, True)
distributed_recv_ctx[ctx.var_name()] = ctx
else:
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
False, False)
sparse_recv_ctx[ctx.var_name()] = ctx
if recv_type == 1:
return dense_recv_ctx
if recv_type == 2:
return sparse_recv_ctx
if recv_type == 3:
return distributed_recv_ctx
if recv_type == 4:
dense_recv_ctx.update(sparse_recv_ctx)
dense_recv_ctx.update(distributed_recv_ctx)
return dense_recv_ctx
        raise ValueError(
            "recv_type can only be 1/2/3/4, 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL"
        )
def get_the_one_trainer_send_context(self, split_dense_table):
if self.is_geo_mode():
send_ctx = {}
trainer_id = self.get_role_id()
idx = 0
distibuted_varnames = get_sparse_tablenames(
self.origin_main_program, True)
for merged in self.merged_sparse_pairs:
param, grad = merged
grad_name = grad.merged_var.name
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
var_numel = reduce(lambda x, y: x * y, var.shape[1:])
sparse_ctx = CommContext(grad_name, [grad_name],
["127.0.0.1:6071"], [var_numel],
[grad_name], trainer_id, True, True,
is_distributed, idx, False)
idx += 1
send_ctx[sparse_ctx.var_name()] = sparse_ctx
if len(send_ctx) == 0:
raise ValueError(
"GeoSGD require sparse parameters in your net.")
if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
name, ctx = self._step_ctx(idx)
send_ctx[name] = ctx
return send_ctx
else:
return self.get_the_one_send_context(split_dense_table)
def get_dense_send_context(self,
send_ctx,
idx,
merged_dense_pairs,
trainer_id,
split_dense_table=False):
if len(merged_dense_pairs) < 1:
return idx
if not split_dense_table:
origin_varnames = []
var_numel = 0
for merged in merged_dense_pairs:
grad = merged[1]
origin_varnames.append(grad.merged_var.name)
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
var_numel += reduce(lambda x, y: x * y, var.shape)
grad_name = "Dense@Grad"
trainer_id = self.get_role_id()
aggregate = True
dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"],
[var_numel], origin_varnames, trainer_id,
aggregate, False, False, idx, False)
send_ctx[grad_name] = dense_ctx
idx += 1
else:
for merged in merged_dense_pairs:
grad = merged[1]
origin_varname = grad.merged_var.name
var = self.origin_main_program.global_block().vars[
origin_varname]
var_numel = reduce(lambda x, y: x * y, var.shape)
grad_name = origin_varname
aggregate = True
dense_ctx = CommContext(grad_name, [grad_name],
["127.0.0.1:6071"], [var_numel],
[origin_varname], trainer_id, aggregate,
False, False, idx, False)
send_ctx[grad_name] = dense_ctx
idx += 1
return idx
def get_the_one_send_context(self,
split_dense_table=False,
use_origin_program=False,
ep_list=None):
if ep_list is None:
ep_list = ["127.0.0.1:6071"]
send_ctx = {}
trainer_id = self.get_role_id()
idx = 0
merged_dense_pairs = self.origin_merged_dense_pairs if use_origin_program else self.merged_dense_pairs
merged_sparse_pairs = self.origin_merged_sparse_pairs if use_origin_program else self.merged_sparse_pairs
idx += self.get_dense_send_context(send_ctx, idx, merged_dense_pairs,
trainer_id, split_dense_table)
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
for merged in merged_sparse_pairs:
param, grad = merged
grad_name = grad.merged_var.name
param_name = param.merged_var.name
splited_varname = []
for i in range(len(ep_list)):
splited_varname.append("{}.block{}".format(param_name, i))
is_distributed = True if param_name in distibuted_varnames else False
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
shape = list(var.shape)
shape[0] = 0 if is_distributed else shape[0]
sparse_ctx = CommContext(grad_name, splited_varname, ep_list, shape,
[grad_name], trainer_id, True, True,
is_distributed, idx, False)
idx += 1
send_ctx[sparse_ctx.var_name()] = sparse_ctx
if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
name, ctx = self._step_ctx(idx)
send_ctx[name] = ctx
return send_ctx
def get_the_one_recv_context(self,
is_dense=True,
split_dense_table=False,
use_origin_program=False):
recv_id_maps = {}
if is_dense:
send_ctx = self.get_the_one_send_context(
split_dense_table=split_dense_table,
use_origin_program=use_origin_program)
for idx, (name, ctx) in enumerate(send_ctx.items()):
if ctx.is_sparse():
continue
if ctx.is_tensor_table():
continue
origin_grad_varnames = ctx.origin_varnames()
param_names = []
for grad_varname in origin_grad_varnames:
param_name = self.grad_name_to_param_name[grad_varname]
param_names.append(param_name)
recv_id_maps[ctx.table_id()] = param_names
else:
send_ctx = self.get_the_one_send_context()
for idx, (name, ctx) in enumerate(send_ctx.items()):
if not ctx.is_sparse():
continue
origin_grad_varnames = ctx.origin_varnames()
param_names = []
for grad_varname in origin_grad_varnames:
param_name = self.grad_name_to_param_name[grad_varname]
param_names.append(param_name)
recv_id_maps[ctx.table_id()] = param_names
return recv_id_maps
def get_server_runtime_config(self):
return self.strategy.get_server_runtime_config()
def get_var_distributed(self, varname, is_param):
var_distributed = []
offset = 0
if is_param:
params = self.param_var_mapping[varname]
param_varnames = [var.name for var in params]
for ep, pairs in self.param_grad_ep_mapping.items():
for p in pairs["params"]:
if p.name in param_varnames:
offset += p.shape[0]
var_distributed.append((p.name, ep, p.shape[0]))
else:
grads = self.grad_var_mapping[varname]
grad_varnames = [var.name for var in grads]
for ep, pairs in self.param_grad_ep_mapping.items():
for g in pairs["grads"]:
if g.name in grad_varnames:
var_distributed.append((g.name, ep, g.shape[0]))
return var_distributed
def _step_ctx(self, idx):
name = STEP_COUNTER
trainer_id = self.get_role_id()
endpoints = self.get_ps_endpoints()
sections = [1] * len(endpoints)
names = [name] * len(endpoints)
ctx = CommContext(name, names, endpoints, sections, [name], trainer_id,
True, False, False, idx, True)
return name, ctx
def _create_vars_from_blocklist(self, block_list):
"""
Create vars for each split.
        NOTE: only gradients need trainer-specific names; that renaming happens
        when the communication contexts are built (see build_ctx).
        Args:
            block_list (list[str]): list of variable blocks, each encoded as a
                "varname:offset:size" string (see vars_metatools.VarBlock).
        Returns:
            var_mapping (collections.OrderedDict(varname->[split_vars])): a dict mapping
                from each original var name to the list of its split variables.
"""
# varname->[(block_id, current_block_size)]
block_map = collections.OrderedDict()
var_mapping = collections.OrderedDict()
for block_str in block_list:
varname, offset, size = block_str.split(":")
if varname not in block_map:
block_map[varname] = []
block_map[varname].append((int(offset), int(size)))
for varname, split in six.iteritems(block_map):
orig_var = self.merged_variable_map[varname]
if len(split) == 1:
var_mapping[varname] = [orig_var]
self.var_distributed.add_distributed_var(
origin_var=orig_var,
slice_var=orig_var,
block_id=0,
offset=0,
is_slice=False,
vtype="Param")
else:
var_mapping[varname] = []
orig_shape = orig_var.shape
orig_dim1_flatten = 1
if len(orig_shape) >= 2:
orig_dim1_flatten = reduce(lambda x, y: x * y,
orig_shape[1:])
for i, block in enumerate(split):
size = block[1]
rows = size // orig_dim1_flatten
splited_shape = [rows]
if len(orig_shape) >= 2:
splited_shape.extend(orig_shape[1:])
new_var_name = "%s.block%d" % (varname, i)
slice_var = vars_metatools.VarStruct(
name=new_var_name,
shape=splited_shape,
dtype=orig_var.dtype,
type=orig_var.type,
lod_level=orig_var.lod_level,
persistable=False)
var_mapping[varname].append(slice_var)
self.var_distributed.add_distributed_var(
origin_var=orig_var,
slice_var=slice_var,
block_id=i,
offset=-1,
is_slice=False,
vtype="Param")
return var_mapping
def _dispatcher(self):
ps_dispatcher = RoundRobin(self.get_ps_endpoints())
ps_dispatcher.reset()
grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping))
sparse_gradnames = [grad.name for _, grad in self.origin_sparse_pairs]
for grad_varname, splited_vars in grad_var_mapping_items:
if grad_varname in sparse_gradnames:
continue
send_vars = []
for _, var in enumerate(splited_vars):
send_vars.append(var)
recv_vars = []
for _, var in enumerate(send_vars):
recv_vars.append(self.grad_param_mapping[var])
eps = ps_dispatcher.dispatch(recv_vars)
for i, ep in enumerate(eps):
self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])
for grad_varname, splited_vars in grad_var_mapping_items:
if grad_varname not in sparse_gradnames:
continue
ps_dispatcher.reset()
send_vars = []
for _, var in enumerate(splited_vars):
send_vars.append(var)
recv_vars = []
for _, var in enumerate(send_vars):
recv_vars.append(self.grad_param_mapping[var])
eps = ps_dispatcher.dispatch(recv_vars)
for i, ep in enumerate(eps):
self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])
def _slice_variable(self,
var_list,
slice_count,
min_block_size,
uniform=False):
"""
We may need to split dense tensor to one or more blocks and put
them equally onto parameter server. One block is a sub-tensor
aligned by dim[0] of the tensor.
We need to have a minimal block size so that the calculations in
the parameter server side can gain better performance. By default
minimum block size 8K elements (maybe 16bit or 32bit or 64bit).
Args:
var_list (list): List of variables.
            slice_count (int): number of slices the variables will be split into,
                which is typically the number of pserver services.
min_block_size (int): Minimum split block size.
Returns:
blocks (list[(varname, block_id, current_block_size)]): A list
of VarBlocks. Each VarBlock specifies a shard of the var.
"""
blocks = []
for var in var_list:
if not uniform:
var_numel = reduce(lambda x, y: x * y, var.shape)
split_count = 1
if min_block_size == -1:
split_count = 1
else:
split_count = slice_count
max_pserver_count = int(
math.floor(var_numel / float(min_block_size)))
if max_pserver_count == 0:
max_pserver_count = 1
if max_pserver_count < slice_count:
split_count = max_pserver_count
block_size = int(math.ceil(var_numel / float(split_count)))
if len(var.shape) >= 2:
# align by dim1(width)
dim1 = reduce(lambda x, y: x * y, var.shape[1:])
remains = block_size % dim1
if remains != 0:
block_size += dim1 - remains
# update split_count after aligning
split_count = int(math.ceil(var_numel / float(block_size)))
for block_id in range(split_count):
curr_block_size = min(block_size, var_numel - (
(block_id) * block_size))
block = vars_metatools.VarBlock(var.name, block_id,
curr_block_size)
blocks.append(str(block))
else:
                block_size = var.shape[0] // slice_count  # integer division: rows per shard
remainder = var.shape[0] % slice_count
if block_size == 0:
dim0s = [block_size] * remainder
else:
dim0s = [block_size] * slice_count
for i in range(remainder):
dim0s[i] = dim0s[i] + 1
dim1 = reduce(lambda x, y: x * y, var.shape[1:])
for block_id in range(len(dim0s)):
numel = dim0s[block_id] * dim1
block = vars_metatools.VarBlock(var.name, block_id, numel)
blocks.append(str(block))
return blocks
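    # Worked example (illustrative): with 4 pserver endpoints and the default
    # min_block_size of 81920, a dense var of shape (10000, 64) has 640000
    # elements; floor(640000 / 81920) = 7 >= 4, so split_count stays 4;
    # block_size = ceil(640000 / 4) = 160000, already a multiple of dim1 = 64,
    # giving 4 blocks of 2500 rows each.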
def _get_param_grad_blocks(self, pairs, min_block_size, uniform=False):
param_list = []
grad_list = []
param_grad_set = set()
for p, g in pairs:
# todo(tangwei12) skip parameter marked not trainable
# if type(p) == Parameter and p.trainable == False:
# continue
p = p.merged_var
g = g.merged_var
if p.name not in param_grad_set:
param_list.append(p)
param_grad_set.add(p.name)
if g.name not in param_grad_set:
grad_list.append(g)
param_grad_set.add(g.name)
# when we slice var up into blocks, we will slice the var according to
# pserver services' count. A pserver may have two or more listening ports.
grad_blocks = self._slice_variable(grad_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
param_blocks = self._slice_variable(param_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
return param_blocks, grad_blocks
def _var_slice_and_distribute(self):
# update these mappings for further transpile:
# 1. param_var_mapping : param var name->[split params vars]
# 2. grad_var_mapping : grad var name->[split grads vars]
# 3. grad_param_mapping : grad.blockx->param.blockx
# 4. param_grad_ep_mapping : ep->{"params" : [], "grads" : [] }
dps, dgs = self._get_param_grad_blocks(self.merged_dense_pairs,
self.min_block_size, False)
sps, sgs = self._get_param_grad_blocks(self.merged_sparse_pairs,
self.min_block_size, True)
param_blocks = dps + sps
grad_blocks = dgs + sgs
assert (len(grad_blocks) == len(param_blocks))
# origin_param_name->[splited_param_vars]
self.param_var_mapping = self._create_vars_from_blocklist(param_blocks)
self.grad_var_mapping = self._create_vars_from_blocklist(grad_blocks)
# dict(grad_splited_var->param_splited_var)
self.grad_param_mapping = collections.OrderedDict()
for g, p in zip(grad_blocks, param_blocks):
g_name, g_bid, _ = g.split(":")
p_name, p_bid, _ = p.split(":")
self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \
self.param_var_mapping[p_name][int(p_bid)]
print_maps = {}
for k, v in self.grad_param_mapping.items():
print_maps[str(k)] = str(v)
# create mapping of endpoint->split var to create pserver side program
self.param_grad_ep_mapping = collections.OrderedDict()
[
self.param_grad_ep_mapping.update({
ep: {
"params": [],
"grads": []
}
}) for ep in self.get_ps_endpoints()
]
def _build_var_distributed(self):
self.var_distributed = vars_metatools.VarsDistributed()
sparse_pairs, dense_pairs = self.get_param_grads()
origin_for_sparse = []
origin_for_dense = []
param_name_grad_name = dict()
grad_name_to_param_name = dict()
for param, grad in sparse_pairs:
param = vars_metatools.create_var_struct(param)
grad = vars_metatools.create_var_struct(grad)
origin_for_sparse.append((param, grad))
for param, grad in dense_pairs:
param = vars_metatools.create_var_struct(param)
grad = vars_metatools.create_var_struct(grad)
origin_for_dense.append((param, grad))
for dense_pair in origin_for_dense:
param, grad = dense_pair
m_param = MergedVariable(param, [param], [0])
m_grad = MergedVariable(grad, [grad], [0])
self.merged_variables_pairs.append((m_param, m_grad))
self.merged_dense_pairs.append((m_param, m_grad))
for sparse_pair in origin_for_sparse:
param, grad = sparse_pair
m_param = MergedVariable(param, [param], [0])
m_grad = MergedVariable(grad, [grad], [0])
self.merged_variables_pairs.append((m_param, m_grad))
self.merged_sparse_pairs.append((m_param, m_grad))
for merged in self.merged_variables_pairs:
m_param, m_grad = merged
self.merged_variable_map[
m_param.merged_var.name] = m_param.merged_var
self.merged_variable_map[m_grad.merged_var.name] = m_grad.merged_var
param_merges = []
param_merges.extend(origin_for_sparse)
param_merges.extend(origin_for_dense)
for param, grad in param_merges:
param_name_grad_name[param.name] = grad.name
grad_name_to_param_name[grad.name] = param.name
self.origin_sparse_pairs = origin_for_sparse
self.origin_dense_pairs = origin_for_dense
self.param_name_to_grad_name = param_name_grad_name
self.grad_name_to_param_name = grad_name_to_param_name
sparse_pair_map = collections.OrderedDict()
for pair in self.origin_sparse_pairs + self.origin_dense_pairs:
param, grad = pair
sparse_pair_map[param.name] = str(param)
sparse_pair_map[grad.name] = str(grad)
self._var_slice_and_distribute()
self._dispatcher()
def get_param_grads(self):
origin_program = self.origin_main_program
def _get_params_grads(sparse_varnames):
block = origin_program.global_block()
dense_param_grads = []
sparse_param_grads = []
optimize_params = set()
origin_var_dict = origin_program.global_block().vars
role_id = int(core.op_proto_and_checker_maker.OpRole.Backward)
for op in block.ops:
if _is_opt_role_op(op):
# delete clip op from opt_ops when run in Parameter Server mode
if OP_NAME_SCOPE in op.all_attrs() \
and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
op._set_attr("op_role", role_id)
continue
if op.attr(OP_ROLE_VAR_ATTR_NAME):
param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0]
grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
if param_name not in optimize_params:
optimize_params.add(param_name)
param_grad = (origin_var_dict[param_name],
origin_var_dict[grad_name])
if param_name in sparse_varnames:
sparse_param_grads.append(param_grad)
else:
dense_param_grads.append(param_grad)
return sparse_param_grads, dense_param_grads
def _get_sparse_varnames():
varnames = []
for op in origin_program.global_block().ops:
if op.type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0]
varnames.append(param_name)
return list(set(varnames))
sparse_varnames = _get_sparse_varnames()
sparse_param_grads, dense_param_grads = _get_params_grads(
sparse_varnames)
return sparse_param_grads, dense_param_grads
def remove_var_pair_by_grad(self, var_name):
for index, pair in enumerate(self.merged_variables_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_variables_pairs[index]
for index, pair in enumerate(self.merged_dense_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_dense_pairs[index]
return
for index, pair in enumerate(self.merged_sparse_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_sparse_pairs[index]
return
print("Not find {} in self.merge_pairs".format(var_name))
def _is_opt_role_op(op):
# NOTE : depend on oprole to find out whether this op is for
# optimize
op_maker = core.op_proto_and_checker_maker
optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize
if op_maker.kOpRoleAttrName() in op.attr_names and \
int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role):
return True
return False
def _get_optimize_ops(_program):
block = _program.global_block()
opt_ops = []
for op in block.ops:
if _is_opt_role_op(op):
# delete clip op from opt_ops when run in Parameter Server mode
if OP_NAME_SCOPE in op.all_attrs() \
and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
op._set_attr(
"op_role",
int(core.op_proto_and_checker_maker.OpRole.Backward))
continue
opt_ops.append(op)
return opt_ops
def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps):
if hasattr(compiled_config.origin_main_program, 'lr_sheduler'):
from paddle.optimizer.lr import LRScheduler
assert isinstance(compiled_config.origin_main_program.lr_sheduler,
LRScheduler), "must be LRScheduler"
ops = _get_optimize_ops(compiled_config.origin_main_program)
lr_param_dict = _get_lr_param_dict(ops)
lr_decay_main_program, lr_decay_startup_program, lr_name = _get_lr_sheduler_program(
compiled_config.origin_main_program.lr_sheduler, lr_param_dict,
lr_decay_steps)
compiled_config.add_tensor_table(
"@LR_DECAY_COUNTER@", lr_name, lr_decay_startup_program,
lr_decay_main_program, "GlobalStepTable")
def _get_lr_param_dict(opt_ops):
lr_param_dict = {}
for op in opt_ops:
lr_name = op.input("LearningRate")[0]
param_name = op.input("Param")[0]
if lr_name not in lr_param_dict:
lr_param_dict[lr_name] = []
lr_param_dict[lr_name].append(param_name)
return lr_param_dict
def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
schedler_decay = [
'NoamDecay', 'NaturalExpDecay', 'InverseTimeDecay', 'ExponentialDecay'
]
from paddle.optimizer.lr import ExponentialDecay, NoamDecay, PiecewiseDecay, NaturalExpDecay, InverseTimeDecay
from paddle.fluid.layers.learning_rate_scheduler import exponential_decay, noam_decay, piecewise_decay, natural_exp_decay, inverse_time_decay
decay_main_program = fluid.framework.Program()
decay_startup_program = fluid.framework.Program()
lr_name = ""
if isinstance(lr_sheduler, ExponentialDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = exponential_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
lr_name = lr.name
logging.warn(
"ExponentialDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
elif isinstance(lr_sheduler, NoamDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0)
lr_name = lr.name
logging.warn("NoamDecay is set, warmup steps is [ %d ]" %
lr_sheduler.warmup_steps)
elif isinstance(lr_sheduler, NaturalExpDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
lr_name = lr.name
logging.warn(
"NaturalExpDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
elif isinstance(lr_sheduler, InverseTimeDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = inverse_time_decay(1.0, lr_decay_steps, lr_sheduler.gamma,
True)
lr_name = lr.name
logging.warn(
"InverseTimeDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
else:
raise ValueError(
"Not supported current LearningRate strategy, please use follow decay strategy: {}".
format(schedler_decay))
return decay_main_program, decay_startup_program, lr_name
def _get_varname_parts(varname):
# returns origin, blockid, trainerid
orig_var_name = ""
trainer_part = ""
block_part = ""
trainer_idx = varname.find(".trainer_")
if trainer_idx >= 0:
trainer_part = varname[trainer_idx + 1:]
else:
trainer_idx = len(varname)
block_index = varname.find(".block")
if block_index >= 0:
block_part = varname[block_index + 1:trainer_idx]
else:
block_index = len(varname)
orig_var_name = varname[0:min(block_index, trainer_idx)]
return orig_var_name, block_part, trainer_part
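# Illustrative example: _get_varname_parts("fc_0.w_0.block0.trainer_1")
# returns ("fc_0.w_0", "block0", "trainer_1"); for a plain name such as
# "fc_0.w_0" both the block and trainer parts are empty strings.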
def _orig_varname(varname):
orig, _, _ = _get_varname_parts(varname)
return orig
| apache-2.0 | -7,821,824,388,392,121,000 | 38.75695 | 145 | 0.537979 | false |
animekita/selvbetjening | selvbetjening/frontend/userportal/views.py | 1 | 7865 | # coding=UTF-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.contrib.formtools.preview import FormPreview
from django.contrib import messages
from django.contrib.auth import login, authenticate
from selvbetjening.core.user.models import SUser
from selvbetjening.businesslogic.members.forms import UserRegistrationForm, ProfileEditForm, UserWebsiteFormSet
from selvbetjening.frontend.userportal.forms import ChangePasswordForm, ChangePictureForm, \
PrivacyForm, ChangeUsernameForm
from selvbetjening.frontend.userportal.processor_handlers import profile_page_processors
from selvbetjening.frontend.userportal.models import UserPrivacy
def profile_redirect(request):
if isinstance(request.user, AnonymousUser):
return HttpResponseRedirect(reverse('members_login'))
else:
return HttpResponseRedirect(reverse('members_profile'))
@login_required
def public_profile_page(request,
username,
template_name='userportal/public_profile.html',
template_no_access_name='userportal/profile_no_access.html'):
user = get_object_or_404(SUser, username=username)
privacy, created = UserPrivacy.objects.get_or_create(user=user)
own_profile = False
if privacy.public_profile:
handler = profile_page_processors.get_handler(request, user)
add_to_profile = handler.view(own_profile)
return render(request,
template_name,
{
'viewed_user': user,
'privacy': privacy,
'add_to_profile': add_to_profile
})
else:
return render(request,
template_no_access_name,
{
'username': user.username
})
@login_required
def profile(request,
template_name='userportal/profile.html'):
user = request.user
privacy = UserPrivacy.full_access()
own_profile = True
own_privacy, created = UserPrivacy.objects.get_or_create(user=user)
handler = profile_page_processors.get_handler(request, user)
add_to_profile = handler.view(own_profile)
return render(request,
template_name,
{
'viewed_user': user,
'privacy': privacy,
'own_privacy': own_privacy,
'add_to_profile': add_to_profile
})
@login_required
def edit_profile(request,
template_name='userportal/edit_profile.html',
success_page='userportal_profile',
form_class=ProfileEditForm):
user = request.user
if request.method == 'POST':
form = form_class(request.POST, instance=user)
website_form = UserWebsiteFormSet(request.POST, instance=user)
if form.is_valid() and website_form.is_valid():
form.save()
website_form.save()
messages.success(request, _(u'Personal information updated'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class(instance=user)
website_form = UserWebsiteFormSet(instance=user)
return render(request,
template_name,
{
'form': form,
'website_form': website_form
})
@login_required
def edit_privacy(request,
form_class=PrivacyForm,
template_name='userportal/edit_privacy.html',
success_page='userportal_profile'):
privacy, created = UserPrivacy.objects.get_or_create(user=request.user)
if request.method == 'POST':
form = form_class(request.POST, instance=privacy)
        if form.is_valid():
form.save()
messages.success(request, _(u'Privacy settings updated'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class(instance=privacy)
return render(request,
template_name,
{
'form': form
})
@login_required
def edit_picture(request,
form_class=ChangePictureForm,
success_page='userportal_profile',
template_name='userportal/edit_picture.html'):
profile = request.user
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
profile.picture = form.cleaned_data['picture']
profile.save()
messages.success(request, _(u'Profile picture changed'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class()
return render(request,
template_name,
{
'form': form,
'user': profile
})
@login_required
def edit_password(request,
template_name='userportal/edit_password.html',
post_change_redirect='userportal_profile',
change_password_form=ChangePasswordForm):
if request.method == 'POST':
form = change_password_form(request.user, request.POST)
if form.is_valid():
form.save()
messages.success(request, _(u'Password changed'))
return HttpResponseRedirect(reverse(post_change_redirect))
else:
form = change_password_form(request.user)
return render(request,
template_name,
{
'form': form,
})
class UsernameChangeView(FormPreview):
preview_template = 'userportal/edit_username_confirmed.html'
form_template = 'userportal/edit_username.html'
def __call__(self, request, *args, **kwargs):
return super(UsernameChangeView, self).__call__(request, *args, **kwargs)
def process_preview(self, request, form, context):
context['new_username'] = form.cleaned_data['new_username']
def done(self, request, cleaned_data):
request.user.username = cleaned_data['new_username']
request.user.save()
messages.success(request, _(u'Username changed'))
return HttpResponseRedirect(reverse('userportal_profile'))
edit_username = login_required(UsernameChangeView(ChangeUsernameForm))
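# Note: FormPreview (django-formtools) drives a two-step flow -- the form is
# first rendered with form_template, process_preview() runs when the submitted
# form is shown for confirmation (preview_template), and done() runs only after
# the user confirms, which is when the username is actually changed here.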
def register(request,
success_page,
form_class=UserRegistrationForm,
login_on_success=False,
template_name='userportal/registration.html'):
""" Allows a new user to register an account.
    success_page -- a reversible view name or a function returning
                    a URL. The function takes a request and a user
object as input.
"""
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
user = form.save()
if login_on_success:
user = authenticate(username=user.username, password=request.POST['password'])
login(request, user)
if callable(success_page):
return HttpResponseRedirect(success_page(request, user))
else:
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class()
return render(request,
template_name,
{
'form': form
}) | mit | 2,199,686,237,095,671,600 | 30.464 | 111 | 0.595168 | false |
choderalab/ensembler | ensembler/pdb.py | 1 | 4246 | import sys
if sys.version_info > (3, 0):
from urllib.request import urlopen
from urllib.error import URLError
from io import StringIO
else:
from urllib2 import urlopen, URLError
from StringIO import StringIO
import gzip
import re
import six
def extract_residues_by_resnum(output_file, pdb_input_file, template):
"""
Parameters
----------
output_file: string or gzip.file_like
pdb_input_file: string or gzip.file_like
"""
if isinstance(pdb_input_file, six.string_types):
with gzip.open(pdb_input_file, 'r') as pdb_file:
pdbtext = pdb_file.readlines()
else:
pdbtext = pdb_input_file.readlines()
# list of resnum strings e.g. ['9', '29', '30B'] must be converted as follows to match the PDB format:
# [' 9 ', ' 29 ', ' 30B']
desired_resnums = ['%4s ' % r if re.match('[0-9]', r[-1]) else '%5s' % r for r in template.resolved_pdbresnums]
if isinstance(output_file, six.string_types):
ofile = open(output_file, 'w')
else:
ofile = output_file
try:
resnums_extracted = {}
model_index = 0
for bytesline in pdbtext:
line = bytesline.decode('UTF-8')
# For PDBs containing multiple MODELs (e.g. NMR structures), extract data only from the first model, ignore others.
if line[0:6] == 'MODEL ':
model_index += 1
if model_index == 2:
break
if line[0:6] in ['ATOM ', 'HETATM']:
resnum = line[22:27]
chainid = line[21]
if chainid == template.chainid:
if resnum in desired_resnums:
ofile.write(line)
resnums_extracted[resnum] = 1
except Exception as e:
print('Exception detected while extracting ATOM/HETATM records:')
print(e)
finally:
if isinstance(output_file, six.string_types):
ofile.close()
if len(resnums_extracted) != len(desired_resnums):
raise Exception(
'Number of residues (%d) extracted from PDB (%s) for template (%s) does not match desired number of residues (%d).' % (
len(resnums_extracted), template.pdbid, template.templateid, len(desired_resnums)
)
)
def retrieve_sifts(pdb_id):
"""Retrieves a SIFTS .xml file, given a PDB ID. Works by modifying the PDBe download URL.
Also removes annoying namespace stuff.
"""
sifts_download_base_url='ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
url = sifts_download_base_url + pdb_id.lower() + '.xml.gz'
try:
response = urlopen(url)
except URLError:
print('ERROR downloading SIFTS file with PDB ID: %s' % pdb_id)
raise
sifts_page = response.read(100000000) # Max 100MB
# Decompress string
sifts_page = gzip.GzipFile(fileobj=StringIO(sifts_page)).read()
# Removing all attribs from the entry tag, and the rdf tag and contents
sifts_page_processed = ''
skip_rdf_tag_flag = False
for line in sifts_page.splitlines():
if line[0:6] == '<entry':
sifts_page_processed += '<entry>' + '\n'
elif line[0:7] == ' <rdf:':
skip_rdf_tag_flag = True
pass
elif line[0:8] == ' </rdf:':
skip_rdf_tag_flag = False
pass
else:
if skip_rdf_tag_flag:
continue
sifts_page_processed += line + '\n'
return sifts_page_processed
def retrieve_pdb(pdb_id, compressed='no'):
"""Retrieves a PDB file, given a PDB ID. Works by modifying the PDB download URL.
"""
pdb_download_base_url='http://www.rcsb.org/pdb/files/'
url = pdb_download_base_url + pdb_id + '.pdb'
if compressed == 'yes':
url += '.gz'
response = urlopen(url)
pdb_file = response.read(10000000) # Max 10MB
return pdb_file
def extract_uniprot_acs_from_sifts_xml(siftsxml):
uniprot_crossrefs = siftsxml.findall('entity/segment/listResidue/residue/crossRefDb[@dbSource="UniProt"]')
uniprot_acs = list(set([uniprot_crossref.get('dbAccessionId') for uniprot_crossref in uniprot_crossrefs]))
return uniprot_acs
| gpl-2.0 | 5,171,825,560,777,637,000 | 35.921739 | 135 | 0.5935 | false |
sdbondi/Arduino-Talk | Comet/python/ArduinoServer.py | 1 | 6552 | #!/usr/bin/python
import human_curl as requests
import serial
import platform
import sys
import getopt
import socket
import json
import time
_WINDOWS = (platform.system() == 'Windows')
_AJAXURL = 'http://localhost/arduino/comet-router.php?action=%(action)s'
#_AJAXURL = 'http://themousepotatowebsite.co.za/experiments/arduino/comet-router.php?action=%(action)s'
#_AUTH = ('stanb', 'arduino1')
_AUTH=None
_CHAROFFSET = 32
_CMDMAP = {
'ping' : chr(_CHAROFFSET + 0),
'pinMode' : chr(_CHAROFFSET + 1),
'digitalWrite': chr(_CHAROFFSET + 2),
'digitalRead' : chr(_CHAROFFSET + 3),
'analogWrite' : chr(_CHAROFFSET + 4),
'analogRead' : chr(_CHAROFFSET + 5),
'beep' : chr(_CHAROFFSET + 11)
}
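# Wire format (as inferred from toArduinoCommand below, noted here for
# reference only): each request is one ASCII line sent over serial -- the
# command character from _CMDMAP, then chr(pin + _CHAROFFSET) if a pin is
# given, an optional 'i'/'o' mode flag, then any arguments joined by '-',
# terminated by a newline. For example, digitalWrite on pin 13 with argument 1
# is encoded as chr(34) + chr(45) + "1" + "\n", i.e. '"-1\n'.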
class ArduinoCommandServer(object):
def __init__(self, sc, opts):
if not sc:
raise ValueError('Serial connection required')
self.serial = sc
self.options = opts or {}
def getIncomingCommands(self):
global _AJAXURL, _AUTH
opts = self.options
url = _AJAXURL % { 'action': 'get_web_data'}
while True:
while True:
try:
resp = requests.get(url, timeout=70, auth=_AUTH)
break;
except requests.exceptions.CurlError as ex:
print 'ERROR ', ex.message, ' Retrying...'
#except requests.exceptions.Timeout:
# print 'Get request timed out. Retrying...'
if resp.status_code != 200 or resp.content == False:
print 'ERROR: status_code %d or no content' % resp.status_code
continue
obj = json.loads(resp.content);
if obj == False:
print 'ERROR: content parse error'
print resp.content
continue
if obj['state'] != 'OK':
print 'ERROR: ', obj['message']
continue;
if obj['result'] == 'TMOUT':
continue
return obj['result']
def toArduinoCommand(self, command):
global _CMDMAP, _CHAROFFSET
if not command['command'] in _CMDMAP:
print 'Unrecognised command: ', command['command']
return False
op_chr = _CMDMAP[command['command']]
if 'pin' in command:
pin = str(command['pin'])
if pin[0] == 'A':
pin = 14 + int(pin[1])
pin = int(pin)
result = op_chr+chr(pin + _CHAROFFSET)
if 'mode' in command:
result += 'i' if command['mode'] == 'input' else 'o'
if 'args' in command and isinstance(command['args'], list):
command['args'] = [str(c) for c in command['args']]
result += '-'.join(command['args'])
return result+'\n'
def toWeb(self, ar_cmd):
op_chr = ar_cmd[0]
if op_chr == 'A':
return 'ACK'
if op_chr == 'R':
return int(ar_cmd[1:])
if op_chr == 'F':
return { 'error': ar_cmd[1:] }
return False
def processCommands(self, commands):
results = []
for command in commands:
cmd_str = self.toArduinoCommand(command)
if not cmd_str:
results.append(False)
continue
ar_reply = ''
i = 0
while len(ar_reply) == 0:
if i % 10 == 0:
self.serial.write(cmd_str)
time.sleep(0.1)
ar_reply = self.serial.readline()
i += 1
functionStr = command['command']+'('
if 'pin' in command:
functionStr += str(command['pin'])
if 'args' in command and isinstance(command['args'], list):
if 'pin' in command:
functionStr += ', '
functionStr += ', '.join(command['args'])
print functionStr + ') -> ' + ar_reply.strip()
results.append(self.toWeb(ar_reply))
return results
def sendResponse(self, batch_id, results):
global _AJAXURL, _AUTH
opts = self.options
url = _AJAXURL % { 'action': 'put_ar_data'}
data = { 'object' : json.dumps({ 'id': batch_id, 'object': results })}
while True:
try:
resp = requests.post(url, data, timeout=10, auth=_AUTH)
break;
except requests.exceptions.CurlError as ex:
print 'ERROR ', ex.message, ' Retrying...'
#except requests.exceptions.Timeout:
# print 'Send request timed out. Retrying...'
if resp.status_code != 200 or resp.content == False:
print 'ERROR: status_code %d or no content' % resp.status_code
return False
obj = json.loads(resp.content);
if obj == False:
print 'ERROR: content parse error'
print resp.content
return False
if obj['state'] != 'OK':
print 'ERROR: ', obj['message']
return False
if obj['result'] == 'TMOUT':
return False
if obj['result'] == 'PASS':
return True
print 'Got unknown result: ', obj
return False
def start(self):
opts = self.options
while True:
print 'Waiting for incoming commands...'
results = self.getIncomingCommands()
print '================================'
print 'Got command(s).'
for _object in results:
batch_id = _object['id']
commands = _object['object']
print 'Batch ID: %d. Processing...' % batch_id
results = self.processCommands(commands)
print 'Sending reply...'
self.sendResponse(batch_id, results)
print 'Done'
print '================================\n\n'
def get_opts(args):
global _WINDOWS
try:
opts, args = getopt.getopt(args, '', ['baud=', 'serialPort='])
except getopt.GetoptError, err:
print str(err)
sys.exit(2)
optsmap = {
'baud': 9600,
'serialPort': not _WINDOWS and '/dev/ttyACM0'
}
for o, a in opts:
if o == "--baud":
optsmap['baud'] = int(a)
elif o == "--serialPort":
optsmap['serialPort'] = a
else:
assert False, "unhandled option"
if optsmap['serialPort'] == False:
raise ValueError('Argument --serialPort= is mandatory')
return optsmap
def main(args):
opts = get_opts(args)
# Check for arduino serial port
try:
sc = serial.Serial(opts['serialPort'], opts['baud'], timeout=0)
except serial.SerialException, err:
print str(err)
print 'Please ensure your Arduino is connected and the port is correct.'
sys.exit(2)
if not sc.isOpen():
print 'Unable to open serial connection to Arduino.'
sys.exit(1)
print 'Connected to serial on', opts['serialPort']
try:
# Start relay server
while 1:
server = ArduinoCommandServer(sc, opts)
server.start()
finally:
if sc and sc.isOpen():
sc.close()
if __name__ == '__main__':
main(sys.argv[1:])
| mit | 1,833,642,196,712,729,600 | 24.297297 | 103 | 0.573107 | false |
thorwhalen/ut | ml/sk/transformers.py | 1 | 4610 |
__author__ = 'thor'
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.neighbors import KNeighborsRegressor
from pandas import DataFrame
import numpy as np
from nltk import word_tokenize
from functools import reduce
class HourOfDayTransformer(TransformerMixin):
def __init__(self, date_field='datetime'):
self.date_field = date_field
def transform(self, X, **transform_params):
hours = DataFrame(X[self.date_field].apply(lambda x: x.hour))
return hours
def fit(self, X, y=None, **fit_params):
return self
class ModelTransformer(TransformerMixin):
"""
Sometimes transformers do need to be fitted.
ModelTransformer is used to wrap a scikit-learn model and make it behave like a transformer.
This is useful when you want to use something like a KMeans clustering model to generate features for another model.
It needs to be fitted in order to train the model it wraps.
"""
def __init__(self, model):
self.model = model
def fit(self, *args, **kwargs):
self.model.fit(*args, **kwargs)
return self
def transform(self, X, **transform_params):
return DataFrame(self.model.predict(X))
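# Minimal usage sketch (illustrative only; the data and step names are made up):
# wrap a clustering model so its predictions become features for a downstream
# estimator inside a Pipeline.
#
#   from sklearn.cluster import KMeans
#   from sklearn.pipeline import Pipeline
#   pipeline = Pipeline([
#       ('cluster_id', ModelTransformer(KMeans(n_clusters=8))),
#       ('regression', KNeighborsRegressor(n_neighbors=5)),
#   ])
#   pipeline.fit(X_train, y_train)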
class KVExtractor(TransformerMixin):
"""
Transform multiple key/value columns in a scikit-learn pipeline.
>>> import pandas as pd
>>> D = pd.DataFrame([ ['a', 1, 'b', 2], ['b', 2, 'c', 3]], columns = ['k1', 'v1', 'k2', 'v2'])
>>> kvpairs = [ ['k1', 'v1'], ['k2', 'v2'] ]
>>> KVExtractor( kvpairs ).transform(D)
[{'a': 1, 'b': 2}, {'c': 3, 'b': 2}]
"""
def __init__(self, kvpairs):
self.kpairs = kvpairs
def transform(self, X, *_):
result = []
for index, rowdata in X.iterrows():
rowdict = {}
for kvp in self.kpairs:
rowdict.update({rowdata[kvp[0]]: rowdata[kvp[1]]})
result.append(rowdict)
return result
def fit(self, *_):
return self
class ColumnSelectTransformer(BaseEstimator, TransformerMixin):
def __init__(self, keys):
self.keys = keys
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.keys]
class CategoryTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
D = []
for record in X.values:
D.append({k: 1 for k in record[0]})
return D
class AttributeTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def _flatten(self, d, parent_key='', sep='_'):
""" Flatten dictonary
"""
import collections
items = []
for k, v in list(d.items()):
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(list(self._flatten(v, new_key, sep=sep).items()))
else:
new_v = 1 if v == True else 0
items.append((new_key, new_v))
return dict(items)
def fit(self, X, y=None):
return self
def transform(self, X):
D = []
for record in X.values:
D.append(self._flatten(record[0]))
return D
class KNNImputer(TransformerMixin):
"""
Fill missing values using KNN Regressor
"""
def __init__(self, k):
self.k = k
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
"""
:param X: multidimensional numpy array like.
"""
rows, features = X.shape
mask = list([reduce(lambda h, t: h or t, x) for x in np.isnan(X)])
criteria_for_bad = np.where(mask)[0]
criteria_for_good = np.where(mask == np.zeros(len(mask)))[0]
X_bad = X[criteria_for_bad]
X_good = X[criteria_for_good]
knn = KNeighborsRegressor(n_neighbors=self.k)
for idx, x_bad in zip(criteria_for_bad.tolist(), X_bad):
missing = np.isnan(x_bad)
bad_dim = np.where(missing)[0]
good_dim = np.where(missing == False)[0]
for d in bad_dim:
x = X_good[:, good_dim]
y = X_good[:, d]
knn.fit(x, y)
                # predict expects a 2D array of shape (1 sample, n features)
                X[idx, d] = knn.predict(x_bad[good_dim].reshape(1, -1))
return X
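# A minimal usage sketch (added comment, not part of the original module); the
# numbers are made up. Each NaN cell is predicted from the k nearest complete
# rows, using the non-missing columns of the incomplete row as features.
#
#   >>> import numpy as np
#   >>> X = np.array([[1.0, 2.0], [1.1, 2.1], [1.05, np.nan]])
#   >>> X = KNNImputer(k=1).transform(X)   # the NaN cell gets filled in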
class NLTKBOW(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return [{word: True for word in word_tokenize(document)}
for document in X] | mit | 1,656,264,518,869,154,300 | 25.964912 | 120 | 0.569848 | false |
LucasFerreiraDaSilva/ScrapingINMET | geraBase.py | 1 | 3704 | """
    Author: Lucas Ferreira da Silva
    Email: [email protected]
    Description: script to download the data of each meteorological station
                 and build a small JSON "database" covering all the stations
    Execution (command): python3 geraBase.py
    Output: JSON file (estacoes.json) containing the data of every INMET
            meteorological station
"""
import requests
import json
import bs4
import re
# Base URL for scraping the stations
url_map = "http://www.inmet.gov.br/sonabra/maps/pg_mapa.php"
res = requests.get (url_map)
res.raise_for_status()
# Split the stations apart
list_markers = (res.text).split("//************* ESTACÃO ")
del list_markers[0]
# Initialize the list of station data for later processing
list_stations = []
# Scrape the raw data of each station
for i in list_markers:
st = (i.split("var imagem",maxsplit=1))[0].split("var ")
    # Capture the station id
station_id = str((st[0].split(maxsplit=1))[0])
    # Capture the station label
station_label = re.search(r"(?<=')[^']+(?=')", str(st[-1])).group(0)
    # Capture the station html
station_html = str(st[2].split("html = ", maxsplit=1)[1])
    # Create an auxiliary dictionary with each station's data
station_info = {}
station_info['id'] = station_id
station_info['label'] = station_label
station_info['html'] = station_html
list_stations.append(station_info)
# Initialize the stations dictionary
stations = {}
# Refined scraping of each station's data
for x in list_stations:
soup = bs4.BeautifulSoup(x['html'], 'html.parser')
    # Capture the link to the data table
link = ""
for a in soup.find_all('a'):
l = a.get('href')
if (l.find("pg_dspDadosCodigo_sim.php?", 32) != -1):
link = l
break
aux = (x['html'].split("<b><b>", maxsplit=1))[1].split("<table ", maxsplit=1)
    # Capture the list of geographic data
localization = ((aux[1].split("</table>", maxsplit=1))[1].split("</font>", maxsplit=1)[0]).split("<br>")
    # Capture the remaining station data
data_aux = ((aux[0].replace("<b>", "")).replace("</b>","")).split("<br>")
data = []
for d in data_aux:
if (d.find("<a ", 0, 4) == -1) and (d.find("</a>", 0, 4) == -1) and (len(d) > 0):
data.append(d)
    # Create the station object for the JSON output
station_data = {}
details = {}
details['estacao'] = data[0].split(": ")[1]
details['codigo_omm'] = data[1].split(": ")[1]
if (len(data) > 2):
details['registro'] = data[2].split(": ")[1]
details['temp_max'] = (data[3].split(": ")[1]).replace("º","")
details['temp_min'] = (data[4].split(": ")[1]).replace("º","")
details['umidade'] = data[5].split(": ")[1]
details['pressao'] = data[6].split(": ")[1]
details['precipitacao'] = data[7].split(": ")[1]
details['vento_dir'] = (data[8].split(": ")[1]).replace("º","graus")
details['vento_vel'] = data[9].split(": ")[1]
station_data['label'] = x['label']
station_data['url'] = link
station_data['latitude'] = (localization[1].split(": ")[1]).replace("º","")
station_data['longitude'] = (localization[2].split(": ")[1]).replace("º","")
station_data['altitude'] = localization[3].split(": ")[1]
station_data['abertura'] = localization[0].split(": ")[1]
station_data['detalhes'] = details
stations[str(x['id'])] = station_data
# Write the data to a JSON file
with open('estacoes.json', 'w') as fp:
json.dump(stations, fp, indent=4, ensure_ascii=False, sort_keys=True)
print("Database successfully generated!")
| mit | 8,083,539,564,384,112,000 | 31.707965 | 108 | 0.606061 | false |
BlackHole/enigma2-1 | lib/python/Components/Converter/TemplatedMultiContent.py | 2 | 2918 | from Components.Converter.StringList import StringList
class TemplatedMultiContent(StringList):
"""Turns a python tuple list into a multi-content list which can be used in a listbox renderer."""
def __init__(self, args):
StringList.__init__(self, args)
from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT, RT_HALIGN_CENTER, RT_HALIGN_RIGHT, RT_VALIGN_TOP, RT_VALIGN_CENTER, RT_VALIGN_BOTTOM, RT_WRAP, BT_SCALE
from skin import parseFont
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmap, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend, MultiContentTemplateColor, MultiContentEntryProgress
l = locals()
del l["self"] # cleanup locals a bit
del l["args"]
self.active_style = None
self.template = eval(args, {}, l)
assert "fonts" in self.template
assert "itemHeight" in self.template
assert "template" in self.template or "templates" in self.template
assert "template" in self.template or "default" in self.template["templates"] # we need to have a default template
if not "template" in self.template: # default template can be ["template"] or ["templates"]["default"]
self.template["template"] = self.template["templates"]["default"][1]
self.template["itemHeight"] = self.template["template"][0]
def changed(self, what):
if not self.content:
from enigma import eListboxPythonMultiContent
self.content = eListboxPythonMultiContent()
# also setup fonts (also given by source)
index = 0
for f in self.template["fonts"]:
self.content.setFont(index, f)
index += 1
# if only template changed, don't reload list
if what[0] == self.CHANGED_SPECIFIC and what[1] == "style":
pass
elif self.source:
self.content.setList(self.source.list)
self.setTemplate()
self.downstream_elements.changed(what)
def setTemplate(self):
if self.source:
style = self.source.style
if style == self.active_style:
return
# if skin defined "templates", that means that it defines multiple styles in a dict. template should still be a default
templates = self.template.get("templates")
template = self.template.get("template")
itemheight = self.template["itemHeight"]
selectionEnabled = self.template.get("selectionEnabled", True)
scrollbarMode = self.template.get("scrollbarMode", "showOnDemand")
if templates and style and style in templates: # if we have a custom style defined in the source, and different templates in the skin, look it up
template = templates[style][1]
itemheight = templates[style][0]
if len(templates[style]) > 2:
selectionEnabled = templates[style][2]
if len(templates[style]) > 3:
scrollbarMode = templates[style][3]
self.content.setTemplate(template)
self.content.setItemHeight(itemheight)
self.selectionEnabled = selectionEnabled
self.scrollbarMode = scrollbarMode
self.active_style = style
| gpl-2.0 | 1,706,497,243,091,871,200 | 40.098592 | 207 | 0.735778 | false |
Grumblesaur/quickgen | quickgen.py | 1 | 3966 | #!/usr/local/bin/python -tt
# -*- coding: utf-8 -*-
import os, sys, random
#supply input as raw_input if running Python 3 or higher
if sys.version_info >= (3,0):
raw_input = input
def parse(structure, part, phonemes):
#grab a random phoneme from the relevant category and return it
#structure can be O, N, or C, passed as 0, 1, or 2, respectively
#initialize the segment string as empty
seg = ""
#focus in on relevant O, N, or C possibilities
pattern = part[structure]
#ensure that values fall within the bounds of list
listrange = len(pattern)
#pick an O, N, or C to construct
index = random.randrange(0, listrange)
onc = pattern[index] #obtain an onset, nucleus, or coda pattern
if "," in onc:
onc = onc.split(",") #if it is a cluster, split on commas
#this creates a list of indices to be accessed
#loop to construct O, N, or C
for i in range(0, len(onc)):
pclass = int(onc[i]) #obtain an index for a class of phoneme
phone = random.randrange(0, len(phonemes[pclass]))
#obtain an index for a specific phone
seg += phonemes[pclass][phone] #add phone to segment
return seg #return the segment to the main script
#end parse function definition
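#illustrative example (added comment, not from the original script): with
#  phonemes = [['p', 't', 'k'], ['a', 'i', 'u']]
#  parts = [['0', '0,0'], ['1'], ['0']]
#parse(0, parts, phonemes) returns a single consonant such as 'p' when the
#pattern '0' is drawn, or a two-consonant cluster such as 'tk' for '0,0'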
#ask for name of input file (default = "input.txt")
inn = raw_input("What is the name of your input file? (Leave blank for 'input.txt') ")
if inn == "":
inn = "input.txt"
#ask for name of output file (default = "output.txt")
out = raw_input("What is the name of your output file? (Leave blank for 'output.txt') ")
if out == "":
out = "output.txt"
seed = raw_input("Insert seed for RNG (leave blank for system time) ")
if seed == "":
seed = None
else:
seed = int(seed)
#use system time for seed
random.seed(seed)
#prepare lists
consonants = []
vowels = []
parts = []
structures = []
#prepare the output file
fout = open(out, 'w')
#extract from input file
with open(inn) as fin:
#get consonants
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
consonants.append(list)
#get vowels
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
vowels.append(list)
#get parts
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
parts.append(list)
#get structures
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
structures.append(list)
#un-nest the syllable patterns
structures = structures[0]
#ask for number of words (default = 50)
i = raw_input("How many words would you like to build? (Leave blank for 50) ")
if i == "":
i = 50
else:
i = int(i)
low = raw_input("Enter minimum number of syllables per word (Defaults to 1) ")
if low == "":
low = 1
else:
low = int(low)
high = raw_input("Enter maximum number of syllables per word (Defaults to 5) ")
if high == "":
high = 5
else:
high = int(high)
while i > 0:
#working word variable
word = ""
#create word in this loop
for j in range(0, int(random.triangular(low, high + 1, low + 1))):
#working syllable variable
syll = ""
#choose a random syllable pattern to follow
form = structures[random.randrange(0, len(structures))]
for k in range(0, len(form)):
if form[k] == "O":
#retrieve a string that is a valid onset
syll += parse(0, parts, consonants)
elif form[k] == "C":
#retrieve a string that is a valid coda
syll += parse(2, parts, consonants)
elif form[k] == "N":
#retrieve a string that is a valid nucleus
syll += parse(1, parts, vowels)
#add new syllable to the word
word += syll
#print out the word followed by a newline
fout.write(word)
fout.write('\n')
#decrement loop iterator
i -= 1
#close files
fin.close()
fout.close()
sys.stdout.write("Program finished. \n")
#end program
| gpl-2.0 | -7,959,536,635,195,563,000 | 21.793103 | 88 | 0.648512 | false |
donkirkby/live-py-plugin | setup.py | 1 | 1353 | import setuptools
with open("space_tracer.md") as f:
long_description = f.read()
about = {}
with open("plugin/PySrc/space_tracer/about.py") as f:
exec(f.read(), about)
# noinspection PyUnresolvedReferences
setuptools.setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
long_description=long_description,
long_description_content_type="text/markdown",
url=about['__url__'],
packages=setuptools.find_packages('plugin/PySrc/'),
package_dir={'': 'plugin/PySrc/'},
entry_points=dict(console_scripts=[
'space_tracer = space_tracer:main']),
classifiers=[ # from https://pypi.org/classifiers/
"Intended Audience :: Developers",
"Topic :: Software Development :: Debuggers",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Environment :: Console"
],
)
| mit | 5,494,482,452,774,835,000 | 35.567568 | 55 | 0.617886 | false |
suqinhuang/virt-test | virttest/cartesian_config_unittest.py | 1 | 2937 | #!/usr/bin/python
import unittest, logging, os
import gzip
import cartesian_config
mydir = os.path.dirname(__file__)
testdatadir = os.path.join(mydir, 'unittest_data')
class CartesianConfigTest(unittest.TestCase):
def _checkDictionaries(self, parser, reference):
result = list(parser.get_dicts())
# as the dictionary list is very large, test each item individually:
self.assertEquals(len(result), len(reference))
for resdict,refdict in zip(result, reference):
# checking the dict name first should make some errors more visible
self.assertEquals(resdict.get('name'), refdict.get('name'))
self.assertEquals(resdict, refdict)
def _checkConfigDump(self, config, dump):
"""Check if the parser output matches a config file dump"""
configpath = os.path.join(testdatadir, config)
dumppath = os.path.join(testdatadir, dump)
if dumppath.endswith('.gz'):
df = gzip.GzipFile(dumppath, 'r')
else:
df = open(dumppath, 'r')
# we could have used pickle, but repr()-based dumps are easier to
        # generate, debug, and edit
dumpdata = eval(df.read())
p = cartesian_config.Parser(configpath)
self._checkDictionaries(p, dumpdata)
def _checkStringConfig(self, string, reference):
p = cartesian_config.Parser()
p.parse_string(string)
self._checkDictionaries(p, reference)
def _checkStringDump(self, string, dump):
p = cartesian_config.Parser()
p.parse_string(string)
dumpdata = None
exec "dumpdata = " + dump
self._checkDictionaries(p, dumpdata)
def testSimpleVariant(self):
self._checkStringConfig("""
c = abc
variants:
a:
x = va
b:
x = vb
""",
[dict(name='a', shortname='a', dep=[], x='va', c='abc'),
dict(name='b', shortname='b', dep=[], x='vb', c='abc')])
def testFilterMixing(self):
self._checkStringDump("""
variants:
- unknown_qemu:
- rhel64:
only unknown_qemu
variants:
- kvm:
- nokvm:
variants:
- testA:
nokvm:
no unknown_qemu
- testB:
""",
"""[
{'dep': [],
'name': 'testA.kvm.unknown_qemu',
'shortname': 'testA.kvm.unknown_qemu'},
{'dep': [],
'name': 'testB.kvm.unknown_qemu',
'shortname': 'testB.kvm.unknown_qemu'},
{'dep': [],
'name': 'testB.nokvm.unknown_qemu',
'shortname': 'testB.nokvm.unknown_qemu'},
]""")
def testHugeTest1(self):
self._checkConfigDump('testcfg.huge/test1.cfg',
'testcfg.huge/test1.cfg.repr.gz')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 2,506,656,116,492,436,000 | 29.59375 | 79 | 0.548178 | false |
irblsensitivity/irblsensitivity | scripts/features/MethodFeatures.py | 1 | 6286 | #-*- coding: utf-8 -*-
'''
Created on 2016. 11. 19
Updated on 2016. 01. 09
'''
from __future__ import print_function
import os
import re
from utils import PrettyStringBuilder
from utils import Progress
import javalang
class Resource(object):
Stopwords = None
EngDictionary = None
@staticmethod
def init():
if Resource.Stopwords is None:
Resource.Stopwords = Resource.load_base(u'stopwords')
if Resource.EngDictionary is None:
Resource.EngDictionary = Resource.load_base(u'en.dict')
@staticmethod
def load_base(_filename):
listDic = {}
f = open(_filename, 'r')
while True:
word = f.readline()
if word is None or len(word)==0: break
if len(word) <= 2: continue
word = word[:-2]
listDic[word] = 1
return listDic
class MethodWorker(object):
__name__ = u'MethodWithComments'
basepath = u'/var/experiments/BugLocalization/dist/features/'
def run(self, _group, _project, _versionName, _srcBase):
print(u'preparing resources...', end=u'')
Resource.init()
print(u'Done')
workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods')
filename = os.path.join(workingPath, u'%s.txt' % _versionName)
if os.path.exists(workingPath) is False: os.makedirs(workingPath)
if os.path.exists(filename) is True: return
methods={}
files = self.listing_files(_srcBase)
progress = Progress(u'Calculating method', 2, 10, True)
progress.set_upperbound(len(files))
progress.start()
for fname in files:
text = open(fname, 'r').read()
key = fname[len(_srcBase) + 1:]
names = []
try:
ADT = javalang.parse.parse(text)
cntConstructors, cntConstComments, cntConstInDic = self.count(ADT, javalang.tree.ConstructorDeclaration)
cntMethods, cntComments, cntMethodInDic = self.count(ADT, javalang.tree.MethodDeclaration)
methods[key] = {'methods':cntMethods+ cntConstructors,
'withComments':cntComments + cntConstComments,
'InDicMethods':cntMethodInDic + cntConstInDic}
except javalang.parser.JavaSyntaxError as e:
methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0, 'error':'SyntaxError'}
except javalang.tokenizer.LexerError as e:
methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0,'error':'LexerError'}
except Exception as e:
methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0,'error':'Exception'}
progress.check()
progress.done()
self.storeData(filename, methods)
pass
def listing_files(self, _path):
results = []
for root, dirs, files in os.walk(_path):
for fname in files:
if fname.endswith('.java') is False:continue
results.append(os.path.join(root, fname))
return results
def count(self, _ADT, _filter):
cntMethods = 0
cntComments = 0
names = set([])
methodDecls = _ADT.filter(_filter)
for path, node in methodDecls:
cntMethods += 1
names.add(node.name)
if node.documentation is None or len(node.documentation) == 0: continue
doc = javalang.javadoc.parse(node.documentation)
if doc.description is None or len(doc.description) == 0: continue
cntComments += 1
cntInDic = 0
for name in names:
tokens = self.splitCamel(name)
tokens = self.removingStopwords(tokens)
if self.checkingEngDic(tokens) > 0:
cntInDic += 1
return cntMethods, cntComments, cntInDic #, list(names)
def splitCamel(self, token):
corpus = []
token = re.sub(r'([A-Z]+)(in|to|for|at|with|on|off|over)([A-Z]+\w+)', r'\1 \2 \3', token) # Lower case between Upper Cases (ex. XMLtoTEXT)
token = re.sub(r'([a-z0-9])([A-Z]\w+)', r'\1 \2', token) # UpperCase after LowerCase
items = token.split(' ')
for item in items:
item = item.strip()
if item == '': continue
if re.sub(r'[A-Z]+', '', item) != '':
item = re.sub(r'([A-Z]+)([A-Z]+\w+)', r'\1 \2', item) # ALLFiles ==> ALL Files
items2 = item.split(' ')
for item2 in items2:
if item.strip() == '': continue
corpus.append(item2)
else:
corpus.append(item)
return corpus
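    # Illustrative behaviour (comment added for clarity, not in the original):
    # splitCamel('parseHTTPResponse') yields ['parse', 'HTTP', 'Response'];
    # the '(in|to|for|...)' rule also breaks forms like 'XMLtoTEXT' apart.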
def removingStopwords(self, _tokens):
newer = set([])
for token in _tokens:
if len(token) <= 2: continue
if token.lower() in Resource.Stopwords: continue
newer.add(token)
return list(newer)
def checkingEngDic(self, _tokens):
count = 0
for token in _tokens:
if token in Resource.EngDictionary:
count += 1
continue
if token.lower() in Resource.EngDictionary:
count += 1
continue
nword = token[0].upper() + token[1:].lower()
if nword in Resource.EngDictionary:
count += 1
return count
#####################################
# managing cache
#####################################
def storeData(self, _filename, _data):
pretty = PrettyStringBuilder(_indent_depth=1)
text = pretty.toString(_data)
f = open(_filename, 'w')
f.write(text)
f.close()
def clear(self, _group, _project):
workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods')
try:
shutil.rmtree(workingPath)
print(u'Removed : %s' % workingPath)
except Exception as e:
print(u'No Path : %s' % workingPath)
###############################################################################################################
###############################################################################################################
###############################################################################################################
import shutil
from commons import Subjects
def clear():
S = Subjects()
for group in S.groups: # ['JBoss']: #
for project in S.projects[group]:
obj = MethodWorker()
obj.clear(group, project)
def work():
S = Subjects()
for group in ['JBoss', 'Wildfly']:#S.groups: # ['JBoss']: #
for project in S.projects[group]:
for versionName in S.bugs[project].keys():
if versionName == 'all' : continue
print(u'MethodWithComments for %s / %s / %s' % (group, project, versionName))
obj = MethodWorker()
obj.run(group, project, versionName, S.getPath_source(group, project, versionName))
if __name__ == "__main__":
#clear()
work()
pass | apache-2.0 | 4,348,889,472,794,515,500 | 30.081633 | 141 | 0.600223 | false |
aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.kidsplace/brightcovePlayer.py | 1 | 1587 | import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
def build_amf_request(const, playerID, videoPlayer, publisherID):
env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response
def play(const, playerID, videoPlayer, publisherID, playerKey):
rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
streamUrl = rtmpdata['FLVFullLengthURL'];
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
streamUrl = item['defaultURL']
streamName = streamName + rtmpdata['displayName']
return [streamName, streamUrl];
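# Illustrative call (added comment, not part of the original module); the ids
# and the player key are placeholders that would normally be scraped from the
# page embedding the Brightcove player:
#   name, url = play(const, playerID, videoPlayer, publisherID, playerKey)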
| apache-2.0 | -1,561,841,478,157,068,000 | 35.068182 | 156 | 0.669187 | false |
iamantony/PythonNotes | src/objects/matrix.py | 1 | 5188 | __author__ = 'Antony Cherepanov'
from exceptions import Exception
class MatrixException(Exception):
pass
class Matrix(object):
def __init__(self, t_rowNum=0, t_colNum=0, t_values=None):
if not self.__checkDimensionType(t_rowNum) or\
not self.__checkDimensionType(t_colNum):
raise MatrixException("Invalid number of matrix size")
self.__rows = max(t_rowNum, 0)
self.__cols = max(t_colNum, 0)
numOfElements = self.__rows * self.__cols
if t_values is None or \
not isinstance(t_values, list) or \
len(t_values) != numOfElements:
self.__matrix = [0 for i in range(numOfElements)]
else:
self.__matrix = t_values
def __checkDimensionType(self, t_dim):
if isinstance(t_dim, int):
return True
return False
def __str__(self):
return "Matrix of " + str(self.__rows) + " rows and " +\
str(self.__cols) + " cols: " + str(self.__matrix)
def __add__(self, other):
if not isinstance(other, Matrix) or \
(self.__rows != other.rows() and self.__cols != other.cols()):
raise MatrixException("Failed to add matrix")
sumData = list()
for i in range(self.__rows):
for j in range(self.__cols):
value = self.GetValue(i, j) + other.GetValue(i, j)
sumData.append(value)
result = Matrix(self.__rows, self.__cols, sumData)
return result
def __sub__(self, other):
if not isinstance(other, Matrix) or \
(self.__rows != other.rows() and self.__cols != other.cols()):
raise MatrixException("Failed to subtract matrix")
subData = list()
for i in range(self.__rows):
for j in range(self.__cols):
value = self.GetValue(i, j) - other.GetValue(i, j)
subData.append(value)
result = Matrix(self.__rows, self.__cols, subData)
return result
def __mul__(self, other):
if not isinstance(other, Matrix) or \
self.__cols != other.rows():
raise MatrixException("Failed to multiply matrix")
mulData = list()
# Iterate by elements of result matrix
for i in range(self.__rows):
for j in range(other.cols()):
sumValue = 0
for iter in range(self.__cols):
sumValue += self.GetValue(i, iter) * other.GetValue(iter, j)
mulData.append(sumValue)
result = Matrix(self.__rows, other.cols(), mulData)
return result
def rows(self):
return self.__rows
def cols(self):
return self.__cols
def IsSquare(self):
if self.__cols == self.__rows:
return True
return False
def __getIndex(self, t_row, t_col):
if not self.__checkDimensionType(t_row) or\
not self.__checkDimensionType(t_col):
raise MatrixException("Invalid coordinates type")
index = self.__cols * t_row + t_col
if index < 0 or len(self.__matrix) <= index:
return None
return index
def GetValue(self, t_row, t_col):
index = self.__getIndex(t_row, t_col)
if index is None:
raise MatrixException("Invalid index")
return self.__matrix[index]
def SetValue(self, t_row, t_col, t_value):
index = self.__getIndex(t_row, t_col)
if index is None:
raise MatrixException("Invalid index")
self.__matrix[index] = t_value
def GetSlice(self, t_topLeft, t_bottomRight):
# TODO: Definitely there could be a better approach
if 2 != len(t_topLeft) or 2 != len(t_bottomRight):
raise MatrixException("Invalid slice coordinates")
data = list()
startI = t_topLeft[0]
endI = t_bottomRight[0] + 1
startJ = t_topLeft[1]
endJ = t_bottomRight[1] + 1
for i in range(startI, endI):
for j in range(startJ, endJ):
value = self.GetValue(i, j)
data.append(value)
result = Matrix(endI - startI, endJ - startJ, data)
return result
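    # Illustrative usage (comment added for clarity, not in the original):
    #   m = Matrix(2, 2, [1, 2, 3, 4])    # rows [[1, 2], [3, 4]]
    #   m.GetValue(1, 0)                  # -> 3
    #   m.GetSlice((0, 0), (0, 1))        # -> the 1x2 top row as a new Matrix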
def SetSlice(self, t_topLeft, t_bottomRight, t_slice):
if 2 != len(t_topLeft) or 2 != len(t_bottomRight) or \
not isinstance(t_slice, Matrix):
raise MatrixException("Invalid slice coordinates or slice matrix")
startI = t_topLeft[0]
endI = t_bottomRight[0] + 1
startJ = t_topLeft[1]
endJ = t_bottomRight[1] + 1
        if (endI - startI) != t_slice.rows() or\
            (endJ - startJ) != t_slice.cols():
return False
for i, slI in zip(range(startI, endI), range(t_slice.rows())):
for j, slJ in zip(range(startJ, endJ), range(t_slice.cols())):
value = t_slice.GetValue(slI, slJ)
self.SetValue(i, j, value)
return True | mit | -3,138,855,623,441,978,400 | 31.701299 | 80 | 0.528142 | false |
jhermann/kunstkopf | src/kunstkopf/__init__.py | 1 | 1258 | # -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace
"""
kunstkopf [ˈkʊnstkɔp͜f] is a set of tools that handle audio (meta-)data and control hi-fi gear.
Copyright © 2015 Jürgen Hermann <[email protected]>
Licensed under the GNU General Public License, Version 3.0
"""
# Copyright © 2015 Jürgen Hermann <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see {http://www.gnu.org/licenses/}.
__url__ = "https://github.com/jhermann/kunstkopf"
__version__ = "0.1.0"
__license__ = "GPLv3"
__author__ = "Jürgen Hermann"
__author_email__ = "[email protected]"
__keywords__ = "python audio tool tagging indexing searching syncing"
__all__ = []
| gpl-3.0 | -7,133,038,733,929,701,000 | 38.03125 | 99 | 0.68775 | false |
arteria/django-hijack-admin | hijack_admin/tests/test_settings.py | 1 | 3172 | """Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DATABASES = {
'default': {'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:', }
}
LOGIN_REDIRECT_URL = '/hello'
HIJACK_LOGIN_REDIRECT_URL = '/hello/'
HIJACK_LOGOUT_REDIRECT_URL = '/hello/'
ROOT_URLCONF = 'hijack_admin.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (os.path.join(APP_ROOT, 'static'), )
NOSE_ARGS = []
TEMPLATE_DIRS = (os.path.join(APP_ROOT, 'tests/test_app/templates'), )
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
MIDDLEWARE_CLASSES = MIDDLEWARE
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
'django.core.context_processors.request',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': TEMPLATE_DIRS,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
'django.template.context_processors.request',
],
},
},
]
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'django_nose',
'compat',
'hijack',
'hijack_admin',
'hijack_admin.tests.test_app',
]
SECRET_KEY = 'foobar'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
HIJACK_ALLOW_GET_REQUESTS = True
| mit | 1,946,250,209,781,247,500 | 28.924528 | 73 | 0.671816 | false |
Freeseer/freeseer | src/freeseer/framework/config/exceptions.py | 1 | 1601 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011, 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
class StorageNotSetError(Exception):
def __init__(self):
super(StorageNotSetError, self).__init__('no ConfigStorage was given to this Config')
class OptionError(Exception):
def __init__(self, name, option):
super(OptionError, self).__init__(name)
class InvalidOptionValueError(OptionError):
pass
class InvalidOptionDefaultValueError(OptionError):
pass
class OptionValueNotSetError(OptionError):
pass
class InvalidDecodeValueError(Exception):
def __init__(self, value):
message = 'Unable to decode value "{}"'.format(value)
super(InvalidDecodeValueError, self).__init__(message)
| gpl-3.0 | -7,931,345,560,944,593,000 | 28.648148 | 93 | 0.722673 | false |
HBNLdev/DataStore | db/sas_tools.py | 1 | 2566 | ''' tools for working with .sas7bdat files '''
import os
from collections import OrderedDict
import pandas as pd
from sas7bdat import SAS7BDAT
from .knowledge.questionnaires import map_ph4, map_ph4_ssaga
map_subject = {'core': {'file_pfixes': []}}
parent_dir = '/processed_data/zork/zork-phase4-69/session/'
n_header_lines = 30
def extract_descriptions(path):
''' given path to .sas7bdat file, returns dictionary mapping column labels
to their verbose descriptions in the SAS header.
dictionary will only contain an entry if there was new information present
(if there was a description, and it was different from the label) '''
f = SAS7BDAT(path)
kmap = OrderedDict()
for line in str(f.header).splitlines()[n_header_lines + 1:]:
line_parts = line.split(maxsplit=4)
label = line_parts[1]
try:
description = line_parts[4].rstrip()
if description == label or description[0] == '$':
continue
else:
kmap[label] = description
except IndexError:
pass
return kmap
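# Illustrative usage (added comment, not part of the original module; the path
# is a placeholder, any labelled .sas7bdat file should work):
#   kmap = extract_descriptions('/path/to/some_file.sas7bdat')
#   kmap.get('SOME_LABEL')  # -> verbose header description, if one was present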
def exemplary_files(kdict):
''' given a questionnaire knowledge map,
return a new dictionary mapping questionnaire names to the filepath
of an exemplary .sas7bdat file for each file prefix '''
exemplars = {}
for test, tdict in kdict.items():
for fpx in tdict['file_pfixes']:
fd = parent_dir + test
fn = fpx + '.sas7bdat'
fp = os.path.join(fd, fn)
if os.path.exists(fp):
exemplars[test] = fp
else:
print(fp, 'did not exist')
return exemplars
def build_labelmaps():
''' return a dict in which keys are questionnaires names and values are
dictionaries mapping column labels to descriptions '''
comb_dict = map_ph4.copy()
comb_dict.update(map_ph4_ssaga)
exemplars = exemplary_files(comb_dict)
big_kmap = {}
for test, fp in exemplars.items():
kmap = extract_descriptions(fp)
big_kmap[test] = kmap
return big_kmap
def df_fromsas(fullpath, id_lbl='ind_id'):
''' convert .sas7bdat to dataframe.
unused because fails on incorrectly formatted files. '''
# read csv in as dataframe
df = pd.read_sas(fullpath, format='sas7bdat')
# convert id to str and save as new column
df[id_lbl] = df[id_lbl].apply(int).apply(str)
df['ID'] = df[id_lbl]
return df
| gpl-3.0 | -7,685,914,298,742,378,000 | 30.481013 | 82 | 0.606002 | false |
OpenTreeOfLife/gcmdr | run_synth_studies_mono.py | 1 | 1437 | import load_synth_extract
from plants import studytreelist as plantslist
from metazoa import studytreelist as metalist
from fungi import studytreelist as fungilist
from microbes import studytreelist as microbelist
studytreelist = []
studytreelist.extend(metalist)
studytreelist.extend(fungilist)
studytreelist.extend(microbelist)
studytreelist.extend(plantslist)
if __name__ == "__main__":
from wopr_conf_TEMP import *
synthottolid="93302" # cellular organisms
# studytreelist = ["420_522"]
# studytreelist = ["2460_5285"] # Pyron Squamata study
# studytreelist = ["2573_5959"] # Sauria
# studytreelist = ["2573_5959"]
# from metazoa import studytreelist as metalist
# studytreelist = []
# studytreelist.extend(metalist)
# studytreelist = [
# "1634_3303", # Chiroptera. Agnarsson et al. 2011. PLoS Currents Tree of Life
# ]
print "loading synthottolid:",synthottolid
print "loading studytreelist:",studytreelist
for i in studytreelist:
tstudy_list = [i]
generallogfileloc = "synth_studies_submission/"+i+".log"
ttfntreefn = "synth_studies_submission/"+i+".tre"
infmonofn = "synth_studies_submission/"+i+".inf_mono"
load_synth_extract.run_load_single_ttfn_inf_mono(dott,dload,studyloc,tstudy_list,javapre,
treemloc,generallogfileloc,dsynth,synthottolid,treefn,ttfntreefn,infmonofn)
| bsd-2-clause | -8,043,959,451,811,851,000 | 33.214286 | 123 | 0.695198 | false |
10clouds/edx-platform | openedx/core/djangoapps/credentials/utils.py | 1 | 3508 | """Helper functions for working with Credentials."""
from __future__ import unicode_literals
import logging
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.programs.utils import get_programs_for_credentials
from openedx.core.lib.edx_api_utils import get_edx_api_data
log = logging.getLogger(__name__)
def get_user_credentials(user):
"""Given a user, get credentials earned from the Credentials service.
Arguments:
user (User): The user to authenticate as when requesting credentials.
Returns:
list of dict, representing credentials returned by the Credentials
service.
"""
credential_configuration = CredentialsApiConfig.current()
user_query = {'username': user.username}
# Bypass caching for staff users, who may be generating credentials and
# want to see them displayed immediately.
use_cache = credential_configuration.is_cache_enabled and not user.is_staff
cache_key = credential_configuration.CACHE_KEY + '.' + user.username if use_cache else None
credentials = get_edx_api_data(
credential_configuration, user, 'user_credentials', querystring=user_query, cache_key=cache_key
)
return credentials
def get_user_program_credentials(user):
"""Given a user, get the list of all program credentials earned and returns
list of dictionaries containing related programs data.
Arguments:
user (User): The user object for getting programs credentials.
Returns:
list, containing programs dictionaries.
"""
programs_credentials_data = []
credential_configuration = CredentialsApiConfig.current()
if not credential_configuration.is_learner_issuance_enabled:
log.debug('Display of certificates for programs is disabled.')
return programs_credentials_data
credentials = get_user_credentials(user)
if not credentials:
log.info('No credential earned by the given user.')
return programs_credentials_data
programs_credentials = []
for credential in credentials:
try:
if 'program_id' in credential['credential'] and credential['status'] == 'awarded':
programs_credentials.append(credential)
except KeyError:
log.exception('Invalid credential structure: %r', credential)
if programs_credentials:
programs_credentials_data = get_programs_for_credentials(user, programs_credentials)
return programs_credentials_data
def get_programs_credentials(user):
"""Return program credentials data required for display.
Given a user, find all programs for which certificates have been earned
and return list of dictionaries of required program data.
Arguments:
user (User): user object for getting programs credentials.
Returns:
list of dict, containing data corresponding to the programs for which
the user has been awarded a credential.
"""
programs_credentials = get_user_program_credentials(user)
credentials_data = []
for program in programs_credentials:
try:
program_data = {
'display_name': program['name'],
'subtitle': program['subtitle'],
'credential_url': program['credential_url'],
}
credentials_data.append(program_data)
except KeyError:
log.warning('Program structure is invalid: %r', program)
return credentials_data | agpl-3.0 | 8,678,225,686,083,167,000 | 35.936842 | 103 | 0.696693 | false |
naojsoft/qplan | scripts/qexec.py | 1 | 4295 | #!/usr/bin/env python
#
# qexec.py -- Subaru Telescope Queue Execution Tool
#
"""
Usage:
qexec.py --help
qexec.py [options]
"""
import sys, os
from ginga.misc.Bunch import Bunch
from qplan import main, version
defaultServiceName = 'qexecute'
plugins = [
Bunch(name='slewchart', module='SlewChart', klass='SlewChart',
ptype='global', tab='Slew Chart', ws='sub2', start=True),
Bunch(name='airmasschart', module='AirMassChart', klass='AirMassChart',
ptype='global', tab='Airmass Chart', ws='sub1', start=True),
Bunch(name='schedule', module='Schedule', klass='Schedule',
ptype='global', tab='Schedule', ws='left', start=True),
Bunch(name='execute', module='Execute', klass='Execute',
ptype='global', tab='Execute', ws='report', start=True),
Bunch(name='logger', module='Logger', klass='Logger',
ptype='global', tab='Log', ws='report', start=False),
Bunch(name='cp', module='ControlPanel', klass='ControlPanel',
ptype='global', tab='Control Panel', ws='right', start=True),
Bunch(name='night_activity', module='SumChart', klass='NightSumChart',
ptype='global', tab='Night Activity Chart', ws='sub1', start=True),
Bunch(name='night_sched', module='SumChart', klass='SchedSumChart',
ptype='global', tab='Schedules Chart', ws='sub1', start=True),
Bunch(name='proposals', module='SumChart', klass='ProposalSumChart',
ptype='global', tab='Proposals Chart', ws='sub1', start=True),
Bunch(name='semester', module='SumChart', klass='SemesterSumChart',
ptype='global', tab='Semester Chart', ws='sub1', start=True),
Bunch(name='errors', module='Errors', klass='Errors',
ptype='global', tab='Errors', ws='right', start=True),
]
# plugins = [
# # pluginName, moduleName, className, workspaceName, tabName
# ('slewchart', 'SlewChart', 'SlewChart', 'sub2', 'Slew Chart'),
# ('airmasschart', 'AirMassChart', 'AirMassChart', 'sub1', 'AirMass Chart'),
# ('schedule', 'Schedule', 'Schedule', 'left', 'Schedule'),
# ('execute', 'Execute', 'Execute', 'report', 'Execute'),
# ('logger', 'Logger', 'Logger', 'report', 'Log'),
# ('cp', 'ControlPanel', 'ControlPanel', 'right', 'Control Panel'),
# #('resolution', 'Resolution', 'Resolution', 'right', 'OB Resolution'),
# ('night_activity', 'SumChart', 'NightSumChart', 'sub1', 'Night Activity Chart'),
# ('night_sched', 'SumChart', 'SchedSumChart', 'sub1', 'Schedules Chart'),
# ('proposals', 'SumChart', 'ProposalSumChart', 'sub1', 'Proposals Chart'),
# ('semester', 'SumChart', 'SemesterSumChart', 'sub1', 'Semester Chart'),
# ]
if __name__ == "__main__":
viewer = main.QueuePlanner(layout=main.default_layout)
# use our version of plugins
viewer.add_plugins(plugins)
# Parse command line options with optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage,
version=('%%prog %s' % version.version))
viewer.add_default_options(optprs)
optprs.add_option("--svcname", dest="svcname", metavar="NAME",
default=defaultServiceName,
help="Register using NAME as service name")
## optprs.add_option("--monitor", dest="monitor", metavar="NAME",
## default='monitor',
## help="Synchronize from monitor named NAME")
## optprs.add_option("--monchannels", dest="monchannels",
## default='status', metavar="NAMES",
## help="Specify monitor channels to subscribe to")
## optprs.add_option("--monport", dest="monport", type="int",
## help="Register monitor using PORT", metavar="PORT")
(options, args) = optprs.parse_args(sys.argv[1:])
if options.display:
os.environ['DISPLAY'] = options.display
# Are we debugging this?
if options.debug:
import pdb
pdb.run('viewer.main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('viewer.main(options, args)')
else:
viewer.main(options, args)
# END
| bsd-3-clause | -8,172,853,372,845,459,000 | 40.298077 | 86 | 0.611176 | false |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/MediaPortal/additions/porn/adultbay.py | 1 | 12372 | # -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2015
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
class adultbayGenreScreen(MPScreen):
def __init__(self, session):
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("Genre:")
self.keyLocked = True
self.suchString = ''
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.genreData)
def genreData(self):
self.filmliste.append(("--- Search ---", None))
self.filmliste.append(("Newest (Clips)", "http://adultbay.org/category/clips/"))
self.filmliste.append(("Newest (Movies)", "http://adultbay.org/category/movies/"))
self.filmliste.append(("Clips", None))
self.filmliste.append(("Movies", None))
self.filmliste.append(("HDTV", None))
self.filmliste.append(("DVD-R", "http://adultbay.org/category/dvd-r/"))
self.filmliste.append(("Hentai", "http://adultbay.org/category/hentai/"))
self.ml.setList(map(self._defaultlistcenter, self.filmliste))
self.keyLocked = False
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
self.suchString = callback.replace(' ', '+')
Link = self.suchString
Name = "--- Search ---"
self.session.open(adultbayListScreen, Link, Name)
def keyOK(self):
if self.keyLocked:
return
if not config.mediaportal.premiumize_use.value:
message = self.session.open(MessageBoxExt, _("The Adult Bay only works with enabled MP premiumize.me option (MP Setup)!"), MessageBoxExt.TYPE_INFO, timeout=10)
return
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
if Name == "--- Search ---":
self.suchen()
elif Link != None:
self.session.open(adultbayListScreen, Link, Name)
else:
self.session.open(adultbaySubGenreScreen, Name)
class adultbaySubGenreScreen(MPScreen):
def __init__(self, session, Name):
self.Name = Name
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("Genre:")
self.keyLocked = True
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
url = "http://adultbay.org/"
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError)
def parseData(self, data):
parse = re.search('class="cat-item.*?>'+self.Name+'</a>(.*?)</ul>', data, re.S)
raw = re.findall('<li\sclass="cat-item.*?a\shref="(.*?)".*?>(.*?)</a>', parse.group(1), re.S)
if raw:
self.filmliste = []
for (Url, Title) in raw:
self.filmliste.append((decodeHtml(Title), Url))
self.filmliste.sort()
self.ml.setList(map(self._defaultlistcenter, self.filmliste))
self.keyLocked = False
def keyOK(self):
if self.keyLocked:
return
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
self.session.open(adultbayListScreen, Link, Name)
class adultbayListScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.page = 1
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self.filmliste = []
if re.match(".*?Search", self.Name):
url = "http://adultbay.org/search/%s/page/%s/" % (self.Link, str(self.page))
else:
if self.page == 1:
url = self.Link
else:
url = self.Link + "page/" + str(self.page) + "/"
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError)
def parseData(self, data):
if re.match('.*?<h2>Not Found</h2>', data, re.S):
self.filmliste.append((_('No movies found!'), None, None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
elif re.match('.*?<h2>Sorry: No Results</h2>', data, re.S):
self.filmliste.append((_('No movies found!'), None, None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
elif re.match('.*?Search is temporarily disabled', data, re.S):
self.filmliste.append(("Search is temporarily disabled...", None, None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
else:
parse = re.search('class="wp-pagenavi">(.*?)</div>', data, re.S)
if parse:
lastpage = re.findall('\d{0,1},{0,1}\d+', parse.group(1), re.S)
lastpage = [x.replace(',', '') for x in lastpage]
lastpage = [int(x) for x in lastpage]
lastpage.sort(key=int)
self.lastpage = int(lastpage[-1])
self['page'].setText("%s / %s" % (str(self.page), str(self.lastpage)))
else:
parse = re.search('class="navigation">.*?/page/(.*?)/.*?Older Entries', data, re.S)
if parse:
self.lastpage = int(parse.group(1))
else:
self.lastpage = 1
self['page'].setText("%s / %s" % (str(self.page), str(self.lastpage)))
raw = re.findall('class="post".*?<a\shref="(.*?)".*?img\ssrc="(.*?)".*?(<strong>|<p>)(.*?)(</strong>|<br\s/>|</p>).*?<p>(.*?)(Read\smore|\(more...\))', data, re.S)
if raw:
for (link, image, trash, title, trash, desc, trash) in raw:
title = stripAllTags(title).strip()
desc = stripAllTags(desc).strip()
self.filmliste.append((decodeHtml(title), link, image, desc))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
self.showInfos()
def showInfos(self):
title = self['liste'].getCurrent()[0][0]
self['name'].setText(title)
desc = self['liste'].getCurrent()[0][3]
self['handlung'].setText(desc)
coverUrl = self['liste'].getCurrent()[0][2]
CoverHelper(self['coverArt']).getCover(coverUrl)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][0]
if Link == None:
return
Title = self['liste'].getCurrent()[0][1]
Cover = self['liste'].getCurrent()[0][2]
self.session.open(StreamAuswahl, Link, Title, Cover)
class StreamAuswahl(MPScreen):
def __init__(self, session, Title, Link, Cover):
self.Link = Link
self.Title = Title
self.Cover = Cover
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("%s" %self.Title)
self.filmliste = []
self.keyLocked = True
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
CoverHelper(self['coverArt']).getCover(self.Cover)
self.keyLocked = True
url = self.Link
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadPageData).addErrback(self.dataError)
def loadPageData(self, data):
parse = re.search('class="post_header">(.*?)Recommends:</h2>', data, re.S)
streams = re.findall('(http://(?!adultbay.org)(.*?)\/.*?)[\'|"|\&|<]', parse.group(1), re.S)
if streams:
for (stream, hostername) in streams:
if isSupportedHoster(hostername, True):
hostername = hostername.replace('www.','')
self.filmliste.append((hostername, stream))
# remove duplicates
self.filmliste = list(set(self.filmliste))
if len(self.filmliste) == 0:
self.filmliste.append((_('No supported streams found!'), None))
self.ml.setList(map(self._defaultlisthoster, self.filmliste))
self.keyLocked = False
def keyOK(self):
if self.keyLocked:
return
url = self['liste'].getCurrent()[0][1]
if url == None:
return
get_stream_link(self.session).check_link(url, self.got_link)
def got_link(self, stream_url):
if stream_url == None:
message = self.session.open(MessageBoxExt, _("Stream not found, try another Stream Hoster."), MessageBoxExt.TYPE_INFO, timeout=3)
else:
title = self.Title
self.session.open(SimplePlayer, [(self.Title, stream_url, self.Cover)], showPlaylist=False, ltype='adultbay', cover=True) | gpl-2.0 | 2,939,069,052,170,796,000 | 36.374622 | 166 | 0.66637 | false |
kll334477/NewsScrapy | thepaper/thepaper/spiders/wshang_spider.py | 1 | 5240 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'yinzishao'
"""
The mobile site requires no cookie, which makes it easier to crawl.
However, the PC homepage mixes all categories together, while the mobile site
lists news per category, so each category has to be crawled separately.
"""
import re
import scrapy
from bs4 import BeautifulSoup
import logging
from thepaper.items import NewsItem
import json
logger = logging.getLogger("WshangSpider")
from thepaper.settings import *
from thepaper.util import judge_news_crawl
#TODO:
class NbdSpider(scrapy.spiders.Spider):
domain = "http://m.iwshang.com/"
name = "wshang"
# allowed_domains = ["i.wshang.com",]
flag = {}
start_urls = [
"http://m.iwshang.com/",
]
    # URL template of the PC-site news page
pc_news_url = "http://i.wshang.com/Post/Default/Index/pid/%s.html"
def parse(self, response):
"""
:param response:
        :return: yields one POST request per category
            POST parameters:
inslider
page
pagesize
Content-Type:application/x-www-form-urlencoded
"""
soup = BeautifulSoup(response.body)
        menu = soup.find_all("a",class_="ui-more") # links to all the categories
if menu:
for topic in menu:
topic_name = topic.text.replace(u"查看","")
topic_url = topic.get("href")
self.flag.setdefault(topic_url,0)
page="1"
                # post_data values must be strings
post_data = {
"inslider":"0",
"page":page,
"pagesize":"10"
}
# yield scrapy.Request(topic_url,
# callback=self.parse_topic,
# method="POST",
# headers={"Content-Type":"application/x-www-form-urlencoded"},
# body=json.dumps(post_data)
# )
yield scrapy.FormRequest(
url=topic_url,
formdata=post_data,
callback=self.parse_topic,
meta={"page":page,"topic_name":topic_name}
)
def parse_topic(self,response):
topic_url = response.url
# print topic_url
body = json.loads(response.body)
news_list = body["data"]
page = response.meta.get("page","1")
topic_name = response.meta.get("topic_name",None)
        # http://m.iwshang.com/category/20 has no news
if not news_list:
self.flag[topic_url]=page
for news in news_list:
news_date_timestamp = news.get("published",None)
struct_date = datetime.datetime.fromtimestamp(int(news_date_timestamp))
news_date = struct_date.strftime("%Y-%m-%d %H:%M:%S")
title = news.get("title",None)
news_no = news.get("contentid",None)
abstract = news.get("description",None)
pic = news.get("thumb",None)
            news_url = news.get("url",None) # link to the news page on the mobile site
            referenceid = news.get("referenceid",None) # id on the PC site, which differs from the mobile id
            pc_news_url = self.pc_news_url % referenceid # link to the news page on the PC site
item = NewsItem(
news_date=news_date,
title=title,
news_no=news_no,
abstract=abstract,
pic=pic,
news_url=pc_news_url,
topic=topic_name
)
item = judge_news_crawl(item)
if item:
# yield item
yield scrapy.Request(pc_news_url,callback=self.parse_news,meta={"item":item})
else:
self.flag[topic_url]=page
if not self.flag[topic_url]:
page = str(int(page)+1)
post_data = {
"inslider":"0",
"page":page,
"pagesize":"10"
}
yield scrapy.FormRequest(
url=topic_url,
formdata=post_data,
callback=self.parse_topic,
meta={"page":page}
)
def parse_news(self,response):
item = response.meta.get("item",NewsItem())
soup = BeautifulSoup(response.body)
        # mobile version
# content = soup.find("div",id="content-show").get_text(strip=True) if soup.find("div",id="content-show") else None
        # PC version
content = soup.find("div",class_="article-cont").get_text(strip=True) if soup.find("div",class_="article-cont") else None
article_head = soup.find("div",class_="article-head")
author=None
if article_head:
author = article_head.p.text.split(u"/")[1]
article_tag_list = soup.find("div",class_="article-tag")("a") if soup.find("div",class_="article-tag") else []
tags = [tag.text for tag in article_tag_list]
item["tags"] = tags
item["author"] = author
item["content"] = content
item["crawl_date"] = NOW
yield item
| lgpl-3.0 | 526,355,286,195,562,100 | 34.7 | 129 | 0.503601 | false |
archesproject/arches | arches/management/commands/card_component.py | 1 | 3937 | """
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import uuid
from arches.management.commands import utils
from arches.app.models import models
from django.core.management.base import BaseCommand, CommandError
from django.db.utils import IntegrityError
class Command(BaseCommand):
"""
Commands for managing Arches functions
"""
def add_arguments(self, parser):
parser.add_argument("operation", nargs="?")
parser.add_argument("-s", "--source", action="store", dest="source", default="", help="Widget json file to be loaded")
parser.add_argument("-n", "--name", action="store", dest="name", default="", help="The name of the widget to unregister")
def handle(self, *args, **options):
if options["operation"] == "register":
self.register(source=options["source"])
if options["operation"] == "unregister":
self.unregister(name=options["name"])
if options["operation"] == "list":
self.list()
if options["operation"] == "update":
self.update(source=options["source"])
def register(self, source):
"""
Inserts a card component into the arches db
"""
import json
details = {}
with open(source) as f:
details = json.load(f)
try:
uuid.UUID(details["componentid"])
except:
details["componentid"] = str(uuid.uuid4())
print("Registering card component with componentid: {}".format(details["componentid"]))
instance = models.CardComponent(
componentid=details["componentid"],
name=details["name"],
description=details["description"],
component=details["component"],
componentname=details["componentname"],
defaultconfig=details["defaultconfig"],
)
instance.save()
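
    # For illustration, the JSON file passed to register() is expected to look
    # roughly like the sketch below (all values are hypothetical; "componentid"
    # may be omitted, in which case a new UUID is generated):
    #
    #   {
    #       "componentid": "a0b1c2d3-e4f5-6789-abcd-ef0123456789",
    #       "name": "Sample Card Component",
    #       "description": "Renders cards with a sample layout",
    #       "component": "views/components/cards/sample",
    #       "componentname": "sample-card",
    #       "defaultconfig": {}
    #   }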
def update(self, source):
"""
Updates an existing card component in the arches db
"""
import json
details = {}
with open(source) as f:
details = json.load(f)
instance = models.CardComponent.objects.get(name=details["name"])
instance.description = details["description"]
instance.component = details["component"]
instance.componentname = details["componentname"]
instance.defaultconfig = details["defaultconfig"]
instance.save()
def unregister(self, name):
"""
        Removes a card component from the system
"""
try:
instances = models.CardComponent.objects.filter(name=name)
if str(instances[0].componentid) != "f05e4d3a-53c1-11e8-b0ea-784f435179ea":
instances[0].delete()
else:
print("You cannot unregister the default card component.")
except Exception as e:
print(e)
def list(self):
"""
Lists registered card components
"""
try:
instances = models.CardComponent.objects.all()
for instance in instances:
print(instance.name)
except Exception as e:
print(e)
| agpl-3.0 | -963,692,031,037,702,900 | 30.496 | 129 | 0.626365 | false |
wroersma/volatility | volatility/plugins/overlays/windows/win10.py | 1 | 21936 | # Volatility
# Copyright (c) 2008-2015 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: The Volatility Foundation
@license: GNU General Public License 2.0
@contact: [email protected]
This file provides support for Windows 10.
"""
import volatility.plugins.overlays.windows.windows as windows
import volatility.obj as obj
import volatility.win32.tasks as tasks
import volatility.debug as debug
import volatility.plugins.overlays.windows.win8 as win8
try:
import distorm3
has_distorm = True
except ImportError:
has_distorm = False
class _HMAP_ENTRY(obj.CType):
@property
def BlockAddress(self):
return self.PermanentBinAddress & 0xFFFFFFFFFFF0
class Win10Registry(obj.ProfileModification):
"""The Windows 10 registry HMAP"""
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4}
def modification(self, profile):
profile.object_classes.update({"_HMAP_ENTRY": _HMAP_ENTRY})
class Win10x64DTB(obj.ProfileModification):
"""The Windows 10 64-bit DTB signature"""
before = ['WindowsOverlay', 'Windows64Overlay', 'Win8x64DTB']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'memory_model': lambda x: x == '64bit',
}
def modification(self, profile):
profile.merge_overlay({
'VOLATILITY_MAGIC': [ None, {
'DTBSignature' : [ None, ['VolatilityMagic', dict(value = "\x03\x00\xb6\x00")]],
}]})
class Win10x86DTB(obj.ProfileModification):
"""The Windows 10 32-bit DTB signature"""
before = ['WindowsOverlay', 'Win8x86DTB']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'memory_model': lambda x: x == '32bit',
}
def modification(self, profile):
build = profile.metadata.get("build", 0)
if build >= 15063:
signature = "\x03\x00\x2C\x00"
else:
signature = "\x03\x00\x2A\x00"
profile.merge_overlay({
'VOLATILITY_MAGIC': [ None, {
'DTBSignature' : [ None, ['VolatilityMagic', dict(value = signature)]],
}]})
class Win10KDBG(windows.AbstractKDBGMod):
"""The Windows 10 KDBG signatures"""
before = ['Win8KDBG']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'build': lambda x: x >= 14393}
kdbgsize = 0x368
class ObHeaderCookieStore(object):
"""A class for finding and storing the nt!ObHeaderCookie value"""
_instance = None
def __init__(self):
self._cookie = None
def cookie(self):
return self._cookie
def findcookie(self, kernel_space):
"""Find and read the nt!ObHeaderCookie value.
On success, return True and save the cookie value in self._cookie.
On Failure, return False.
This method must be called before performing any tasks that require
object header validation including handles, psxview (due to pspcid)
and the object scanning plugins (psscan, etc).
NOTE: this cannot be implemented as a volatility "magic" class,
because it must be persistent across various classes and sources.
We don't want to recalculate the cookie value multiple times.
"""
meta = kernel_space.profile.metadata
vers = (meta.get("major", 0), meta.get("minor", 0))
# this algorithm only applies to Windows 10 or greater
if vers < (6, 4):
return True
# prevent subsequent attempts from recalculating the existing value
if self._cookie:
return True
if not has_distorm:
debug.warning("distorm3 module is not installed")
return False
kdbg = tasks.get_kdbg(kernel_space)
if not kdbg:
debug.warning("Cannot find KDBG")
return False
nt_mod = None
for mod in kdbg.modules():
nt_mod = mod
break
        if nt_mod is None:
debug.warning("Cannot find NT module")
return False
addr = nt_mod.getprocaddress("ObGetObjectType")
        if addr is None:
debug.warning("Cannot find nt!ObGetObjectType")
return False
# produce an absolute address by adding the DLL base to the RVA
addr += nt_mod.DllBase
if not nt_mod.obj_vm.is_valid_address(addr):
debug.warning("nt!ObGetObjectType at {0} is invalid".format(addr))
return False
# in theory...but so far we haven't tested 32-bits
model = meta.get("memory_model")
if model == "32bit":
mode = distorm3.Decode32Bits
else:
mode = distorm3.Decode64Bits
data = nt_mod.obj_vm.read(addr, 100)
ops = distorm3.Decompose(addr, data, mode, distorm3.DF_STOP_ON_RET)
addr = None
# search backwards from the RET and find the MOVZX
if model == "32bit":
# movzx ecx, byte ptr ds:_ObHeaderCookie
for op in reversed(ops):
if (op.size == 7 and
'FLAG_DST_WR' in op.flags and
len(op.operands) == 2 and
op.operands[0].type == 'Register' and
op.operands[1].type == 'AbsoluteMemoryAddress' and
op.operands[1].size == 8):
addr = op.operands[1].disp & 0xFFFFFFFF
break
else:
# movzx ecx, byte ptr cs:ObHeaderCookie
for op in reversed(ops):
if (op.size == 7 and
'FLAG_RIP_RELATIVE' in op.flags and
len(op.operands) == 2 and
op.operands[0].type == 'Register' and
op.operands[1].type == 'AbsoluteMemory' and
op.operands[1].size == 8):
addr = op.address + op.size + op.operands[1].disp
break
if not addr:
debug.warning("Cannot find nt!ObHeaderCookie")
return False
if not nt_mod.obj_vm.is_valid_address(addr):
debug.warning("nt!ObHeaderCookie at {0} is not valid".format(addr))
return False
cookie = obj.Object("unsigned int", offset = addr, vm = nt_mod.obj_vm)
self._cookie = int(cookie)
return True
@staticmethod
def instance():
if not ObHeaderCookieStore._instance:
ObHeaderCookieStore._instance = ObHeaderCookieStore()
return ObHeaderCookieStore._instance
class VolatilityCookie(obj.VolatilityMagic):
"""The Windows 10 Cookie Finder"""
def v(self):
if self.value is None:
return self.get_best_suggestion()
else:
return self.value
def get_suggestions(self):
if self.value:
yield self.value
for x in self.generate_suggestions():
yield x
def generate_suggestions(self):
store = ObHeaderCookieStore.instance()
store.findcookie(self.obj_vm)
yield store.cookie()
class Win10Cookie(obj.ProfileModification):
"""The Windows 10 Cookie Finder"""
before = ['WindowsOverlay']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
}
def modification(self, profile):
profile.merge_overlay({
'VOLATILITY_MAGIC': [ None, {
'ObHeaderCookie' : [ 0x0, ['VolatilityCookie', dict(configname = "COOKIE")]],
}]})
profile.object_classes.update({'VolatilityCookie': VolatilityCookie})
class _OBJECT_HEADER_10(win8._OBJECT_HEADER):
@property
def TypeIndex(self):
"""Wrap the TypeIndex member with a property that decodes it
with the nt!ObHeaderCookie value."""
cook = obj.VolMagic(self.obj_vm).ObHeaderCookie.v()
addr = self.obj_offset
indx = int(self.m("TypeIndex"))
return ((addr >> 8) ^ cook ^ indx) & 0xFF
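        # Worked example with hypothetical values: for a header at
        # 0xffffc10a4f2b3080 with ObHeaderCookie 0x35 and a stored TypeIndex
        # byte of 0x02, the low byte of (addr >> 8) is 0x30, and
        # 0x30 ^ 0x35 ^ 0x02 == 0x07, i.e. the 'Process' entry of type_map.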
def is_valid(self):
"""Determine if a given object header is valid"""
if not obj.CType.is_valid(self):
return False
if self.InfoMask > 0x88:
return False
if self.PointerCount > 0x1000000 or self.PointerCount < 0:
return False
return True
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'Silo',
12: 'DebugObject',
13: 'Event',
14: 'Mutant',
15: 'Callback',
16: 'Semaphore',
17: 'Timer',
18: 'IRTimer',
19: 'Profile',
20: 'KeyedEvent',
21: 'WindowStation',
22: 'Desktop',
23: 'Composition',
24: 'RawInputManager',
25: 'TpWorkerFactory',
26: 'Adapter',
27: 'Controller',
28: 'Device',
29: 'Driver',
30: 'IoCompletion',
31: 'WaitCompletionPacket',
32: 'File',
33: 'TmTm',
34: 'TmTx',
35: 'TmRm',
36: 'TmEn',
37: 'Section',
38: 'Session',
39: 'Partition',
40: 'Key',
41: 'ALPC Port',
42: 'PowerRequest',
43: 'WmiGuid',
44: 'EtwRegistration',
45: 'EtwConsumer',
46: 'DmaAdapter',
47: 'DmaDomain',
48: 'PcwObject',
49: 'FilterConnectionPort',
50: 'FilterCommunicationPort',
51: 'NetworkNamespace',
52: 'DxgkSharedResource',
53: 'DxgkSharedSyncObject',
54: 'DxgkSharedSwapChainObject',
}
class _OBJECT_HEADER_10_1AC738FB(_OBJECT_HEADER_10):
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'DebugObject',
12: 'Event',
13: 'Mutant',
14: 'Callback',
15: 'Semaphore',
16: 'Timer',
17: 'IRTimer',
18: 'Profile',
19: 'KeyedEvent',
20: 'WindowStation',
21: 'Desktop',
22: 'Composition',
23: 'RawInputManager',
24: 'TpWorkerFactory',
25: 'Adapter',
26: 'Controller',
27: 'Device',
28: 'Driver',
29: 'IoCompletion',
30: 'WaitCompletionPacket',
31: 'File',
32: 'TmTm',
33: 'TmTx',
34: 'TmRm',
35: 'TmEn',
36: 'Section',
37: 'Session',
38: 'Partition',
39: 'Key',
40: 'ALPC Port',
41: 'PowerRequest',
42: 'WmiGuid',
43: 'EtwRegistration',
44: 'EtwConsumer',
45: 'DmaAdapter',
46: 'DmaDomain',
47: 'PcwObject',
48: 'FilterConnectionPort',
49: 'FilterCommunicationPort',
50: 'NetworkNamespace',
51: 'DxgkSharedResource',
52: 'DxgkSharedSyncObject',
53: 'DxgkSharedSwapChainObject',
}
class _OBJECT_HEADER_10_DD08DD42(_OBJECT_HEADER_10):
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'PsSiloContextPaged',
12: 'PsSiloContextNonPaged',
13: 'DebugObject',
14: 'Event',
15: 'Mutant',
16: 'Callback',
17: 'Semaphore',
18: 'Timer',
19: 'IRTimer',
20: 'Profile',
21: 'KeyedEvent',
22: 'WindowStation',
23: 'Desktop',
24: 'Composition',
25: 'RawInputManager',
26: 'CoreMessaging',
27: 'TpWorkerFactory',
28: 'Adapter',
29: 'Controller',
30: 'Device',
31: 'Driver',
32: 'IoCompletion',
33: 'WaitCompletionPacket',
34: 'File',
35: 'TmTm',
36: 'TmTx',
37: 'TmRm',
38: 'TmEn',
39: 'Section',
40: 'Session',
41: 'Partition',
42: 'Key',
43: 'RegistryTransaction',
44: 'ALPC',
45: 'PowerRequest',
46: 'WmiGuid',
47: 'EtwRegistration',
48: 'EtwConsumer',
49: 'DmaAdapter',
50: 'DmaDomain',
51: 'PcwObject',
52: 'FilterConnectionPort',
53: 'FilterCommunicationPort',
54: 'NdisCmState',
55: 'DxgkSharedResource',
56: 'DxgkSharedSyncObject',
57: 'DxgkSharedSwapChainObject',
58: 'VRegConfigurationContext',
59: 'VirtualKey',
}
class _OBJECT_HEADER_10_15063(_OBJECT_HEADER_10):
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'ActivityReference',
12: 'PsSiloContextPaged',
13: 'PsSiloContextNonPaged',
14: 'DebugObject',
15: 'Event',
16: 'Mutant',
17: 'Callback',
18: 'Semaphore',
19: 'Timer',
20: 'IRTimer',
21: 'Profile',
22: 'KeyedEvent',
23: 'WindowStation',
24: 'Desktop',
25: 'Composition',
26: 'RawInputManager',
27: 'CoreMessaging',
28: 'TpWorkerFactory',
29: 'Adapter',
30: 'Controller',
31: 'Device',
32: 'Driver',
33: 'IoCompletion',
34: 'WaitCompletionPacket',
35: 'File',
36: 'TmTm',
37: 'TmTx',
38: 'TmRm',
39: 'TmEn',
40: 'Section',
41: 'Session',
42: 'Partition',
43: 'Key',
44: 'RegistryTransaction',
45: 'ALPC Port',
46: 'PowerRequest',
47: 'WmiGuid',
48: 'EtwRegistration',
49: 'EtwSessionDemuxEntry',
50: 'EtwConsumer',
51: 'DmaAdapter',
52: 'DmaDomain',
53: 'PcwObject',
54: 'FilterConnectionPort',
55: 'FilterCommunicationPort',
56: 'NdisCmState',
57: 'DxgkSharedResource',
58: 'DxgkSharedSyncObject',
59: 'DxgkSharedSwapChainObject',
60: 'DxgkCurrentDxgProcessObject',
61: 'VRegConfigurationContext'
}
class _HANDLE_TABLE_10_DD08DD42(win8._HANDLE_TABLE_81R264):
def decode_pointer(self, value):
value = value & 0xFFFFFFFFFFFFFFF8
value = value >> self.DECODE_MAGIC
if (value & (1 << 47)):
value = value | 0xFFFF000000000000
return value
class Win10ObjectHeader(obj.ProfileModification):
before = ["Win8ObjectClasses"]
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4}
def modification(self, profile):
metadata = profile.metadata
build = metadata.get("build", 0)
if build >= 15063:
header = _OBJECT_HEADER_10_15063
## update the handle table here as well
if metadata.get("memory_model") == "64bit":
profile.object_classes.update({
"_HANDLE_TABLE": _HANDLE_TABLE_10_DD08DD42})
elif build >= 14393:
header = _OBJECT_HEADER_10_DD08DD42
## update the handle table here as well
if metadata.get("memory_model") == "64bit":
profile.object_classes.update({
"_HANDLE_TABLE": _HANDLE_TABLE_10_DD08DD42})
elif build >= 10240:
header = _OBJECT_HEADER_10_1AC738FB
else:
header = _OBJECT_HEADER_10
profile.object_classes.update({"_OBJECT_HEADER": header})
class Win10PoolHeader(obj.ProfileModification):
before = ['WindowsOverlay']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'build': lambda x: x == 10240}
def modification(self, profile):
meta = profile.metadata
memory_model = meta.get("memory_model", "32bit")
if memory_model == "32bit":
pool_types = {'_POOL_HEADER' : [ 0x8, {
'PreviousSize' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 9, native_type='unsigned short')]],
'PoolIndex' : [ 0x0, ['BitField', dict(start_bit = 9, end_bit = 16, native_type='unsigned short')]],
'BlockSize' : [ 0x2, ['BitField', dict(start_bit = 0, end_bit = 9, native_type='unsigned short')]],
'PoolType' : [ 0x2, ['BitField', dict(start_bit = 9, end_bit = 16, native_type='unsigned short')]],
'Ulong1' : [ 0x0, ['unsigned long']],
'PoolTag' : [ 0x4, ['unsigned long']],
'AllocatorBackTraceIndex' : [ 0x4, ['unsigned short']],
'PoolTagHash' : [ 0x6, ['unsigned short']],
}]}
else:
pool_types = {'_POOL_HEADER' : [ 0x10, {
'PreviousSize' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 8, native_type='unsigned short')]],
'PoolIndex' : [ 0x0, ['BitField', dict(start_bit = 8, end_bit = 16, native_type='unsigned short')]],
'BlockSize' : [ 0x2, ['BitField', dict(start_bit = 0, end_bit = 8, native_type='unsigned short')]],
'PoolType' : [ 0x2, ['BitField', dict(start_bit = 8, end_bit = 16, native_type='unsigned short')]],
'Ulong1' : [ 0x0, ['unsigned long']],
'PoolTag' : [ 0x4, ['unsigned long']],
'ProcessBilled' : [ 0x8, ['pointer64', ['_EPROCESS']]],
'AllocatorBackTraceIndex' : [ 0x8, ['unsigned short']],
'PoolTagHash' : [ 0xa, ['unsigned short']],
}]}
profile.vtypes.update(pool_types)
class Win10x64(obj.Profile):
""" A Profile for Windows 10 x64 """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 9841
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x64_10586(obj.Profile):
""" A Profile for Windows 10 x64 (10.0.10586.306 / 2016-04-23) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 10240
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_1AC738FB_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x64_14393(obj.Profile):
""" A Profile for Windows 10 x64 (10.0.14393.0 / 2016-07-16) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 14393
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_DD08DD42_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x86(obj.Profile):
""" A Profile for Windows 10 x86 """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 9841
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x86_10586(obj.Profile):
""" A Profile for Windows 10 x86 (10.0.10586.420 / 2016-05-28) """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 10240
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_44B89EEA_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x86_14393(obj.Profile):
""" A Profile for Windows 10 x86 (10.0.14393.0 / 2016-07-16) """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 14393
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_9619274A_vtypes'
_md_product = ["NtProductWinNt"]
class Win2016x64_14393(Win10x64_14393):
""" A Profile for Windows Server 2016 x64 (10.0.14393.0 / 2016-07-16) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 14393
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_DD08DD42_vtypes'
_md_product = ["NtProductLanManNt", "NtProductServer"]
class Win10x86_15063(obj.Profile):
""" A Profile for Windows 10 x86 (10.0.15063.0 / 2017-04-04) """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 15063
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_15063_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x64_15063(obj.Profile):
""" A Profile for Windows 10 x64 (10.0.15063.0 / 2017-04-04) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 15063
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_15063_vtypes'
_md_product = ["NtProductWinNt"]
| gpl-2.0 | -771,911,055,582,781,700 | 30.026874 | 119 | 0.552744 | false |
aYukiSekiguchi/ACCESS-Chromium | chrome/test/chromeos/autotest/files/client/site_tests/network_PyAutoConnectivityTests/network_PyAutoConnectivityTests.py | 1 | 3025 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import dbus
import os
import pwd
import shutil
import subprocess
from autotest_lib.client.bin import utils
from autotest_lib.client.cros import constants, chrome_test, cros_ui, login
class network_PyAutoConnectivityTests(chrome_test.ChromeTestBase):
"""Wrapper for running Chrome's PyAuto-based functional tests.
Performs all setup and fires off the FULL suite.
"""
version = 1
def initialize(self):
chrome_test.ChromeTestBase.initialize(self)
assert os.geteuid() == 0, 'Need superuser privileges'
deps_dir = os.path.join(self.autodir, 'deps')
subprocess.check_call(['chown', '-R', 'chronos', self.cr_source_dir])
# Setup suid python binary which can enable chrome testing interface
suid_python = os.path.join(self.test_binary_dir, 'suid-python')
py_path = subprocess.Popen(['which', 'python'],
stdout=subprocess.PIPE).communicate()[0]
py_path = py_path.strip()
assert os.path.exists(py_path), 'Could not find python'
if os.path.islink(py_path):
linkto = os.readlink(py_path)
py_path = os.path.join(os.path.dirname(py_path), linkto)
shutil.copy(py_path, suid_python)
os.chown(suid_python, 0, 0)
os.chmod(suid_python, 04755)
# chronos should own the current dir
chronos_id = pwd.getpwnam('chronos')
os.chown(os.getcwd(), chronos_id.pw_uid, chronos_id.pw_gid)
# Make sure Chrome minidumps are written locally
minidumps_file = '/mnt/stateful_partition/etc/enable_chromium_minidumps'
if not os.path.exists(minidumps_file):
open(minidumps_file, 'w').close()
# Allow browser restart by its babysitter (session_manager)
if os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE):
os.remove(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)
cros_ui.nuke()
assert os.path.exists(minidumps_file)
# Setup /tmp/disable_chrome_restart
# Disallow further browser restart by its babysitter
if not os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE):
open(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE, 'w').close()
assert os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)
def run_once(self):
"""Run pyauto functional tests."""
# Enable chrome testing interface and Login
deps_dir = os.path.join(self.autodir, 'deps')
pyautolib_dir = os.path.join(self.cr_source_dir,
'chrome', 'test', 'pyautolib')
functional_cmd = cros_ui.xcommand_as(
'%s/chrome_test/test_src/chrome/test/functional/'
'pyauto_functional.py --suite=CHROMEOS_CONNECTIVITY -v' % deps_dir)
utils.system(functional_cmd)
| bsd-3-clause | -7,244,026,314,286,542,000 | 41.605634 | 80 | 0.648264 | false |
StefanWinterfeldt/Buildicator | messageSinks/consoleMessageSink.py | 1 | 2127 | # Copyright 2014 Stefan Winterfeldt <[email protected]>
#                              <[email protected]>
# BITZ GmbH <[email protected]>
#
#This file is part of Buildicator.
#
#Buildicator is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Buildicator is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Buildicator. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the console message sink.
All message sink modules must implement the 'getInstance' method, returning
an instance of the message sink class that has been initialized with the
appropriate args dictionary.
"""
from messageSinks.abstractMessageSink import AbstractMessageSink
import libs.statusEnum as statusEnum
class ConsoleMessageSink(AbstractMessageSink):
"""A message sink that simply displays messages on the console.
This message sink uses the following arguments:
errorMessage - The message to display in case of an error status.
failureMessage - The message to display in case of a failure status.
successMessage - The message to display in case of a success status.
"""
def __init__(self, args):
self.errorMessage = args['errorMessage']
self.failureMessage = args['failureMessage']
self.successMessage = args['successMessage']
def showStatus(self, status):
if status == statusEnum.STATUS_ERROR:
print(self.errorMessage)
elif status == statusEnum.STATUS_FAILURE:
print(self.failureMessage)
elif status == statusEnum.STATUS_SUCCESS:
print(self.successMessage)
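
# Minimal usage sketch (illustrative only; the message strings below are
# hypothetical examples of the args dictionary Buildicator supplies):
if __name__ == '__main__':
    _demo_sink = ConsoleMessageSink({
        'errorMessage': 'Build is broken!',
        'failureMessage': 'Build failed.',
        'successMessage': 'Build succeeded.',
    })
    _demo_sink.showStatus(statusEnum.STATUS_SUCCESS)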
def getInstance(args):
return ConsoleMessageSink(args) | gpl-3.0 | 237,320,716,383,971,300 | 38.407407 | 76 | 0.720733 | false |
IMIO/django-fixmystreet | django_fixmystreet/api/reports/serializers.py | 1 | 1936 | # -*- coding: utf-8 -*-
from rest_framework import serializers
from . import models
class ReportAssignmentAcceptSerializer(serializers.Serializer):
reference_id = serializers.CharField()
comment = serializers.CharField(required=False)
created_at = serializers.DateTimeField()
def restore_object(self, attrs, instance=None):
# Update existing instance.
if instance:
instance.reference_id = attrs.get("reference_id", instance.reference_id)
instance.comment = attrs.get("comment", instance.comment)
instance.created_at = attrs.get("created_at", instance.created_at)
return instance
# Create new instance.
return models.ReportAssignmentAccept(**attrs)
class ReportAssignmentRejectSerializer(serializers.Serializer):
comment = serializers.CharField()
created_at = serializers.DateTimeField()
def restore_object(self, attrs, instance=None):
# Update existing instance.
if instance:
instance.comment = attrs.get("comment", instance.comment)
instance.created_at = attrs.get("created_at", instance.created_at)
return instance
# Create new instance.
return models.ReportAssignmentReject(**attrs)
class ReportAssignmentCloseSerializer(serializers.Serializer):
reference_id = serializers.CharField()
comment = serializers.CharField(required=False)
created_at = serializers.DateTimeField()
def restore_object(self, attrs, instance=None):
# Update existing instance.
if instance:
instance.reference_id = attrs.get("reference_id", instance.reference_id)
instance.comment = attrs.get("comment", instance.comment)
instance.created_at = attrs.get("created_at", instance.created_at)
return instance
# Create new instance.
return models.ReportAssignmentClose(**attrs)
| agpl-3.0 | -65,268,529,041,024,920 | 33.571429 | 84 | 0.681302 | false |
nickaugust/pychatkit | clients.py | 1 | 1973 | #!/usr/bin/env python
import asyncio
import logging
logger = logging.getLogger("chatkit:" + __name__)
class WSClientManager:
def __init__(self):
self._clients = []
def all(self):
return self._clients
def add(self, client):
logging.info("+ WSClient {}".format(client))
self._clients.append(client)
def remove(self, client):
logging.info("- WSClient {}".format(client))
self._clients.remove(client)
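
# Usage sketch (hypothetical handler wiring; the server object and websocket
# connection come from whatever framework instantiates WSClient):
#
#   @asyncio.coroutine
#   def handle_connection(server, websocket):
#       client = WSClient(server, websocket)
#       try:
#           yield from client.send('{"type": "welcome"}')
#       finally:
#           WSClient.objects.remove(client)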
class WSClient:
objects = WSClientManager()
def __init__(self, server, ws, user=None, token=None):
self.server = server
self._ws = ws
self.user = user
self.token = token
WSClient.objects.add(self)
@asyncio.coroutine
def disconnect(self, message):
self.server.disconnect(self, message)
@asyncio.coroutine
def send(self, data):
if self._ws.state != "OPEN":
logging.info("WS state not OPEN, disconnecting" +
str(self.user))
self.disconnect("WS state not OPEN.")
return
logging.info("> {} {}".format(self.user, data))
yield from self._ws.send(data)
@asyncio.coroutine
def send_one(self, to_client, data):
if to_client._ws.state != "OPEN":
            yield from to_client.disconnect("WS state not OPEN.")
            return
yield from to_client._ws.send(data)
logging.info("> {} {}".format(to_client.user, data))
@asyncio.coroutine
def send_all(self, from_client, data):
        for c in WSClient.objects.all():
yield from self.send_one(c, data)
@asyncio.coroutine
def send_others(self, from_client, data):
        for c in WSClient.objects.all():
if c != from_client:
yield from self.send_one(c, data)
@asyncio.coroutine
def get_others(self, client):
        for c in WSClient.objects.all():
resp = "join {}".format(c.user.username)
yield from self.send_one(self, resp)
| mit | -1,395,320,659,051,184,000 | 26.788732 | 61 | 0.581855 | false |
likit/BioUtils | fetch_entrez_from_geneid.py | 1 | 1631 | '''Selects protein sequences from NCBI that are in a list
from Geisha text file.
Output is written to a FASTA file named after the input file.
'''
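# Usage sketch (file name and e-mail address below are placeholders):
#   python fetch_entrez_from_geneid.py gene_ids.txt [email protected]
# Sequences are fetched one by one (with a 3-second pause between requests)
# and written to gene_ids.fa in FASTA format.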
import os
import sys
import time
from Bio import SeqIO, Entrez
def parse(infile):
'''Return a set of gene IDs from an input file.'''
for line in open(infile):
geneid = line.split()[0]
yield geneid
def fetch(geneid):
print >> sys.stderr, 'fetching.. gene ID: %s' % geneid
handle = Entrez.efetch(db='gene', retmode='xml', id=geneid)
xmldata = Entrez.read(handle)
product = xmldata[0]['Entrezgene_locus'][0]\
['Gene-commentary_products'][0]
prodtype = product['Gene-commentary_type'].attributes['value']
print >> sys.stderr, 'product type = %s' % (prodtype)
seq_gi = xmldata[0]['Entrezgene_locus'][0]\
['Gene-commentary_products'][0]\
['Gene-commentary_seqs'][0]\
['Seq-loc_whole']['Seq-id']\
['Seq-id_gi']
handle = Entrez.efetch(db='nucleotide', retmode='text',
rettype='fasta', id=seq_gi)
seq = SeqIO.read(handle, 'fasta')
return seq
def main():
infile = sys.argv[1]
Entrez.email = sys.argv[2]
outfile = os.path.splitext(infile)[0] + ".fa"
records = []
for geneid in parse(infile):
try:
records.append(fetch(geneid))
except:
print >> sys.stderr, 'Cannot retrieve a sequence'
continue
time.sleep(3)
SeqIO.write(records, outfile, 'fasta')
print >> sys.stderr, 'Total sequences = %d' % len(records)
if __name__=='__main__':
main()
| bsd-2-clause | -5,944,984,507,646,516,000 | 24.888889 | 66 | 0.578786 | false |
forseti-security/forseti-security | tests/services/scanner/scanner_base_db.py | 1 | 4263 | """Helper base class for testing scanners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
from datetime import timedelta
import os
import unittest.mock as mock
from sqlalchemy.orm import sessionmaker
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.scanner import scanner
from google.cloud.forseti.services.inventory import storage
from google.cloud.forseti.services.scanner import dao as scanner_dao
from tests.services.util.db import create_test_engine_with_file
from tests.unittest_utils import ForsetiTestCase
FAKE_INV_INDEX_ID = 'aaa'
FAKE_VIOLATION_HASH = (u'111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111'
'11111111111111111111')
FAKE_VIOLATIONS = [
{'resource_id': 'fake_firewall_111',
'full_name': 'full_name_111',
'rule_name': 'disallow_all_ports_111',
'rule_index': 111,
'violation_data':
{'policy_names': ['fw-tag-match_111'],
'recommended_actions':
{'DELETE_FIREWALL_RULES': ['fw-tag-match_111']}},
'violation_type': 'FIREWALL_BLACKLIST_VIOLATION_111',
'resource_type': 'firewall_rule',
'resource_data': 'inventory_data_111',
'resource_name': 'fw-tag-match_111',
},
{'resource_id': 'fake_firewall_222',
'full_name': 'full_name_222',
'rule_name': 'disallow_all_ports_222',
'rule_index': 222,
'violation_data':
{'policy_names': ['fw-tag-match_222'],
'recommended_actions':
{'DELETE_FIREWALL_RULES': ['fw-tag-match_222']}},
'violation_type': 'FIREWALL_BLACKLIST_VIOLATION_222',
'resource_type': 'firewall_rule',
'resource_data': 'inventory_data_222',
'resource_name': 'fw-tag-match_222',
}
]
# pylint: disable=bad-indentation
class ScannerBaseDbTestCase(ForsetiTestCase):
"""Base class for database centric tests."""
def setUp(self):
"""Setup method."""
ForsetiTestCase.setUp(self)
self.engine, self.dbfile = create_test_engine_with_file()
session_maker = sessionmaker()
self.session = session_maker(bind=self.engine)
storage.initialize(self.engine)
scanner_dao.initialize(self.engine)
self.session.flush()
self.violation_access = scanner_dao.ViolationAccess(self.session)
self.inv_index_id1, self.inv_index_id2, self.inv_index_id3 = (
_setup_inv_indices(self.session))
def tearDown(self):
"""Teardown method."""
os.unlink(self.dbfile)
ForsetiTestCase.tearDown(self)
def populate_db(
self, violations=FAKE_VIOLATIONS, inv_index_id=FAKE_INV_INDEX_ID,
scanner_index_id=None, succeeded=['IamPolicyScanner'], failed=[]):
"""Populate the db with violations.
Args:
violations (dict): the violations to write to the test database
inv_index_id (str): the inventory index to use
scanner_index_id (str): the scanner index to use
succeeded (list): names of scanners that ran successfully
failed (list): names of scanners that failed
"""
if not scanner_index_id:
scanner_index_id = scanner.init_scanner_index(
self.session, inv_index_id)
self.violation_access.create(violations, scanner_index_id)
scanner.mark_scanner_index_complete(
self.session, scanner_index_id, succeeded, failed)
return scanner_index_id
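
    # Usage sketch inside a derived test case (illustrative):
    #   scanner_index_id = self.populate_db(FAKE_VIOLATIONS, self.inv_index_id1)
    #   # ... assertions can then query the violations written through
    #   # self.violation_access under this scanner_index_id ...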
def _setup_inv_indices(session):
"""The method under test returns the newest `ScannerIndex` row."""
with mock.patch.object(date_time, 'get_utc_now_datetime') as mock_date_time:
time1 = datetime.utcnow()
time2 = time1 + timedelta(minutes=5)
time3 = time1 + timedelta(minutes=7)
mock_date_time.side_effect = [time1, time2, time3]
iidx1 = storage.InventoryIndex.create()
iidx2 = storage.InventoryIndex.create()
iidx3 = storage.InventoryIndex.create()
session.add(iidx1)
session.add(iidx2)
session.add(iidx3)
session.flush()
return (iidx1.id, iidx2.id, iidx3.id)
| apache-2.0 | -3,047,828,724,957,816,300 | 36.394737 | 80 | 0.65658 | false |
vatlab/SOS | src/sos/tasks.py | 1 | 77822 | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import copy
import os
import fasteners
import pickle
import time
import lzma
import math
import struct
from enum import Enum
from collections import namedtuple
from collections.abc import Sequence
from datetime import datetime
from typing import Union, Dict, List
from .utils import (
env,
expand_time,
linecount_of_file,
sample_lines,
short_repr,
tail_of_file,
pretty_size,
expand_size,
format_HHMMSS,
DelayedAction,
format_duration,
)
from .targets import sos_targets
monitor_interval = 5
resource_monitor_interval = 60
class TaskParams(object):
"""A parameter object that encaptulates parameters sending to
task executors. This would makes the output of workers, especially
in the web interface much cleaner (issue #259)"""
def __init__(self, name, global_def, task, sos_dict, tags):
self.name = name
self.global_def = global_def
self.task = task
self.sos_dict = sos_dict
self.tags = tags
# remove builtins that could be saved in a dictionary
if "CONFIG" in self.sos_dict and "__builtins__" in self.sos_dict["CONFIG"]:
self.sos_dict["CONFIG"].pop("__builtins__")
def __repr__(self):
return self.name
class MasterTaskParams(TaskParams):
def __init__(self, num_workers=None):
self.ID = "t0"
self.name = self.ID
self.global_def = ""
self.task = ""
self.sos_dict = {
"_runtime": {"num_workers": num_workers},
"_input": sos_targets(),
"_output": sos_targets(),
"_depends": sos_targets(),
"step_input": sos_targets(),
"step_output": sos_targets(),
"step_depends": sos_targets(),
"step_name": "",
"_index": 0,
}
self.tags = []
# a collection of tasks that will be executed by the master task
self.task_stack = []
def _parse_num_workers(self, num_workers):
# return number of nodes and workers
if isinstance(num_workers, Sequence) and len(num_workers) >= 1:
val = str(num_workers[0])
n_workers = val.rsplit(":", 1)[-1] if ":" in val else val
n_nodes = len(num_workers)
elif isinstance(num_workers, str):
n_workers = (
num_workers.rsplit(":", 1)[-1] if ":" in num_workers else num_workers
)
n_nodes = 1
elif isinstance(num_workers, int):
n_workers = num_workers
n_nodes = 1
elif num_workers is None:
n_workers = 1
n_nodes = 1
else:
raise RuntimeError(
f"Unacceptable value for parameter trunk_workers {num_workers}"
)
try:
n_workers = int(n_workers)
except Exception:
raise ValueError(
f"Unacceptable value for option trunk_workers {num_workers}"
)
if n_workers <= 0:
raise ValueError(
f"Unacceptable value for option trunk_workers {num_workers}"
)
return n_nodes, n_workers
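        # Examples of the mapping implemented above (derived from the code):
        #   ["node1:4", "node2:4"] -> (2, 4)   two nodes, four workers per node
        #   "host:8" or "8"        -> (1, 8)
        #   3                      -> (1, 3)
        #   None                   -> (1, 1)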
def num_tasks(self):
return len(self.task_stack)
def push(self, task_id, params):
# update walltime, cores, and mem
        # right now we require all tasks to have the same resource requirement, which is
# quite natural because they are from the same step
#
# update input, output, and depends
#
# walltime etc
n_nodes, n_workers = self._parse_num_workers(
self.sos_dict["_runtime"]["num_workers"]
)
if not self.task_stack:
for key in (
"walltime",
"max_walltime",
"cores",
"nodes",
"max_cores",
"mem",
"max_mem",
"name",
"workdir",
"verbosity",
"sig_mode",
"run_mode",
):
if (
key in params.sos_dict["_runtime"]
and params.sos_dict["_runtime"][key] is not None
):
self.sos_dict["_runtime"][key] = params.sos_dict["_runtime"][key]
self.sos_dict["step_name"] = params.sos_dict["step_name"]
self.tags = params.tags
else:
for key in (
"walltime",
"max_walltime",
"cores",
"max_cores",
"mem",
"max_mem",
"name",
"workdir",
):
val0 = self.task_stack[0][1].sos_dict["_runtime"].get(key, None)
val = params.sos_dict["_runtime"].get(key, None)
if val0 != val:
raise ValueError(f"All tasks should have the same resource {key}")
if val0 is None:
continue
# If there are multiple nodes and multiple workers, there are
# n_workers * n_nodes workers at the same time, so the jobs
# will be completed in n_batches
n_batches = math.ceil(
(len(self.task_stack) + 1) / (n_workers * n_nodes)
)
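                # Worked example: with 9 queued subtasks plus this one (10 in
                # total), trunk_workers=['n1:2', 'n2:2'] gives n_workers=2 and
                # n_nodes=2, so n_batches = ceil(10 / 4) = 3 and the requested
                # walltime below becomes 3x the per-subtask walltime.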
if key == "walltime":
# the real walltime would be the total time on one node
self.sos_dict["_runtime"]["walltime"] = format_HHMMSS(
n_batches * expand_time(val0)
)
elif key == "mem":
                    # the master task requests n_workers * mem of each subtask
self.sos_dict["_runtime"]["mem"] = n_workers * expand_size(val0)
elif key == "cores":
self.sos_dict["_runtime"]["cores"] = n_workers * val0
elif key == "name":
self.sos_dict["_runtime"][
"name"
] = f"{val0}_{len(self.task_stack) + 1}"
self.tags.extend(params.tags)
# if cores is unspecified but there are more than one workers
if (
"cores" not in self.sos_dict["_runtime"]
and n_workers is not None
and n_workers > 1
):
self.sos_dict["_runtime"]["cores"] = n_workers
#
# input, output, preserved vars etc
for key in ["_input", "_output", "_depends"]:
if key in params.sos_dict and isinstance(params.sos_dict[key], sos_targets):
if key == "__builtins__":
continue
# do not extend duplicated input etc
self.sos_dict[key].extend(params.sos_dict[key])
#
self.task_stack.append([task_id, params])
self.tags = sorted(list(set(self.tags)))
#
id_prefix = f't{len(self.task_stack)}'
self.ID = f"{id_prefix}{self.task_stack[0][0][:-(len(id_prefix))]}"
self.name = self.ID
def finalize(self):
if not self.task_stack:
return
common_dict = None
common_keys = set()
for _, params in self.task_stack:
if common_dict is None:
common_dict = params.sos_dict
common_keys = set(params.sos_dict.keys())
else:
common_keys = {
key
for key in common_keys
if key in params.sos_dict
and common_dict[key] == params.sos_dict[key]
}
if not common_keys:
break
        # if there is only one subtask, _output will be moved out of subtasks, which makes
        # the retrieval of outputs difficult.
common_keys.discard("_output")
self.common_dict = {x: common_dict[x] for x in common_keys}
for _, params in self.task_stack:
params.sos_dict = {
k: v for k, v in params.sos_dict.items() if k not in common_keys
}
#
n_nodes = self._parse_num_workers(self.sos_dict["_runtime"]["num_workers"])[0]
# trunk_workers and cores cannot be specified together, so if n_nodes > 1,
# nodes should not have been specified.
if n_nodes is not None and n_nodes > 1:
self.sos_dict["_runtime"]["nodes"] = n_nodes
return self
def combine_results(task_id, results):
# now we collect result
all_res = {
"ret_code": 0,
"output": None,
"subtasks": {},
"shared": {},
"skipped": 0,
"signature": {},
}
for res in results:
tid = res["task"]
all_res["subtasks"][tid] = res
if "exception" in res:
all_res["exception"] = res["exception"]
all_res["ret_code"] += 1
continue
all_res["ret_code"] += res["ret_code"]
if all_res["output"] is None:
all_res["output"] = copy.deepcopy(res["output"])
else:
try:
all_res["output"].extend(res["output"], keep_groups=True)
except Exception:
env.logger.warning(
f"Failed to extend output {all_res['output']} with {res['output']}"
)
all_res["shared"].update(res["shared"])
# does not care if one or all subtasks are executed or skipped.
all_res["skipped"] += res.get("skipped", 0)
if "signature" in res:
all_res["signature"].update(res["signature"])
if all_res["ret_code"] != 0:
if all_res["ret_code"] == len(results):
if env.config["run_mode"] == "run":
env.logger.info(f"All {len(results)} tasks in {task_id} ``failed``")
else:
env.logger.debug(f"All {len(results)} tasks in {task_id} ``failed``")
else:
if env.config["run_mode"] == "run":
env.logger.info(
f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``'
)
else:
env.logger.debug(
f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``'
)
# if some failed, some skipped, not skipped
if "skipped" in all_res:
all_res.pop("skipped")
elif all_res["skipped"]:
if all_res["skipped"] == len(results):
if env.config["run_mode"] == "run":
env.logger.info(
f"All {len(results)} tasks in {task_id} ``ignored`` or skipped"
)
else:
env.logger.debug(
f"All {len(results)} tasks in {task_id} ``ignored`` or skipped"
)
else:
# if only partial skip, we still save signature and result etc
if env.config["run_mode"] == "run":
env.logger.info(
f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped'
)
else:
env.logger.debug(
f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped'
)
all_res.pop("skipped")
else:
if env.config["run_mode"] == "run":
env.logger.info(f"All {len(results)} tasks in {task_id} ``completed``")
else:
env.logger.debug(f"All {len(results)} tasks in {task_id} ``completed``")
return all_res
class TaskStatus(Enum):
new = 0
pending = 1
submitted = 2
running = 3
aborted = 4
failed = 5
completed = 6
class TaskFile(object):
"""
The task file has the following format:
1. A binary header with the information of the structure of the file
with field defined by TaskHeader
2. compressed pickled param of task
3. compressed pulse file
4. compressed pickled result
5. compressed stdout
6. compressed stderr
7. compressed pickled signatures
"""
TaskHeader_v1 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader_v2 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size shell_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader_v3 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size runtime_size shell_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader = TaskHeader_v3
header_fmt_v1 = "!2h 8d 6i 128s"
header_fmt_v2 = "!2h 8d 7i 124s"
header_fmt_v3 = "!2h 8d 8i 120s"
header_fmt = header_fmt_v3
header_size = 220 # struct.calcsize(header_fmt)
    tags_offset = [92, 96, 100]  # size of the header fields before tags, for v1/v2/v3
tags_size = [128, 124, 120]
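
    # Sanity check of the constants above (illustrative): with standard sizes
    # and no padding ('!'), struct.calcsize('!2h 8d 8i 120s') is
    # 4 + 64 + 32 + 120 = 220, matching header_size, and the v1/v2/v3 prefixes
    # '!2h 8d 6i', '!2h 8d 7i' and '!2h 8d 8i' come to 92, 96 and 100 bytes,
    # matching tags_offset.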
def __init__(self, task_id: str):
self.task_id = task_id
self.task_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task_id + ".task"
)
def save(self, params):
if os.path.isfile(self.task_file):
if self.status == "running":
env.logger.debug(f"Task {self.task_id} is running and is not updated")
return
# keep original stuff but update params, which could contain
# new runtime info
self.params = params
return
        # updating job_file will not change its timestamp because it will only be
        # an update of runtime info
now = time.time()
# we keep in both places because params.tags is the only place to have it for subtasks
tags = params.tags
params_block = lzma.compress(pickle.dumps(params))
# env.logger.error(f'saving {self.task_id} params of size {len(params_block)}')
header = self.TaskHeader(
version=3,
status=TaskStatus.new.value,
last_modified=now,
new_time=now,
pending_time=0,
running_time=0,
submitted_time=0,
aborted_time=0,
failed_time=0,
completed_time=0,
params_size=len(params_block),
runtime_size=0,
shell_size=0,
pulse_size=0,
stdout_size=0,
stderr_size=0,
result_size=0,
signature_size=0,
tags=" ".join(sorted(tags)).ljust(128).encode(),
)
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "wb+") as fh:
self._write_header(fh, header)
fh.write(params_block)
def exists(self):
return os.path.isfile(self.task_file)
def _reset(self, fh):
# remove result, input, output etc and set the status of the task to new
header = self._read_header(fh)
now = time.time()
header = header._replace(
version=2,
status=TaskStatus.new.value,
last_modified=now,
new_time=now,
pending_time=0,
submitted_time=0,
running_time=0,
aborted_time=0,
failed_time=0,
completed_time=0,
runtime_size=0,
shell_size=0,
pulse_size=0,
stdout_size=0,
stderr_size=0,
result_size=0,
signature_size=0,
)
self._write_header(fh, header)
fh.truncate(self.header_size + header.params_size)
return header
def reset(self):
# remove result, input, output etc and set the status of the task to new
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
self._reset(fh)
def _read_header(self, fh):
fh.seek(0, 0)
data = fh.read(self.header_size)
if struct.unpack("!h", data[:2])[0] == 1:
header = self.TaskHeader_v1._make(struct.unpack(self.header_fmt_v1, data))
if header.version not in (1, 2, 3):
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
return self.TaskHeader(
runtime_size=0, shell_size=0, **header._asdict()
)._replace(version=3)
if struct.unpack("!h", data[:2])[0] == 2:
header = self.TaskHeader_v2._make(struct.unpack(self.header_fmt_v2, data))
if header.version not in (1, 2, 3):
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
return self.TaskHeader(runtime_size=0, **header._asdict())._replace(
version=3
)
header = self.TaskHeader._make(struct.unpack(self.header_fmt, data))
if header.version not in (1, 2, 3):
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
return header
def _write_header(self, fh, header):
fh.seek(0, 0)
fh.write(struct.pack(self.header_fmt, *header))
def _get_content(self, exts):
if isinstance(exts, str):
exts = [exts]
content = b""
for ext in exts:
filename = self.task_file[:-5] + ext
if not os.path.isfile(filename):
continue
with open(filename, "rb") as fh:
content += fh.read()
if not content:
return b""
return lzma.compress(content)
def add_outputs(self, keep_result=False):
# get header
shell = self._get_content(".sh")
pulse = self._get_content(".pulse")
stdout = self._get_content([".out", ".sosout"])
stderr = self._get_content([".err", ".soserr"])
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
if header.result_size != 0:
if not keep_result:
result_size = 0
signature_size = 0
else:
result_size = header.result_size
signature_size = header.signature_size
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size,
0,
)
result = fh.read(header.result_size)
signature = fh.read(header.signature_size)
else:
result_size = 0
signature_size = 0
header = header._replace(
shell_size=len(shell),
pulse_size=len(pulse),
stdout_size=len(stdout),
stderr_size=len(stderr),
result_size=result_size,
signature_size=signature_size,
)
self._write_header(fh, header)
fh.seek(self.header_size + header.params_size + header.runtime_size, 0)
if shell:
fh.write(shell)
if pulse:
fh.write(pulse)
if stdout:
fh.write(stdout)
if stderr:
fh.write(stderr)
if result_size > 0:
fh.write(result)
if signature_size > 0:
fh.write(signature)
def add_result(self, result: dict = {}):
if not result:
params = self._get_params()
# this is a master task, get all sub task IDs
if hasattr(params, "task_stack"):
missing_tasks = set([x[0] for x in params.task_stack])
#
cache_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", self.task_id + ".cache"
)
results = []
if os.path.isfile(cache_file):
try:
with open(cache_file, "rb") as f:
while True:
res = pickle.load(f)
if not "task" in res:
# something is wrong
break
missing_tasks.remove(res["task"])
results.append(res)
os.remove(cache_file)
except Exception:
# we read until an error occurs
pass
if not results:
# if there is no result at all, do not save result
return
else:
# now, if we have some results, we need to fill the rest of the aborted ones
results.extend(
[
{
"task": t,
"ret_code": 2,
"shared": {},
"exception": RuntimeError(f"Subtask {t} is aborted"),
}
for t in missing_tasks
]
)
result = combine_results(self.task_id, results)
else:
# single task, no result, do not save
return
# add signature if exists
signature = result.get("signature", {})
result.pop("signature", None)
#
result_block = lzma.compress(pickle.dumps(result))
signature_block = lzma.compress(pickle.dumps(signature)) if signature else b""
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
header = header._replace(
result_size=len(result_block),
signature_size=len(signature_block),
)
self._write_header(fh, header)
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
)
fh.write(result_block)
if signature:
fh.write(signature_block)
def _get_info(self):
with open(self.task_file, "rb") as fh:
return self._read_header(fh)
def _set_info(self, info):
with open(self.task_file, "r+b") as fh:
fh.write(struct.pack(self.header_fmt, *info))
info = property(_get_info, _set_info)
def has_shell(self):
return self.info.shell_size > 0
def has_pulse(self):
return self.info.pulse_size > 0
def has_result(self):
return self.info.result_size > 0
def has_stdout(self):
return self.info.stdout_size > 0
def has_stderr(self):
return self.info.stderr_size > 0
def has_signature(self):
return self.info.signature_size > 0
def _get_params(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.params_size == 0 and header.runtime_size == 0:
return {}
fh.seek(self.header_size, 0)
if header.params_size == 0:
return {}
else:
try:
return pickle.loads(lzma.decompress(fh.read(header.params_size)))
except Exception as e:
raise RuntimeError(
f"Failed to obtain params of task {self.task_id}: {e}"
)
def _set_params(self, params):
params_block = lzma.compress(pickle.dumps(params))
# env.logger.error(f'updating {self.task_id} params of size {len(params_block)}')
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
if len(params_block) == header.params_size:
fh.seek(self.header_size, 0)
fh.write(params_block)
else:
fh.read(header.params_size)
runtime = fh.read(header.runtime_size)
shell = fh.read(header.shell_size)
pulse = fh.read(header.pulse_size)
stdout = fh.read(header.stdout_size)
stderr = fh.read(header.stderr_size)
result = fh.read(header.result_size)
signature = fh.read(header.signature_size)
header = header._replace(params_size=len(params_block))
self._write_header(fh, header)
fh.write(params_block)
if runtime:
fh.write(runtime)
if shell:
fh.write(shell)
if pulse:
fh.write(pulse)
if stdout:
fh.write(stdout)
if stderr:
fh.write(stderr)
if result:
fh.write(result)
if signature:
fh.write(signature)
fh.truncate(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
+ header.result_size
+ header.signature_size
)
params = property(_get_params, _set_params)
def _get_runtime(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.runtime_size == 0:
return {}
fh.seek(self.header_size + header.params_size, 0)
try:
return pickle.loads(lzma.decompress(fh.read(header.runtime_size)))
except Exception as e:
env.logger.error(
f"Failed to obtain runtime of task {self.task_id}: {e}"
)
return {"_runtime": {}}
def _set_runtime(self, runtime):
runtime_block = lzma.compress(pickle.dumps(runtime))
# env.logger.error(f'updating {self.task_id} params of size {len(params_block)}')
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
if len(runtime_block) == header.runtime_size:
fh.seek(self.header_size + header.params_size, 0)
fh.write(runtime_block)
else:
params = fh.read(header.params_size)
fh.seek(
self.header_size + header.params_size + header.runtime_size, 0
)
shell = fh.read(header.shell_size) if header.shell_size else b""
pulse = fh.read(header.pulse_size) if header.pulse_size else b""
stdout = fh.read(header.stdout_size) if header.stdout_size else b""
stderr = fh.read(header.stderr_size) if header.stderr_size else b""
result = fh.read(header.result_size) if header.result_size else b""
signature = (
fh.read(header.signature_size) if header.signature_size else b""
)
header = header._replace(runtime_size=len(runtime_block))
self._write_header(fh, header)
fh.write(params)
fh.write(runtime_block)
if shell:
fh.write(shell)
if pulse:
fh.write(pulse)
if stdout:
fh.write(stdout)
if stderr:
fh.write(stderr)
if result:
fh.write(result)
if signature:
fh.write(signature)
fh.truncate(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
+ header.result_size
+ header.signature_size
)
runtime = property(_get_runtime, _set_runtime)
def get_params_and_runtime(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.params_size == 0 and header.runtime_size == 0:
return {}
fh.seek(self.header_size, 0)
if header.params_size == 0:
params = {}
else:
try:
params = pickle.loads(lzma.decompress(fh.read(header.params_size)))
except Exception as e:
env.logger.error(
f"Failed to obtain params with runtime of task {self.task_id}: {e}"
)
params = {}
if "_runtime" not in params.sos_dict:
params.sos_dict["_runtime"] = {}
if header.runtime_size > 0:
try:
runtime = pickle.loads(
lzma.decompress(fh.read(header.runtime_size))
)
except Exception as e:
env.logger.error(
f"Failed to obtain runtime of task {self.task_id}: {e}"
)
runtime = {"_runtime": {}}
else:
runtime = {"_runtime": {}}
return params, runtime
def _get_status(self):
if not os.path.isfile(self.task_file):
return "missing"
try:
with open(self.task_file, "rb") as fh:
fh.seek(2, 0)
return TaskStatus(struct.unpack("!h", fh.read(2))[0]).name
except Exception as e:
env.logger.warning(
f"Incompatible task file {self.task_file} is removed. This might was most likely generated by a previous version of SoS but please report a bug if you can reproduce this warning message: {e}"
)
os.remove(self.task_file)
def _get_version(self):
with open(self.task_file, "rb") as fh:
fh.seek(0, 0)
return struct.unpack("!h", fh.read(2))[0]
version = property(_get_version)
def _get_last_updated(self):
with open(self.task_file, "rb") as fh:
fh.seek(4, 0)
return struct.unpack("!d", fh.read(8))[0]
last_updated = property(_get_last_updated)
def _set_status(self, status):
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
fh.seek(2, 0)
if status == "skipped":
# special status, set completed_time = running_time
# to make sure duration is zero
now = time.time()
sts = TaskStatus["completed"].value
# update status and last modified
fh.write(struct.pack("!hd", sts, now))
# also set 'run'
fh.seek(3 * 8, 1)
fh.write(struct.pack("!d", now))
# from the current location, move by status
fh.seek(2 * 8, 1)
fh.write(struct.pack("!d", now))
else:
if status == "running":
# setting to running status ... refresh the pulse file
pulse_file = os.path.join(
os.path.expanduser("~"),
".sos",
"tasks",
self.task_id + ".pulse",
)
with open(pulse_file, "w") as pd:
pd.write(f"#task: {self.task_id}\n")
pd.write(
f'#started at {datetime.now().strftime("%A, %d. %B %Y %I:%M%p")}\n#\n'
)
# wait for the pulse file to be created before updating task status
while True:
if os.path.isfile(pulse_file):
break
else:
time.sleep(0.01)
# if completed, we make sure that the duration will not
# be zero even if the task is completed very rapidly
now = time.time() + (0.01 if status == "completed" else 0)
sts = TaskStatus[status].value
# update status and last modified
fh.write(struct.pack("!hd", sts, now))
# from the current location, move by status
fh.seek(sts * 8, 1)
fh.write(struct.pack("!d", now))
# if restarting the task, make sure all irrelevant files
# are removed or finishing tasks.
if status in ("aborted", "completed", "failed", "pending"):
# terminal status
remove_task_files(
self.task_id,
[
".sh",
".job_id",
".sosout",
".soserr",
".out",
".err",
".pulse",
".cache",
],
)
status = property(_get_status, _set_status)
def _get_tags(self):
try:
with open(self.task_file, "rb") as fh:
fh.seek(0, 0)
ver = struct.unpack("!h", fh.read(2))[0]
fh.seek(self.tags_offset[ver - 1], 0)
return fh.read(self.tags_size[ver - 1]).decode().strip()
except Exception:
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
def _set_tags(self, tags: list):
with open(self.task_file, "r+b") as fh:
fh.seek(0, 0)
ver = struct.unpack("!h", fh.read(2))[0]
fh.seek(self.tags_offset[ver - 1], 0)
fh.write(" ".join(sorted(tags)).ljust(self.tags_size[ver - 1]).encode())
tags = property(_get_tags, _set_tags)
def _get_shell(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.shell_size == 0:
return ""
fh.seek(self.header_size + header.params_size + header.runtime_size, 0)
try:
return lzma.decompress(fh.read(header.shell_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode shell: {e}")
return ""
shell = property(_get_shell)
def _get_pulse(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.pulse_size == 0:
return ""
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size,
0,
)
try:
return lzma.decompress(fh.read(header.pulse_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode pulse: {e}")
return ""
pulse = property(_get_pulse)
def _get_stdout(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.stdout_size == 0:
return ""
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.pulse_size
+ header.shell_size,
0,
)
try:
return lzma.decompress(fh.read(header.stdout_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode stdout: {e}")
return ""
stdout = property(_get_stdout)
def _get_stderr(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.stderr_size == 0:
return ""
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size,
0,
)
try:
return lzma.decompress(fh.read(header.stderr_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode stderr: {e}")
return ""
stderr = property(_get_stderr)
def _get_result(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.result_size == 0:
return {}
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size,
0,
)
try:
return pickle.loads(lzma.decompress(fh.read(header.result_size)))
except Exception as e:
env.logger.warning(f"Failed to decode result: {e}")
return {"ret_code": 1}
result = property(_get_result)
def _get_signature(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.signature_size == 0:
return {}
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
+ header.result_size,
0,
)
try:
return pickle.loads(lzma.decompress(fh.read(header.signature_size)))
except Exception as e:
env.logger.warning(f"Failed to decode signature: {e}")
return {"ret_code": 1}
signature = property(_get_signature)
def tags_created_start_and_duration(self, formatted=False):
try:
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
try:
tags = header.tags.decode().strip()
except Exception:
raise ValueError(
f"{self.task_file} is in a format that is no longer supported."
)
ct = header.new_time
if header.running_time != 0:
st = header.running_time
if TaskStatus(header.status) == TaskStatus.running:
dr = time.time() - st
else:
dr = header.last_modified - st
else:
return (
tags,
("Created " + format_duration(time.time() - ct, True) + " ago")
if formatted
else ct,
"",
"",
)
if not formatted:
return tags, ct, st, dr
#
return (
tags,
"Created " + format_duration(time.time() - ct, True) + " ago",
"Started " + format_duration(time.time() - st) + " ago",
("Ran for " + format_duration(int(dr)))
if dr > 0
else "Signature checked",
)
except Exception:
# missing tag file or something went wrong
return "", "", "", ""
def taskDuration(task):
filename = os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{task}.task")
return os.path.getatime(filename) - os.path.getmtime(filename)
def remove_task_files(task: str, exts: list):
task_dir = os.path.join(os.path.expanduser("~"), ".sos", "tasks")
for ext in exts:
filename = os.path.join(task_dir, task + ext)
if os.path.isfile(filename):
try:
os.remove(filename)
except Exception:
# if the file cannot be removed now, we use a thread to wait a
# bit and try to remove it later. The function should not
# wait for the thread though
try:
DelayedAction(os.remove, filename)
except Exception:
pass
def check_task(task, hint={}) -> Dict[str, Union[str, Dict[str, float]]]:
    # When a hint is given: if the recorded timestamp is 0, the file did not exist
    # originally and should still not exist; otherwise the file should exist and
    # have the same timestamp.
if (
hint
and hint["status"] not in ("pending", "running")
and all(
(os.path.isfile(f) and os.stat(f).st_mtime == v)
if v
else (not os.path.isfile(f))
for f, v in hint["files"].items()
)
):
return {}
# status of the job, please refer to https://github.com/vatlab/SOS/issues/529
# for details.
#
task_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".task")
if not os.path.isfile(task_file):
return dict(status="missing", files={task_file: 0})
mtime = os.stat(task_file).st_mtime
def task_changed():
return os.stat(task_file).st_mtime != mtime
tf = TaskFile(task)
status = tf.status
if status in ["failed", "completed", "aborted"]:
        # these are terminal states. We simply return them
# only change of the task file will trigger recheck of status
stdout_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".sosout"
)
stderr_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"
)
# 1242
if os.path.isfile(stdout_file) or os.path.isfile(stderr_file):
tf.add_outputs(keep_result=True)
# 1323
tf.add_result()
remove_task_files(task, [".sosout", ".soserr", ".out", ".err"])
# stdout and stderr files should not exist
status_files = {
task_file: os.stat(task_file).st_mtime,
stdout_file: 0,
stderr_file: 0,
}
return dict(status=status, files=status_files)
pulse_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".pulse")
# check the existence and validity of .pulse file
if os.path.isfile(pulse_file):
try:
status_files = {
task_file: os.stat(task_file).st_mtime,
pulse_file: os.stat(pulse_file).st_mtime,
}
# if we have hint, we know the time stamp of last
# status file.
if (
not hint
or pulse_file not in hint["files"]
or status_files[pulse_file] != hint["files"][pulse_file]
):
return dict(status="running", files=status_files)
elapsed = time.time() - status_files[pulse_file]
if elapsed < 60:
return dict(status="running", files=status_files)
syserr_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".err"
)
            # if the system does not return any error message, write a sos-specific one
if os.path.isfile(syserr_file) and os.path.getsize(syserr_file) > 0:
try:
with open(syserr_file) as syserr:
env.logger.warning("".join(syserr.readlines()[-5:]))
except Exception as e:
env.logger.warning(
f"{task} is suspected to be killed but {syserr_file} cannot be read: {e}"
)
else:
soserr_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"
)
with open(soserr_file, "a") as err:
err.write(
f"Task {task} inactive for more than {int(elapsed)} seconds, might have been killed."
)
env.logger.warning(
f"Task {task} inactive for more than {int(elapsed)} seconds, might have been killed."
)
tf.add_outputs()
# 1323
tf.add_result()
# assume aborted
tf.status = "aborted"
return dict(
status="aborted",
files={task_file: os.stat(task_file).st_mtime, pulse_file: 0},
)
except Exception:
# the pulse file could disappear when the job is completed.
if task_changed():
return check_task(task)
raise
elif status == "running":
# starting of task will create a pulse file. If the pulse file is gone
# and the status is still showing as running, something is wrong.
# if there is no pulse file .
tf.status = "aborted"
with open(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"),
"a",
) as err:
err.write(f"Task {task} considered as aborted due to missing pulse file.")
env.logger.warning(
f"Task {task} considered as aborted due to missing pulse file."
)
tf.add_outputs()
# 1323
tf.add_result()
return dict(
status="aborted",
files={task_file: os.stat(task_file).st_mtime, pulse_file: 0},
)
# if there is no pulse file
job_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".sh")
def has_job():
job_id_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".job_id"
)
return (
os.path.isfile(job_file)
and os.stat(job_file).st_mtime >= os.stat(task_file).st_mtime
and os.path.isfile(job_id_file)
and os.stat(job_id_file).st_mtime >= os.stat(job_file).st_mtime
)
if has_job():
try:
if status != "submitted":
tf.status = "submitted"
return dict(
status="submitted",
files={
task_file: os.stat(task_file).st_mtime,
job_file: os.stat(job_file).st_mtime,
pulse_file: 0,
},
)
except Exception:
# the pulse file could disappear when the job is completed.
if task_changed():
return check_task(task)
else:
raise
else:
# status not changed
try:
if (
hint
and hint["status"] in ("new", "pending")
and hint["files"][task_file] == os.stat(task_file).st_mtime
):
return {}
else:
return dict(
status=status,
files={task_file: os.stat(task_file).st_mtime, job_file: 0},
)
except Exception:
# the pulse file could disappear when the job is completed.
if task_changed():
return check_task(task)
else:
raise
def check_tasks(tasks, is_all: bool):
if not tasks:
return {}
cache_file: str = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", "status_cache.pickle"
)
#
status_cache: Dict = {}
if os.path.isfile(cache_file):
try:
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "rb") as cache:
status_cache = pickle.load(cache)
except Exception:
# if the cache file is corrupted, remove it. #1275
os.remove(cache_file)
# at most 20 threads
from multiprocessing.pool import ThreadPool as Pool
p = Pool(min(20, len(tasks)))
# the result can be {} for unchanged, or real results
raw_status = p.starmap(check_task, [(x, status_cache.get(x, {})) for x in tasks])
# if check all, we clear the cache and record all existing tasks
has_changes: bool = any(x for x in raw_status)
if has_changes:
if is_all:
status_cache = {
k: v if v else status_cache[k] for k, v in zip(tasks, raw_status)
}
else:
status_cache.update({k: v for k, v in zip(tasks, raw_status) if v})
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "wb") as cache:
pickle.dump(status_cache, cache)
return status_cache
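# Minimal sketch (values are hypothetical) of the cache returned by check_tasks(),
# keyed by task id with the per-task dict produced by check_task() above:
#   {'a1b2c3d4e5f6': {'status': 'completed',
#                     'files': {'/home/user/.sos/tasks/a1b2c3d4e5f6.task': 1234567890.0}}}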
def print_task_status(
tasks,
check_all=False,
verbosity: int = 1,
html: bool = False,
numeric_times=False,
age=None,
tags=None,
status=None,
):
# # verbose is ignored for now
# if not check_all and not tasks:
# from .signatures import WorkflowSignatures
# workflow_signatures = WorkflowSignatures()
# tasks = [
# x for x in workflow_signatures.tasks() if os.path.isfile(
# os.path.join(
# os.path.expanduser('~'), '.sos', 'tasks', x + '.task'))
# ]
import glob
all_tasks: List = []
if check_all:
tasks = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task")
)
all_tasks = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in tasks]
if not all_tasks:
return
else:
for t in tasks:
matched_names = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task")
)
matched = [
(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in matched_names
]
if not matched:
all_tasks.append((t, None))
else:
all_tasks.extend(matched)
if age is not None:
age = expand_time(age, default_unit="d")
if age > 0:
all_tasks = [x for x in all_tasks if time.time() - x[1] >= age]
else:
all_tasks = [x for x in all_tasks if time.time() - x[1] <= -age]
all_tasks = sorted(list(set(all_tasks)), key=lambda x: 0 if x[1] is None else x[1])
if tags:
all_tasks = [
x
for x in all_tasks
if TaskFile(x[0]).exists()
and any(y in tags for y in TaskFile(x[0]).tags.split())
]
if not all_tasks:
env.logger.debug("No matching tasks are identified.")
return
raw_status = check_tasks([x[0] for x in all_tasks], check_all)
obtained_status = [raw_status[x[0]]["status"] for x in all_tasks]
#
# automatically remove non-running tasks that are more than 30 days old
to_be_removed = [
t
for s, (t, d) in zip(obtained_status, all_tasks)
if d is not None and time.time() - d > 30 * 24 * 60 * 60 and s != "running"
]
if status:
all_tasks = [x for x, s in zip(all_tasks, obtained_status) if s in status]
obtained_status = [x for x in obtained_status if x in status]
#
from .monitor import summarizeExecution
if html:
# HTML output
from .utils import isPrimitive
import pprint
print('<table width="100%" class="resource_table">')
def row(th=None, td=None):
if td is None:
print(f'<tr><th align="right" width="30%">{th}</th><td></td></tr>')
elif th is None:
print(f'<tr><td colspan="2" align="left" width="30%">{td}</td></tr>')
else:
print(
f'<tr><th align="right" width="30%">{th}</th><td align="left"><div class="one_liner">{td}</div></td></tr>'
)
for s, (t, d) in zip(obtained_status, all_tasks):
tf = TaskFile(t)
ts, ct, st, dr = tf.tags_created_start_and_duration(formatted=True)
row("ID", t)
row("Status", s)
row("Created", ct)
if st:
row("Started", st)
if dr:
row("Duration", dr)
params = tf.params
row("Task")
if hasattr(params, "task_stack"):
row(
td=f'<pre style="text-align:left">{params.task_stack[0][1].task}</pre>'
)
else:
row(td=f'<pre style="text-align:left">{params.task}</pre>')
row("Tags")
row(td=f'<pre style="text-align:left">{tf.tags}</pre>')
if params.global_def:
row("Global")
row(td=f'<pre style="text-align:left">{params.global_def}</pre>')
# row('Environment')
global_runtime = tf.runtime["_runtime"]
job_vars = params.sos_dict
job_vars["_runtime"].update(global_runtime)
for k in sorted(job_vars.keys()):
v = job_vars[k]
if not k.startswith("__") and not k == "CONFIG":
if k == "_runtime":
for _k, _v in v.items():
if isPrimitive(_v) and _v not in (None, "", [], (), {}):
row(_k, _v)
elif isPrimitive(v) and v not in (None, "", [], (), {}):
row(
k, f'<pre style="text-align:left">{pprint.pformat(v)}</pre>'
)
pulse_content = ""
if tf.has_result():
if s not in ("pending", "submitted", "running"):
res = tf.result
if "start_time" in res and "end_time" in res:
row(
"Duration",
format_duration(res["end_time"] - res["start_time"]),
)
if "peak_cpu" in res:
row("Peak CPU", f'{res["peak_cpu"]*100} %')
if "peak_mem" in res:
row("Peak mem", pretty_size(res["peak_mem"]))
# this is a placeholder for the frontend to draw figure
row(td=f'<div id="res_{t}"></div>')
elif s == "running":
pulse_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", t + ".pulse"
)
if os.path.isfile(pulse_file):
with open(pulse_file) as pulse:
pulse_content = pulse.read()
summary = summarizeExecution(t, pulse_content, status=s)
if summary:
# row('Execution')
for line in summary.split("\n"):
fields = line.split(None, 1)
if fields[0] == "task":
continue
row(fields[0], "" if fields[1] is None else fields[1])
# this is a placeholder for the frontend to draw figure
row(td=f'<div id="res_{t}"></div>')
if s not in ("pending", "submitted", "running"):
#
if tf.has_shell():
shell = tf.shell
numLines = shell.count("\n")
row("shell", f"{numLines} lines")
row(td=f'<small><pre style="text-align:left">{shell}</pre></small>')
if tf.has_stdout():
stdout = tf.stdout
numLines = stdout.count("\n")
row(
"stdout",
"(empty)"
if numLines == 0
else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}',
)
if numLines > 200:
stdout = "\n".join(stdout.splitlines()[-200:])
row(
td=f'<small><pre style="text-align:left">{stdout}</pre></small>'
)
if tf.has_stderr():
stderr = tf.stderr
numLines = stderr.count("\n")
row(
"stderr",
"(empty)"
if numLines == 0
else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}',
)
if numLines > 200:
stderr = "\n".join(stderr.splitlines()[-200:])
row(
td=f'<small><pre style="text-align:left">{stderr}</pre></small>'
)
elif s == "running":
files = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", t + ".*")
)
for f in sorted(
[
x
for x in files
if os.path.splitext(x)[-1] not in (".task", ".pulse")
]
):
numLines = linecount_of_file(f)
rhead = os.path.splitext(f)[-1]
if rhead == ".sh":
rhead = "shell"
elif rhead == ".job_id":
rhead = "job ID"
elif rhead == ".err":
rhead = "stderr"
elif rhead == ".out":
rhead = "stdout"
elif rhead == ".soserr":
rhead = "sos error"
elif rhead == ".sosout":
rhead = "sos output"
row(
rhead,
"(empty)"
if numLines == 0
else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}',
)
try:
row(
td=f'<small><pre style="text-align:left">{tail_of_file(f, 200, ansi2html=True)}</pre></small>'
)
except Exception:
row(
td='<small><pre style="text-align:left">ignored.</pre><small>'
)
print("</table>")
#
if not pulse_content:
return
# A sample of 400 point should be enough to show the change of resources
lines = sample_lines(pulse_content, 400).splitlines()
if len(lines) <= 2:
return
# read the pulse file and plot it
# time proc_cpu proc_mem children children_cpu children_mem
try:
etime = []
cpu = []
mem = []
for line in lines:
if line.startswith("#") or not line.strip():
continue
fields = line.split()
etime.append(float(fields[0]))
cpu.append(float(fields[1]) + float(fields[4]))
mem.append(float(fields[2]) / 1e6 + float(fields[5]) / 1e6)
if not etime:
return
except Exception:
return
#
print(
"""
<script>
function loadFiles(files, fn) {
if (!files.length) {
files = [];
}
var head = document.head || document.getElementsByTagName('head')[0];
function loadFile(index) {
if (files.length > index) {
if (files[index].endsWith('.css')) {
var fileref = document.createElement('link');
fileref.setAttribute("rel", "stylesheet");
fileref.setAttribute("type", "text/css");
fileref.setAttribute("href", files[index]);
} else {
var fileref = document.createElement('script');
fileref.setAttribute("type", "text/javascript");
fileref.setAttribute("src", files[index]);
}
console.log('Load ' + files[index]);
head.appendChild(fileref);
index = index + 1;
// Used to call a callback function
fileref.onload = function() {
loadFile(index);
}
} else if (fn) {
fn();
}
}
loadFile(0);
}
function plotResourcePlot_"""
+ t
+ """() {
// get the item
// parent element is a table cell, needs enlarge
document.getElementById(
"res_"""
+ t
+ """").parentElement.setAttribute("height", "300px;");
$("#res_"""
+ t
+ """").css("height", "300px");
$("#res_"""
+ t
+ """").css("width", "100%");
$("#res_"""
+ t
+ """").css("min-height", "300px");
var cpu = ["""
+ ",".join([f"[{x*1000},{y}]" for x, y in zip(etime, cpu)])
+ """];
var mem = ["""
+ ",".join([f"[{x*1000},{y}]" for x, y in zip(etime, mem)])
+ """];
$.plot('#res_"""
+ t
+ """', [{
data: cpu,
label: "CPU (%)"
},
{
data: mem,
label: "mem (M)",
yaxis: 2
}
], {
xaxes: [{
mode: "time"
}],
yaxes: [{
min: 0
}, {
position: "right",
tickFormatter: function(v, axis) {
return v.toFixed(1) + 'M';
}
}],
legend: {
position: "nw"
}
});
}
var dt = 100;
// the frontend might be notified before the table is inserted as results.
function showResourceFigure_"""
+ t
+ """() {
if ( $("#res_"""
+ t
+ """").length === 0) {
dt = dt * 1.5; // slow-down checks for datatable as time goes on;
setTimeout(showResourceFigure_"""
+ t
+ """, dt);
return;
} else {
$("#res_"""
+ t
+ """").css('width', "100%").css('height', "300px");
loadFiles(["http://www.flotcharts.org/flot/jquery.flot.js",
"http://www.flotcharts.org/flot/jquery.flot.time.js"
], plotResourcePlot_"""
+ t
+ """);
}
}
showResourceFigure_"""
+ t
+ """()
</script>
"""
)
elif verbosity == 0:
print("\n".join(obtained_status))
elif verbosity == 1:
for s, (t, d) in zip(obtained_status, all_tasks):
print(f"{t}\t{s}")
elif verbosity == 2:
tsize = 20
for s, (t, d) in zip(obtained_status, all_tasks):
ts, _, _, dr = TaskFile(t).tags_created_start_and_duration(
formatted=not numeric_times
)
tsize = max(tsize, len(ts))
print(f"{t}\t{ts.ljust(tsize)}\t{dr:<14}\t{s}")
elif verbosity == 3:
tsize = 20
for s, (t, d) in zip(obtained_status, all_tasks):
ts, ct, st, dr = TaskFile(t).tags_created_start_and_duration(
formatted=not numeric_times
)
tsize = max(tsize, len(ts))
print(f"{t}\t{ts.ljust(tsize)}\t{ct:<14}\t{st:<14}\t{dr:<14}\t{s}")
elif verbosity == 4:
import pprint
for s, (t, d) in zip(obtained_status, all_tasks):
tf = TaskFile(t)
if s == "missing":
print(f"{t}\t{s}\n")
continue
ts, ct, st, dr = tf.tags_created_start_and_duration(formatted=True)
print(f"{t}\t{s}\n")
print(f"{ct}")
if st:
print(f"{st}")
if dr:
print(f"{dr}")
params = tf.params
print("TASK:\n=====")
if hasattr(params, "task_stack"):
# show task of subtask
print(f"#1 of {len(params.task_stack)} subtasks:")
print(params.task_stack[0][1].task)
else:
print(params.task)
print("TAGS:\n=====")
print(tf.tags)
print()
if params.global_def:
print("GLOBAL:\n=======")
print(params.global_def)
print()
print("ENVIRONMENT:\n============")
global_runtime = tf.runtime["_runtime"]
job_vars = params.sos_dict
job_vars["_runtime"].update(global_runtime)
for k in sorted(job_vars.keys()):
v = job_vars[k]
print(f"{k:22}{short_repr(v) if verbosity == 3 else pprint.pformat(v)}")
print()
if tf.has_result():
if s not in ("pending", "submitted", "running"):
res = tf.result
print("EXECUTION STATS:\n================")
if "start_time" in res and "end_time" in res:
print(
f'Duration:\t{format_duration(res["end_time"] - res["start_time"])}'
)
if "peak_cpu" in res:
print(f'Peak CPU:\t{res["peak_cpu"]*100} %')
if "peak_mem" in res:
print(f'Peak mem:\t{pretty_size(res["peak_mem"])}')
elif s == "running":
# we have separate pulse, out and err files
pulse_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", t + ".pulse"
)
if os.path.isfile(pulse_file):
print("EXECUTION STATS:\n================")
with open(pulse_file) as pulse:
print(summarizeExecution(t, pulse.read(), status=s))
# if there are other files such as job file, print them.
def show_file(task, exts):
if isinstance(exts, str):
exts = [exts]
for ext in exts:
f = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ext
)
if not os.path.isfile(f) or os.path.getsize(f) == 0:
return
print(
f'\n{os.path.basename(f)}:\n{"="*(len(os.path.basename(f))+1)}'
)
try:
with open(f) as fc:
print(fc.read())
except Exception:
print("Binary file")
if s == "running":
show_file(t, ".sh")
show_file(t, ".job_id")
show_file(t, [".sosout", ".out"])
show_file(t, [".soserr", ".err"])
elif s == "submitted":
show_file(t, ".sh")
show_file(t, ".job_id")
elif s != "pending":
if tf.has_shell():
print("\nexecution script:\n================\n" + tf.shell)
else:
show_file(t, ".sh")
if tf.has_stdout():
print("\nstandard output:\n================\n" + tf.stdout)
else:
show_file(t, [".sosout", ".out"])
if tf.has_stderr():
print("\nstandard error:\n================\n" + tf.stderr)
else:
show_file(t, [".soserr", ".err"])
# remove jobs that are older than 1 month
if to_be_removed:
purge_tasks(to_be_removed, verbosity=0)
def kill_tasks(tasks, tags=None):
#
import glob
from multiprocessing.pool import ThreadPool as Pool
if not tasks:
tasks = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task")
)
all_tasks = [os.path.basename(x)[:-5] for x in tasks]
else:
all_tasks = []
for t in tasks:
matched = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task")
)
matched = [os.path.basename(x)[:-5] for x in matched]
if not matched:
env.logger.warning(f"{t} does not match any existing task")
else:
all_tasks.extend(matched)
if tags:
all_tasks = [
x for x in all_tasks if any(x in tags for x in TaskFile(x).tags.split())
]
if not all_tasks:
env.logger.debug("No task to kill")
return
all_tasks = sorted(list(set(all_tasks)))
# at most 20 threads
p = Pool(min(20, len(all_tasks)))
killed = p.map(kill_task, all_tasks)
for s, t in zip(killed, all_tasks):
print(f"{t}\t{s}")
def kill_task(task):
tf = TaskFile(task)
status = tf.status
if status == "completed":
return "completed"
with open(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"), "a"
) as err:
err.write(f"Task {task} killed by sos kill command or task engine.")
tf.add_outputs()
# 1323
tf.add_result()
TaskFile(task).status = "aborted"
remove_task_files(
task, [".sosout", ".soserr", ".out", ".err", ".pulse", ".sh", ".job_id"]
)
return "aborted"
def purge_tasks(tasks, purge_all=None, age=None, status=None, tags=None, verbosity=2):
# verbose is ignored for now
# if not tasks and not purge_all:
# # if not --all and no task is specified, find all tasks in the current directory
# from .signatures import WorkflowSignatures
# workflow_signatures = WorkflowSignatures()
# tasks = [
# x for x in workflow_signatures.tasks() if os.path.isfile(
# os.path.join(
# os.path.expanduser('~'), '.sos', 'tasks', x + '.task'))
# ]
import glob
if tasks:
all_tasks = []
for t in tasks:
matched = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task")
)
matched = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in matched]
if not matched:
print(f"{t}\tmissing")
all_tasks.extend(matched)
elif purge_all or age or status or tags:
tasks = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task")
)
all_tasks = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in tasks]
else:
raise ValueError(
"Please specify either tasks or one or more of --all, --status, --tags--age"
)
#
if age is not None:
age = expand_time(age, default_unit="d")
if age > 0:
all_tasks = [x for x in all_tasks if time.time() - x[1] >= age]
else:
all_tasks = [x for x in all_tasks if time.time() - x[1] <= -age]
if status:
# at most 20 threads
task_status = check_tasks([x[0] for x in all_tasks], not tasks)
all_tasks = [x for x in all_tasks if task_status[x[0]]["status"] in status]
if tags:
all_tasks = [
x for x in all_tasks if any(x in tags for x in TaskFile(x[0]).tags.split())
]
#
    # remove all task files
all_tasks = set([x[0] for x in all_tasks])
if all_tasks:
#
# find all related files, including those in nested directories
from collections import defaultdict
to_be_removed = defaultdict(list)
for dirname, _, filelist in os.walk(
os.path.join(os.path.expanduser("~"), ".sos", "tasks")
):
for f in filelist:
ID = os.path.basename(f).split(".", 1)[0]
if ID in all_tasks:
to_be_removed[ID].append(os.path.join(dirname, f))
#
cache_file: str = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", "status_cache.pickle"
)
if os.path.isfile(cache_file):
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "rb") as cache:
status_cache = pickle.load(cache)
else:
status_cache = {}
for task in all_tasks:
removed = True
for f in to_be_removed[task]:
try:
if verbosity > 3:
if (
"TASK" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file("TASK", f"Remove {f}")
os.remove(f)
except Exception as e:
removed = False
if verbosity > 0:
env.logger.warning(f"Failed to purge task {task[0]}: {e}")
status_cache.pop(task, None)
if removed and verbosity > 1:
print(f"{task}\tpurged")
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "wb") as cache:
pickle.dump(status_cache, cache)
elif verbosity > 1:
env.logger.debug("No matching tasks to purge")
if purge_all and age is None and status is None and tags is None:
matched = glob.glob(os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*"))
count = 0
for f in matched:
if os.path.isdir(f):
import shutil
try:
shutil.rmtree(f)
count += 1
except Exception as e:
if verbosity > 0:
env.logger.warning(f"Failed to remove {f}: {e}")
else:
try:
os.remove(f)
count += 1
except Exception as e:
if verbosity > 0:
env.logger.warning(f"Failed to remove {e}")
if count > 0 and verbosity > 1:
env.logger.info(f"{count} other files and directories are removed.")
return ""
| gpl-3.0 | -1,311,812,755,292,229,400 | 36.3247 | 207 | 0.464547 | false |
bjodah/PyLaTeX | pylatex/base_classes/command.py | 1 | 10139 | # -*- coding: utf-8 -*-
"""
This module implements a class that implements a latex command.
This can be used directly or it can be inherited to make an easier interface
to it.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
from .latex_object import LatexObject
from ..utils import dumps_list
class CommandBase(LatexObject):
"""A class that represents a LaTeX command.
The name of this class (when lowercased) will be the name of this command.
To supply a different name set the ``_latex_name`` attribute.
"""
def __init__(self, arguments=None, options=None, *,
extra_arguments=None):
r"""
Args
----
arguments: None, str, list or `~.Arguments`
The main arguments of the command.
options: None, str, list or `~.Options`
Options of the command. These are placed in front of the arguments.
extra_arguments: None, str, list or `~.Arguments`
Extra arguments for the command. When these are supplied the
options will be placed before them instead of before the normal
arguments. This allows for a way of having one or more arguments
before the options.
"""
self._set_parameters(arguments, 'arguments')
self._set_parameters(options, 'options')
if extra_arguments is None:
self.extra_arguments = None
else:
self._set_parameters(extra_arguments, 'extra_arguments')
super().__init__()
def _set_parameters(self, parameters, argument_type):
parameter_cls = Options if argument_type == 'options' else Arguments
if parameters is None:
parameters = parameter_cls()
elif not isinstance(parameters, parameter_cls):
parameters = parameter_cls(parameters)
# Pass on escaping to generated parameters
parameters._default_escape = self._default_escape
setattr(self, argument_type, parameters)
def __key(self):
"""Return a hashable key, representing the command.
Returns
-------
tuple
"""
return (self.latex_name, self.arguments, self.options,
self.extra_arguments)
def __eq__(self, other):
"""Compare two commands.
Args
----
other: `~.Command` instance
The command to compare this command to
Returns
-------
bool:
If the two instances are equal
"""
if isinstance(other, Command):
return self.__key() == other.__key()
return False
def __hash__(self):
"""Calculate the hash of a command.
Returns
-------
int:
The hash of the command
"""
return hash(self.__key())
def dumps(self):
"""Represent the command as a string in LaTeX syntax.
Returns
-------
str
The LaTeX formatted command
"""
options = self.options.dumps()
arguments = self.arguments.dumps()
if self.extra_arguments is None:
return r'\{command}{options}{arguments}'\
.format(command=self.latex_name, options=options,
arguments=arguments)
extra_arguments = self.extra_arguments.dumps()
return r'\{command}{arguments}{options}{extra_arguments}'\
.format(command=self.latex_name, arguments=arguments,
options=options, extra_arguments=extra_arguments)
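# Minimal sketch of the subclassing pattern described in CommandBase's docstring;
# the class below is illustrative only and not part of the public API.
class _ExampleTextbf(CommandBase):
    """Dumps as ``\\textbf{...}``; without ``_latex_name`` the lowercased class
    name would be used as the command name instead."""
    _latex_name = 'textbf'
# _ExampleTextbf(arguments='hello').dumps() would give r'\textbf{hello}'.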
class Command(CommandBase):
"""A class that represents a LaTeX command.
This class is meant for one-off commands. When a command of the same type
is used multiple times it is better to subclass `.CommandBase`.
"""
_repr_attributes_mapping = {'command': 'latex_name'}
def __init__(self, command=None, arguments=None, options=None, *,
extra_arguments=None, packages=None):
r"""
Args
----
command: str
Name of the command
arguments: None, str, list or `~.Arguments`
The main arguments of the command.
options: None, str, list or `~.Options`
Options of the command. These are placed in front of the arguments.
extra_arguments: None, str, list or `~.Arguments`
Extra arguments for the command. When these are supplied the
options will be placed before them instead of before the normal
arguments. This allows for a way of having one or more arguments
before the options.
packages: list of `~.Package` instances
A list of the packages that this command requires
Examples
--------
>>> Command('documentclass',
>>> options=Options('12pt', 'a4paper', 'twoside'),
>>> arguments='article').dumps()
'\\documentclass[12pt,a4paper,twoside]{article}'
>>> Command('com')
'\\com'
>>> Command('com', 'first')
'\\com{first}'
>>> Command('com', 'first', 'option')
'\\com[option]{first}'
>>> Command('com', 'first', 'option', 'second')
'\\com{first}[option]{second}'
"""
self.latex_name = command
if packages is not None:
self.packages |= packages
super().__init__(arguments, options, extra_arguments=extra_arguments)
class UnsafeCommand(Command):
"""An unsafe version of the `Command` class.
This class is meant for one-off commands that should not escape their
arguments and options. Use this command with care and only use this when
the arguments are hardcoded.
When an unsafe command of the same type is used multiple times it is better
to subclass `.CommandBase` and set the ``_default_escape`` attribute to
false.
"""
_default_escape = False
class Parameters(LatexObject):
"""The base class used by `~Options` and `~Arguments`.
    This class should probably never be used on its own and inheriting from it
is only useful if a class like `~Options` or `~Arguments` is needed again.
"""
def __init__(self, *args, **kwargs):
r"""
Args
----
\*args:
Positional parameters
\*\*kwargs:
Keyword parameters
"""
if len(args) == 1 and not isinstance(args[0], str):
if hasattr(args[0], 'items') and len(kwargs) == 0:
kwargs = args[0] # do not just iterate over the dict keys
args = ()
elif hasattr(args[0], '__iter__'):
args = args[0]
self._positional_args = list(args)
self._key_value_args = dict(kwargs)
super().__init__()
def __key(self):
"""Generate a unique hashable key representing the parameter object.
Returns
-------
tuple
"""
return tuple(self._list_args_kwargs())
def __eq__(self, other):
"""Compare two parameters.
Returns
-------
bool
"""
return type(self) == type(other) and self.__key() == other.__key()
def __hash__(self):
"""Generate a hash of the parameters.
Returns
-------
int
"""
return hash(self.__key())
def _format_contents(self, prefix, separator, suffix):
"""Format the parameters.
        The formatting is done using the three arguments supplied to this
function.
Arguments
---------
prefix: str
separator: str
suffix: str
Returns
-------
str
"""
params = self._list_args_kwargs()
if len(params) <= 0:
return ''
string = prefix + dumps_list(params, escape=self.escape,
token=separator) + suffix
return string
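    # e.g. (sketch of the behaviour documented above): on Options('a', 'b') the
    # call self._format_contents('[', ',', ']') yields '[a,b]', and an empty
    # parameter list yields ''.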
def _list_args_kwargs(self):
"""Make a list of strings representing al parameters.
Returns
-------
list
"""
params = []
params.extend(self._positional_args)
params.extend(['{k}={v}'.format(k=k, v=v) for k, v in
self._key_value_args.items()])
return params
class Options(Parameters):
"""A class implementing LaTex options for a command.
It supports normal positional parameters, as well as key-value pairs.
Options are the part of a command located between the square brackets
(``[]``). The positional parameters will be outputted in order and will
    appear before the key-value pairs. The key-value pairs won't be outputted
    in the order in which they were entered.
Examples
--------
    >>> Options('a', 'b', 'c').dumps()
'[a,b,c]'
>>> Options('clip', width=50, height='25em', trim='1 2 3 4').dumps()
'[clip,trim=1 2 3 4,width=50,height=25em]'
"""
def dumps(self):
"""Represent the parameters as a string in LaTeX syntax.
This is to be appended to a command.
Returns
-------
str
"""
return self._format_contents('[', ',', ']')
class Arguments(Parameters):
"""A class implementing LaTex arguments for a command.
It supports normal positional parameters, as well as key-value pairs.
Arguments are the part of a command located between the curly braces
(``{}``). The positional parameters will be outputted in order and will
    appear before the key-value pairs. The key-value pairs won't be outputted
    in the order in which they were entered.
Examples
--------
    >>> Arguments('a', 'b', 'c').dumps()
'{a}{b}{c}'
    >>> args = Arguments('clip', width=50, height='25em')
>>> args.dumps()
'{clip}{width=50}{height=25em}'
"""
def dumps(self):
"""Represent the parameters as a string in LaTeX syntax.
This is to be appended to a command.
Returns
-------
str
"""
return self._format_contents('{', '}{', '}')
| mit | -7,111,534,106,801,539,000 | 27.085873 | 79 | 0.56909 | false |
sapientsalamander/Pi_Packet_Project | sender_files/python_files/lcd_packet_generation.py | 1 | 6475 | """Configures a scapy packet with user input from the lcd screen.
Uses the LCD_Input_Wrapper class to interact with the user. All functions
that use the screen directly require a lock object so that only one function
can write to the screen at a time; otherwise garbage may result.
configure_delay: gets the delay between packets from the user.
configure_layer: gets the field values for a layer from the user.
configure_packet: handles the creation of a scapy packet using the LCD screen.
generate_packet: constructs the packet layer by layer.
select_layer: gets the next layer from the user or signals to finish or cancel.
size_and_gen_packet: builds packet, modifying size if necessary.
"""
import sys
import time
import scapy.all as scapy
from shared_files import conversions
from shared_files import computations
import dictionaries as dicts
import data_sanitization as ds
def configure_delay(lcd, lcd_lock):
"""Configures the delay in seconds between packets to be sent.
Args:
lcd (LCD_Input_Wrapper object): the lcd screen to interact with
lcd_lock(RLock object): the lock associated with the screen
Returns:
tuple (int, int): the chosen delay in seconds and microseconds.
"""
with lcd_lock:
delay = lcd.get_input('Delay:\n%i%i%i.%i%i%i%i',
'%08.4f' % float(dicts.DEFAULTS['Other']
['delay']))
delay = float(delay[7:])
delay_seconds = int(delay)
    # cast so the return value matches the documented (int, int) tuple
    delay_useconds = int(delay * 1000000 - delay_seconds * 1000000)
return (delay_seconds, delay_useconds)
def configure_layer(lcd, lcd_lock, layer):
"""Configures a layer's fields as defined in the layer dictionary.
Args:
lcd (LCD_Input_Wrapper object): the lcd screen to interact with
lcd_lock(RLock object): the lock associated with the screen
layer(scapy class): scapy class of the layer to configure
Returns:
scapy layer object: scapy packet layer with the fields filled in.
"""
p_layer = layer()
for key in dicts.LCD_INPUT_FORMAT[layer].keys():
try:
default = ds.sanitize(dicts.DEFAULTS[layer.name][key],
dicts.SAN_LCD[layer][key])
except KeyError:
continue
val = lcd.get_input(key + ':\n' + dicts.LCD_INPUT_FORMAT[layer][key],
default)
field, value = val.replace('\n', '').split(':')
value = ds.sanitize(value, dicts.SAN_SCAPY[layer][key])
setattr(p_layer, field, value)
return p_layer
def configure_packet(lcd, lcd_lock):
"""Configures a packet layer by layer.
Args:
lcd (LCD_Input_Wrapper object): the lcd screen to interact with
lcd_lock(RLock object): the lock associated with the screen
Returns:
A generated packet, or None if generation was cancelled.
"""
packet = []
with lcd_lock:
while True:
layer = select_layer(lcd, lcd_lock)
if layer == 'Finish':
return size_and_gen_packet(lcd, lcd_lock, packet)
elif layer == 'Cancel':
return None
elif layer == 'Load Packet':
return computations.read_pcap_file(dicts.DEFAULTS['Other']
['pkt_file'])
elif layer == scapy.Raw:
packet.append(configure_Raw_layer(lcd, lcd_lock))
else:
packet.append(configure_layer(lcd, lcd_lock, layer))
# TODO Modify the way input is handled so this doesn't need to be separate
# TODO Related to the TCP Flags todo
def configure_Raw_layer(lcd, lcd_lock):
"""Configures a scapy Raw layer.
Args:
lcd (LCD_Input_Wrapper object): the lcd screen to interact with
lcd_lock(RLock object): the lock associated with the screen
Returns:
str: the chosen message.
"""
msg_options = ["Here's a message\nFinis",
'Hello, world!',
'-Insert message\n here-',
'This message is\nthe longest one.']
with lcd_lock:
msg = msg_options[lcd.get_input(msg_options)]
return scapy.Raw(msg)
def generate_packet(packet_layers):
"""Constructs a packet consisting of the provided layers.
The / operator is used to stack packet layers in Scapy. Together, the
stacked layers form the whole packet. The first layer is used to create the
packet object and subsequent layers are added onto the bottom.
Args:
packet_layers (list): a list of the configured layer objects.
Returns:
a fully built scapy packet.
"""
packet = packet_layers[0]
packet_layers = packet_layers[1:]
for layer in packet_layers:
packet = packet / layer
return packet
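def _example_generate_packet():
    """Illustrative sketch only: shows the layer stacking described in
    generate_packet's docstring. The field values below are arbitrary examples,
    not defaults used elsewhere in this project.
    """
    layers = [scapy.Ether(), scapy.IP(dst='192.168.0.1'), scapy.UDP(dport=53)]
    # equivalent to scapy.Ether() / scapy.IP(dst='192.168.0.1') / scapy.UDP(dport=53)
    return generate_packet(layers)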
def select_layer(lcd, lcd_lock):
"""Selects the next layer to be configured.
Args:
lcd (LCD_Input_Wrapper object): the lcd screen to interact with
lcd_lock(RLock object): the lock associated with the screen
Returns:
scapy layer class of the selected layer.
"""
layer_class = dicts.LCD_INPUT_FORMAT.keys() + [scapy.Raw, 'Finish',
'Cancel', 'Load Packet']
with lcd_lock:
layer = lcd.get_input([key.name for key in
dicts.LCD_INPUT_FORMAT.keys()]
+ ['Raw', 'Finish', 'Cancel', 'Load Packet'])
return layer_class[layer]
def size_and_gen_packet(lcd, lcd_lock, packet):
"""Sizes an arbitrary packet, padding with null bytes or truncating.
Args:
lcd (LCD_Input_Wrapper object): the lcd screen to interact with
lcd_lock(RLock object): the lock associated with the screen
packet (list of scapy layer objects): the configured layers.
Returns:
a fully configured and built scapy packet sized to the user's request.
"""
with lcd_lock:
ptemp = generate_packet(packet)
size = int(lcd.get_input('Size(bytes):\n%i%i%i%i',
'%04d' % len(ptemp))[13:])
if size < len(ptemp):
lcd.clear()
lcd.message('Warning:\nTruncating Pkt')
time.sleep(2)
padsize = size - len(ptemp)
if padsize > 0:
packet.append(scapy.Raw('\x00'*padsize))
return scapy.Ether(str(generate_packet(packet))[:size])
| mit | -3,993,574,124,252,587,000 | 34.576923 | 79 | 0.613591 | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_ischildoffiltermatch.py | 1 | 2492 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
from _matchesfilter import MatchesFilter
#-------------------------------------------------------------------------
#
# IsChildOfFilterMatch
#
#-------------------------------------------------------------------------
class IsChildOfFilterMatch(Rule):
"""Rule that checks for a person that is a child
of someone matched by a filter"""
labels = [ _('Filter name:') ]
name = _('Children of <filter> match')
category = _('Family filters')
description = _("Matches children of anybody matched by a filter")
def prepare(self,db):
self.db = db
self.map = set()
filt = MatchesFilter(self.list)
filt.requestprepare(db)
for person in db.iter_people():
if filt.apply(db, person):
self.init_list(person)
filt.requestreset()
def reset(self):
self.map.clear()
def apply(self,db,person):
return person.handle in self.map
def init_list(self,person):
if not person:
return
for fam_id in person.get_family_handle_list():
fam = self.db.get_family_from_handle(fam_id)
if fam:
self.map.update(child_ref.ref
for child_ref in fam.get_child_ref_list())
| gpl-2.0 | -4,666,658,151,900,319,000 | 32.226667 | 75 | 0.536116 | false |
cheery/spirthon | annotator.py | 1 | 5192 | # Annotator needs to find the least generic type for everything.
# To do that, it needs to hold a model of our types.
class Annotator(object):
def __init__(self, unit):
self.unit = unit
self.stack = []
def update(self, func):
for block in func:
for op in block.ops:
if not op.queued:
self.stack.append(op)
op.queued = True
def run(self):
while len(self.stack) > 0:
op = self.stack.pop()
op.queued = False
if op.name == 'call':
print 'annotate', op
elif op.name == 'return':
a = union(op.block.func.annotation.restype, op.args[0].annotation)
op.args[0].annotation = a
op.block.func.annotation.restype = a
op.annotation = a
print 'return update', op, a
# bit incorrect, should push uses of argument in too.
else:
assert False
# Should annotate here, if some of the fields change,
# should reschedule the used fields.
# SPIR-V annotation may need much simpler rules than specified here.
# An 'Anything' annotation in the translation unit most likely means
# that the translation failed.
class Anything(object):
specificity = 0
parametric = False
def __repr__(self):
return 'anything'
# The next most specific type after 'Unbound'.
class Constant(object):
def __init__(self, type, value):
self.type = type
self.value = value
def __repr__(self):
return 'Constant({}, {})'.format(self.type, self.value)
class FuncType(object):
def __init__(self, restype, argtypes):
self.restype = restype
self.argtypes = argtypes
def __getitem__(self, index):
return self.argtypes[index]
def __len__(self):
return len(self.argtypes)
def __repr__(self):
return '({}) ->'.format(', '.join(map(repr, self.argtypes)), self.restype)
class Type(object):
def __call__(self, parameter):
assert self.parametric
return Parametric(self, parameter)
def __init__(self, name, generic, parametric=False):
self.name = name
self.generic = generic
self.parametric = parametric
self.specificity = generic.specificity+1
def __repr__(self):
return self.name
class Parametric(object):
def __init__(self, func, parameter):
self.func = func
self.parameter = parameter
def __repr__(self):
return "{}({})".format(self.func, self.parameter)
# Types are treated as notation. They should be uniquely identified.
anything = Anything()
# not sure whether these belong here.
t_int = Type('int', anything)
t_uint = Type('uint', t_int)
t_bool = Type('bool', t_uint)
t_float = Type('float', anything)
t_vec2 = Type('vec2', anything, parametric=True)
t_vec3 = Type('vec3', anything, parametric=True)
t_vec4 = Type('vec4', anything, parametric=True)
# Thought about doing them this way, but realized types
# would require unification by their type hierarchies.
# # nullable = Type('nullable', anything, parametric=True)
# # instance = Type('instance', nullable, parametric=True)
# # t_null = Type('null', nullable)
# I don't want parametric types to leak from
# their parametric container.
def union(a, b):
c = union_raw(a, b)
while isinstance(c, Type) and c.parametric:
c = c.generic
return c
# But we still may use unification results which
# return parametric types.
def union_raw(a, b):
if a is b:
return a
if a is None:
return b
if b is None:
return a
if isinstance(a, Constant) and isinstance(b, Constant):
if a.value == b.value:
return a
else:
return union_raw(a.type, b.type)
elif isinstance(a, Constant):
return union_raw(a.type, b)
elif isinstance(b, Constant):
return union_raw(a, b.type)
if isinstance(a, Type) and isinstance(b, Type):
specificity = min(a.specificity, b.specificity)
while a.specificity > specificity:
a = a.generic
while b.specificity > specificity:
b = b.generic
while a is not b:
a = a.generic
b = b.generic
assert a is not None
return a
elif isinstance(a, Parametric) and isinstance(b, Parametric):
tp = union_raw(a.func, b.func)
if tp.parametric:
return Parametric(tp, union(a.parameter, b.parameter))
else:
return tp
elif isinstance(a, Parametric):
tp = union_raw(a.func, b)
if tp.parametric:
return Parametric(tp, a.parameter)
else:
return tp
elif isinstance(b, Parametric):
tp = union_raw(b.func, a)
if tp.parametric:
return Parametric(tp, b.parameter)
else:
return tp
elif isinstance(a, FuncType) and isinstance(b, FuncType) and len(a) == len(b):
return FuncType(
union(a.restype, b.restype),
[union(c, d) for c, d in zip(a, b)])
return anything
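def _union_examples():
    """Illustrative sketch (not used by the annotator itself) of the unification
    rules implemented above."""
    assert union(t_bool, t_uint) is t_uint      # walk the more specific type up
    assert union(t_bool, t_float) is anything   # int and float only share 'anything'
    assert union(None, t_int) is t_int          # None means 'no information yet'
    v = union(t_vec2(t_float), t_vec2(t_int))   # parameters are unified separately
    assert isinstance(v, Parametric) and v.func is t_vec2 and v.parameter is anything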
| mit | 7,691,338,691,917,519,000 | 30.08982 | 82 | 0.589176 | false |
tibor95/phatch-python2.7 | build/lib.linux-i686-2.7/phatch/core/pil.py | 1 | 26512 | # Phatch - Photo Batch Processor
# Copyright (C) 2007-2009 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Phatch recommends SPE (http://pythonide.stani.be) for editing python files.
# Follows PEP8
"""All PIL related issues."""
#FIXME:
# - info should be defined on layer level
# -> move call afterwards also to layer level
# -> adapt image inspector
import datetime
import os
import re
import types
from PIL import Image
#todo make this lazy
from lib import formField
from lib import imtools
from lib import metadata
from lib import openImage
from lib import system
from lib import thumbnail
from lib import unicoding
from lib.reverse_translation import _t
from lib.formField import RE_FILE_IN, RE_FILE_OUT
from ct import TITLE
from config import USER_BIN_PATH
#from other import EXIF
system.set_bin_paths([USER_BIN_PATH])
try:
import pyexiv2
from lib import _pyexiv2 as exif
except:
pyexiv2 = None
exif = False
WWW_PYEXIV2 = 'http://tilloy.net/dev/pyexiv2/'
NEEDS_PYEXIV2 = _('pyexiv2 needs to be installed') + ' (%s)' % WWW_PYEXIV2
CONVERTED_MODE = \
_('%(mode)s has been converted to %(mode_copy)s to save as %(format)s.')
DYNAMIC_VARS = set(('width', 'height', 'size', 'mode', 'transparency'))
IMAGE_DEFAULT_DPI = 72
SEPARATOR = '_' # should be same as in core.translations
MONTHS = (_t('January'), _t('February'), _t('March'), _t('April'),
_t('May'), _t('June'), _t('July'), _t('August'), _t('September'),
_t('October'), _t('November'), _t('December'))
WEEKDAYS = (_t('Monday'), _t('Tuesday'), _t('Wednesday'), _t('Thursday'),
_t('Friday'), _t('Saturday'), _t('Sunday'))
DATETIME_KEYS = ['year', 'month', 'day', 'hour', 'minute', 'second']
re_DATETIME = re.compile(
'(?P<year>\d{4})[-:](?P<month>\d{2})[-:](?P<day>\d{2}) '
'(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})')
re_TAG = re.compile('(Pil|Exif|Iptc|Pexif|Zexif)([.]\w+)+')
re_KEY = re.compile('(#*)((\w|[.])*$|[$])')
TRANSPARENCY_ERROR = _('Only palette images have transparency.')
IMAGE_READ_EXTENSIONS = set(formField.IMAGE_READ_EXTENSIONS)\
.union(openImage.WITHOUT_PIL.extensions)
IMAGE_READ_EXTENSIONS = list(IMAGE_READ_EXTENSIONS)
IMAGE_READ_EXTENSIONS.sort()
IMAGE_EXTENSIONS = [ext for ext in IMAGE_READ_EXTENSIONS
if ext in formField.IMAGE_WRITE_EXTENSIONS]
BASE_VARS = ['dpi', 'compression', 'filename', 'format',
'orientation', 'path', 'transparency', 'type']
def split_data(d):
"""Provide attribute access to the variables.
:param d: a dumped metadata dictionary
:type d: dict
>>> d = {'date': '2008-11-27 13:54:33', 'tuple': (1, 2)}
"""
value = d.values()[0]
#tuples or list
if type(value) in (types.ListType, types.TupleType):
if len(value) > 1:
for k, v in d.items():
for i, x in enumerate(v):
d['%s.%d' % (k, i)] = v[i]
return
#datetime strings
done = False
for k, v in d.items():
if type(v) in types.StringTypes:
dt = re_DATETIME.match(v)
if dt:
for key in DATETIME_KEYS:
d['%s.%s' % (k, key)] = dt.group(key)
done = True
if done:
return
#date time values
if type(value) == datetime.datetime:
for k, v in d.items():
for key in DATETIME_KEYS:
d['%s.%s' % (k, key)] = getattr(v, key)
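# For a single-key dict the behaviour is deterministic, e.g. (sketch):
# split_data({'date': '2008-11-27 13:54:33'}) adds 'date.year': '2008',
# 'date.month': '11', ... 'date.second': '33' to the dictionary in place.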
def fix_EXIF(tag):
if not tag.startswith('EXIF'):
tag = 'EXIF.' + tag
return tag.replace(' ', SEPARATOR)
def image_to_dict(filename, im=None):
folder, name = os.path.split(filename)
d = {'path': filename, 'filename': name}
if im:
width, height = im.size
d['width'] = width
d['height'] = height
d['mode'] = im.mode
return d
def get_photo(filename):
return Photo(metadata.InfoExtract(filename, vars=BASE_VARS).dump())
def split_vars_static_dynamic(vars):
vars = set(vars)
static = vars.difference(DYNAMIC_VARS)
dynamic = vars.intersection(DYNAMIC_VARS)
return list(static), list(dynamic)
class NotWritableTagError(Exception):
pass
class InfoPhoto(dict):
def __init__(self, info, info_to_dump, get_pil, image=None):
"""The ``get_pil`` parameter is necessary for tags as width,
height, size and mode.
:param info: pil, pyexiv2, ... tag, value info
:type info: dict
:param get_pil: method to retrieve the pil image
:type get_pil: callable
"""
#parameters
self.get_pil = get_pil
path = info['path']
#sources
if image == None:
image = get_pil()
sources = {
metadata.InfoPil: image,
metadata.InfoPexif: image,
metadata.InfoZexif: image}
#check format -> readable/writable metadata with pyexiv2
if exif and exif.is_readable_format(image.format):
self.pyexiv2 = pyexiv2.ImageMetadata(path)
self.pyexiv2.read()
self.writable_exif = exif.is_writable_format_exif(image.format)
self.writable_iptc = exif.is_writable_format_exif(image.format)
self.writable = self.writable_exif or self.writable_iptc
if self.writable_exif:
self.pyexiv2['Exif.Image.Software'] = TITLE
sources[metadata.InfoExif] = sources[metadata.InfoIptc] =\
self.pyexiv2
else:
self.pyexiv2 = None
self.writable = self.writable_exif = self.writable_iptc = False
#retrieve dump info
try:
info_dumped = info_to_dump.open(path, sources).dump(free=True)
except Exception, details:
reason = unicoding.exception_to_unicode(details)
#log error details
            message = u'%s:%s:\n%s' % (_('Unable to extract variables from file'),
path, reason)
raise Exception(message)
self.update(info, explicit=False)
self.update(info_dumped, explicit=False)
#private vars
self._original_size = image.size # to compare if changed later
self._dirty = False
self._log = ''
self._flushed = True
def close(self):
"""Remove circular reference."""
del self.get_pil
def is_dirty(self):
"""The photo can become dirty in two ways:
* new metadata has been set
        * the image has changed size
        In case the image size has changed it will update the
        ``Exif.Photo.PixelXDimension`` and ``Exif.Photo.PixelYDimension``
accordingly.
:returns: True, if dirty
:rtype: boolean
"""
if self._dirty:
return True
self.update_size()
return self._dirty
def set(self, tag, value):
super(InfoPhoto, self).__setitem__(tag, value)
def update(self, d, explicit=True):
"""Do this explicitly so __setitem__ gets called."""
if explicit:
for key, value in d.items():
self[key] = value
else:
super(InfoPhoto, self).update(d)
def update_size(self):
"""If the image is exif writable and if the size has changed,
it will update ``Exif.Photo.PixelXDimension`` and
        ``Exif.Photo.PixelYDimension``.
"""
if not self.writable_exif:
return
size = width, height = self.get_pil().size
if self._original_size != size:
self.pyexiv2['Exif.Photo.PixelXDimension'] = width
self.pyexiv2['Exif.Photo.PixelYDimension'] = height
self._dirty = True
def __getitem__(self, tag):
"""If a dynamic tag (size, mode) is requested, it will
extract it from the image. Otherwise get it normally.
:param tag: metadata tag
:type tag: string
:returns: value
"""
if tag in DYNAMIC_VARS:
#this can maybe be optimized if necessary
if tag == 'size':
return self.get_pil().size
elif tag in ('width', 'Exif_Photo_PixelXDimension'):
return self.get_pil().size[0]
elif tag in ('height', 'Exif_Photo_PixelYDimension'):
return self.get_pil().size[1]
elif tag == 'mode':
return self.get_pil().mode
elif tag == 'transparency':
self.assert_transparency()
return self.get_pil().info['transparency']
else:
raise KeyError('Fatal Error: tag "%s" is not dynamic?!' % tag)
elif tag in metadata.ORIENTATION_TAGS:
#give priority to writable tag
if 'Exif_Image_Orientation' in self:
return super(InfoPhoto, self).\
__getitem__('Exif_Image_Orientation')
else:
return super(InfoPhoto, self).__getitem__(tag)
else:
return super(InfoPhoto, self).__getitem__(tag)
def __contains__(self, tag):
"""
"""
if super(InfoPhoto, self).__contains__(tag):
return True
if tag == 'transparency' and tag in self.get_pil().info:
return self['mode'] == 'P'
return tag in DYNAMIC_VARS
def __delitem__(self, tag):
"""Delete a tag after :method:`InfoPhoto.assert_writable`.
:param tag: metadata tag
:type tag: string
"""
self.assert_writable(tag)
if tag == 'transparency':
self.assert_transparency()
del self.get_pil().info[tag]
return
        pyexiv2_tag = self._fix(tag) # pyexiv2 demands str
# a bit clumsy but pyexiv2 does not support get or in
try:
pyexiv2_tag_value = self.pyexiv2[pyexiv2_tag]
except KeyError:
pyexiv2_tag_value = None
if self.pyexiv2 and pyexiv2_tag_value != None:
self.pyexiv2[pyexiv2_tag] = None
if tag in self:
super(InfoPhoto, self).__delitem__(tag)
def __setitem__(self, tag, value):
"""Delete a tag after :method:`InfoPhoto.assert_writable`.
:param tag: metadata tag
:type tag: string
:param value: new value
"""
self.assert_writable(tag)
if tag in metadata.ORIENTATION_TAGS:
if self.pyexiv2 is None and value == 1:
#allow to ignore this (e.g. transpose method)
return
#redirect to writable tag
tag = 'Exif_Image_Orientation'
if tag in DYNAMIC_VARS:
if tag == 'transparency':
self.assert_transparency()
self.get_pil().info['transparency'] = value
else:
raise KeyError(_('Tag "%s" is read only.') % tag)
else:
super(InfoPhoto, self).__setitem__(tag, value)
if metadata.RE_PYEXIV2_TAG_EDITABLE.match(tag):
try:
self.pyexiv2[self._fix(tag)] = value
except Exception, message:
raise KeyError('%s:\n%s'
% (_('Impossible to write tag "%s"') % tag, message))
self._dirty = True
self._flushed = False
def assert_transparency(self):
"""Raise a ``KeyError`` for ``'transparency'`` when ``image.mode``
is not ``'P'``.
"""
if self['mode'] != 'P':
raise KeyError(TRANSPARENCY_ERROR)
def log(self, message):
"""Log a message
:param message: message
:type message: string
"""
self._log += message + '\n'
def clear_log(self):
"""Clears the log."""
self._log = ''
def get_log(self):
"""Get the log contents.
:returns: the log
:rtype: string
"""
return self._log
@classmethod
def _fix(cls, tag):
"""Phatch uses ``_`` as a separator while pyexiv2 uses a
dot (``.``). Moreover pyexiv2 demands str.
>>> InfoPhoto._fix('Exif_Photo_PixelXDimension')
'Exif.Photo.PixelXDimension'
:param tag: tag in info notation
:type tag: string
:returns: tag in pyexiv2 notation
:rtype: string
"""
return str(tag.replace('_', '.'))
def assert_writable(self, tag):
"""Assert that the tag is writable. This can raise an
``NotWritableTagError`` because of several reasons:
* Tag might be read-only (e.g. Exif_Photo_PixelXDimension)
* Tag might be not Exif or Iptc
* Image file format might not allow writing of this tag
:param tag: tag name
:type tag: string
        :raises NotWritableTagError: if the tag cannot be written
"""
if not metadata.is_writable_tag(tag):
raise NotWritableTagError(_('Tag "%s" is not writable.') % tag)
if not ((self.writable_exif and tag.startswith('Exif'))
or (self.writable_iptc and tag.startswith('Iptc'))
or metadata.is_writeable_not_exif_tag(tag, self['mode'])):
raise NotWritableTagError(
_('Format %(format)s does not support overwriting "%(tag)s".')\
% {'format': self['format'], 'tag': tag})
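    # Example (illustrative, not part of the original file): __setitem__ and
    # __delitem__ above rely on this check; external code can use it the same
    # way, e.g. with a hypothetical tag name:
    #   try:
    #       info.assert_writable('Exif_Image_Artist')
    #   except NotWritableTagError:
    #       pass  # skip tags this file format cannot store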
def save(self, target, target_format=None, thumbdata=None):
"""
:param target: target filename
:type target: string
:param target_format: target format e.g. obtained by PIL
:type target_format: string
:param thumbdata: new thumbnail (eg with StringIO, see :mod:`imtools`)
:type thumbdata: string
"""
if not exif:
raise ImportError(NEEDS_PYEXIV2)
if not pyexiv2:
#FIXME: when starting with a not exif image png
#but save as exif jpg
return
if target == self['path']:
if self.is_dirty() and not self._flushed: # includes update_size
warnings = exif.flush(self.pyexiv2, thumbdata)
self._flushed = True
else:
self.update_size()
warnings = exif.write_metadata(self.pyexiv2, target,
self['format'], target_format, thumbdata)
return warnings
class Photo:
"""Use :func:`get_photo` to obtain a photo from a filename."""
def __init__(self, info, info_to_dump=None):
self.modify_date = None # for time shift action
self.report_files = [] # for reports
self._exif_transposition_reverse = None
#layer
path = info['path']
name = self.current_layer_name = _t('background')
layer = Layer(path, load=True)
self.layers = {name: layer}
#info
self.info = InfoPhoto(info, info_to_dump, self.get_flattened_image,
layer.image)
self.rotate_exif()
def close(self):
"""Remove circular references."""
self.info.close()
del self.info
def log(self, message):
self.info.log(message)
def clear_log(self):
self.info.clear_log()
def get_log(self):
return self.info.get_log()
def get_filename(self, folder, filename, typ):
return os.path.join(folder, '%s.%s' % (filename, typ))\
.replace('<', '%(').replace('>', ')s') % self.__dict__
#---layers
def get_flattened_image(self):
return self.get_layer().image.copy()
def get_layer(self, name=None):
if name is None:
name = self.current_layer_name
return self.layers[name]
def get_thumb(self, size=thumbnail.SIZE):
return thumbnail.thumbnail(self.get_flattened_image(),
size=size, checkboard=True)
def set_layer(self, layer, name=None):
if name is None:
name = self.current_layer_name
self.layers[name] = layer
#---image operations affecting all layers
def save(self, filename, format=None, save_metadata=True, **options):
"""Saves a flattened image"""
#todo: flatten layers
if format is None:
format = imtools.get_format_filename(filename)
image = self.get_flattened_image()
image_copy = imtools.convert_save_mode_by_format(image, format)
if image_copy.mode == 'P' and 'transparency' in image_copy.info:
options['transparency'] = image_copy.info['transparency']
if image_copy.mode != image.mode:
self.log(CONVERTED_MODE % {'mode': image.mode,
'mode_copy': image_copy.mode, 'format': format} + '\n')
        #reverse previously applied exif orientation
#exif thumbnails are usually within 160x160
#desktop thumbnails size is defined by thumbnail.py and is
#probably 128x128
save_metadata = save_metadata and exif \
and exif.is_writable_format(format)
if save_metadata:
# Exif thumbnails are stored in their own format (eg JPG)
thumb = thumbnail.thumbnail(image_copy, (160, 160))
thumbdata = imtools.get_format_data(thumb, format)
image_copy = imtools.transpose(image_copy,
self._exif_transposition_reverse)
#thumb = thumbnail.thumbnail(thumb, copy=False)
else:
thumbdata = None
#postpone thumbnail production to see later if it is needed
thumb = None
if 'compression.tif' in options:
compression = options['compression.tif']
del options['compression.tif']
else:
compression = 'none'
try:
if compression.lower() in ['raw', 'none']:
#save image with pil
file_mode = imtools.save_check_mode(image_copy, filename,
**options)
#did PIL silently change the image mode?
if file_mode:
#PIL did change the image mode without throwing
# an exception.
#Do not save thumbnails in this case
# as they won't be reliable.
if image_copy.mode.endswith('A') and \
not file_mode.endswith('A'):
#force RGBA when transparency gets lost
#eg saving TIFF format with LA mode
mode = image_copy.mode
image_copy = image_copy.convert('RGBA')
file_mode = imtools.save_check_mode(image_copy,
filename, **options)
if file_mode:
# RGBA failed
self.log(CONVERTED_MODE % {'mode': mode,
'mode_copy': file_mode, 'format': format} \
+ '\n')
else:
# RGBA succeeded
self.log(CONVERTED_MODE % {'mode': mode,
'mode_copy': 'RGBA', 'format': format} + '\n')
else:
self.log(CONVERTED_MODE % {'mode': image_copy.mode,
'mode_copy': file_mode, 'format': format} + '\n')
elif thumbnail.is_needed(image_copy, format):
# save thumbnail in system cache if needed
if thumb is None:
thumb = image_copy
thumb_info = {
'width': image.size[0],
'height': image.size[1]}
thumbnail.save_to_cache(filename, thumb,
thumb_info=thumb_info, **options)
# copy metadata if needed (problematic for tiff)
                    # FIXME: if metadata corrupts the image, there should be
# no thumbnail
if save_metadata:
self.info.save(filename, thumbdata=thumbdata)
else:
# save with pil>libtiff
openImage.check_libtiff(compression)
self.log(openImage.save_libtiff(image_copy, filename,
compression=compression, **options))
if self.modify_date:
# Update file access and modification date
os.utime(filename, (self.modify_date, self.modify_date))
self.append_to_report(filename, image_copy)
except IOError, message:
# clean up corrupted drawing
if os.path.exists(filename):
os.remove(filename)
raise IOError(message)
#update info
        if 'dpi' in options:
self.info['dpi'] = options['dpi'][0]
def append_to_report(self, filename, image=None):
report = image_to_dict(filename, image)
report[_t('source')] = self.info['path']
self.report_files.append(report)
def convert(self, mode, *args, **keyw):
"""Converts all layers to a different mode."""
for layer in self.layers.values():
if layer.image.mode == mode:
continue
if mode == 'P' and imtools.has_alpha(layer.image):
layer.image = imtools.convert(layer.image, mode, *args, **keyw)
self.info['transparency'] = 255
elif mode == 'P':
layer.image = imtools.convert(layer.image, mode, *args, **keyw)
self.info['transparency'] = None
else:
layer.image = imtools.convert(layer.image, mode, *args, **keyw)
def safe_mode(self, format):
"""Convert the photo into a safe mode for this specific format"""
layer = self.get_layer()
layer.image = imtools.convert_save_mode_by_format(layer.image, format)
def resize(self, size, method):
"""Resizes all layers to a different size"""
size = (max(1, size[0]), max(1, size[1]))
for layer in self.layers.values():
layer.image = layer.image.resize(size, method)
def rotate_exif(self, reverse=False):
layers = self.layers.values()
if reverse:
transposition = self._exif_transposition_reverse
self._exif_transposition_reverse = ()
else:
transposition, self._exif_transposition_reverse = \
imtools.get_exif_transposition(self.info['orientation'])
if transposition:
for layer in layers:
layer.image = imtools.transpose(layer.image, transposition)
#---pil
def apply_pil(self, function, *arg, **keyw):
for layer in self.layers.values():
layer.apply_pil(function, *arg, **keyw)
#---external
def call(self, command, check_exe=True, shell=None, size=None,
unlock=False, output_filename=None, mode=None):
if shell is None:
shell = not system.WINDOWS
#get command line
info = self.info
layer = self.get_layer()
image = layer.image
if mode != image.mode:
image = imtools.convert(image, mode)
if size != None and size[0] < image.size[0]:
image = image.copy()
image.thumbnail(size, Image.ANTIALIAS)
#loop over input -> save to temp files
temp_files = []
done = []
error = None
for match in RE_FILE_IN.finditer(command):
source = match.group()
if not(source in done):
ext = match.group(1)
target = system.TempFile(ext)
try:
imtools.save_safely(image, target.path)
except Exception, error:
pass
temp_files.append((source, target))
done.append(source)
if error:
break
# check if we have a file_in
# clean up in case of error
if error:
for source, target in temp_files:
target.close() # os.remove(target)
raise error
# loop over output
output = None
for index, match in \
enumerate(RE_FILE_OUT.finditer(command)):
if index > 0:
# only 1 is allowed
raise Exception('Only one file_out.* is allowed.')
source = match.group()
ext = match.group(1)
output = system.TempFile(ext, output_filename)
command = command.replace(source, system.fix_quotes(output.path))
# tweak command line
for source, target in temp_files:
command = command.replace(source, system.fix_quotes(target.path))
# execute
system.call(command, shell=shell)
# give back filename
if output and not os.path.exists(output.path):
error = True
else:
error = False
for source, target in temp_files:
target.close() # os.remove(target)
if error:
raise Exception(
_('Command did not produce an output image:\n%s')\
% command)
if output:
layer.open(output.path)
# DO NOT REMOVE image.load() or output.close will fail on windows
layer.image.load()
output.close()
class Layer:
def __init__(self, filename, position=(0, 0), load=True):
self.open(filename)
self.position = position
# VERY IMPORTANT
# do not remove load option, otherwise openImage.py won't work
# correctly with group4 tiff compression
if load:
self.image.load()
def open(self, uri):
self.image = openImage.open(uri)
if self.image.mode in ['F', 'I']:
# Phatch doesn't support F and I
            # FIXME: It would be better to add some sort of warning here
self.image = self.image.convert('L')
def apply_pil(self, function, *arg, **keyw):
self.image = function(self.image, *arg, **keyw)
| gpl-3.0 | 3,443,809,914,918,861,000 | 35.021739 | 79 | 0.55688 | false |
recipy/recipy | integration_test/regexps.py | 1 | 4118 | """
recipy-related regexps.
"""
# Copyright (c) 2016 University of Edinburgh.
def get_usage():
"""
Get regular expressions for usage information printed to console.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"Usage:\n"]
def get_version():
"""
Get regular expressions for version information printed to
console.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"recipy v[0-9]\.[0-9]\.[0-9]"]
def get_help():
"""
Get regular expressions for help information printed to
console.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"recipy - a frictionless provenance tool for Python\n",
r"Usage:\n",
r"Options:\n"]
def get_debug_recipy():
"""
Get regular expressions for debug information printed to
console.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"Command-line arguments: \n",
r"DB path: .*\n",
r"Full config file \(as interpreted\):\n",
r"----------------------------------\n",
r"----------------------------------\n"]
def get_db_empty():
"""
Get regular expressions for information printed to console, when
recipy database is empty.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"Database is empty"]
def get_stdout(log):
"""
Get regular expressions for recipy log information printed to
console.
:param log: Recipy database log
:type log: dict
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"Run ID: " + log["unique_id"] + "\n",
r"Created by " + log["author"] + " on .*\n",
r"Ran " + log["script"].replace("\\", "\\\\") +
" using .*\n",
r"Git: commit " + log["gitcommit"] + ", in repo " +
log["gitrepo"].replace("\\", "\\\\") +
", with origin " + str(log["gitorigin"]) + ".*\n",
r"Environment: .*\n",
r"Libraries: " + ", ".join(log["libraries"]) + "\n",
r"Inputs:\n",
log["inputs"][0][0].replace("\\", "\\\\"),
log["inputs"][0][1],
log["inputs"][0][0].replace("\\", "\\\\") +
r" \(" + log["inputs"][0][1] + r"\)\n",
r"Outputs:\n",
log["outputs"][0][0].replace("\\", "\\\\") +
r" \(" + log["outputs"][0][1] + r"\)\n"]
def get_diff(script):
"""
Get regular expressions for recipy "diff"-related log information
printed to console and recorded in database.
This function assumes that the only change made to a file was the
addition of a line with text "pass".
:param script: script for which diff information was logged
:type script: str or unicode
:returns: regular expressions
:rtype: list of str or unicode
"""
return [
r"---.*" + script + "\n",
r"\+\+\+.*" + script + "\n",
r"@@.*\n",
r"\+pass.*\n"]
def get_no_results():
"""
Get regular expressions for information printed to console, when
there are no results for a search.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"No results found"]
def get_debug():
"""
Get regular expressions for debug information printed to console.
This function assumes that the script invokes an input function and
an output function.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"recipy run inserted",
r"Patching",
r"Patching input function",
r"Patching output function",
r"Input from",
r"Output to",
r"recipy run complete"]
def get_filediffs():
"""
Get regular expressions for recipy "filediffs"-related log
information recorded in database.
:returns: regular expressions
:rtype: list of str or unicode
"""
return [r"before this run", r"after this run"]
| apache-2.0 | -5,645,377,528,694,980,000 | 25.74026 | 71 | 0.554638 | false |
klpdotorg/dubdubdub | apps/ivrs/migrations/0026_auto_20170101_2313.py | 1 | 1139 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
State = apps.get_model("ivrs", "State")
User = apps.get_model("users", "User")
states = State.objects.all()
for state in states:
# Trimming the starting 0. Have checked to make sure
# all telephones on the State table have 11 digits
# including the 0 at the beginning.
telephone = state.telephone[1:]
try:
user = User.objects.get(mobile_no=telephone)
state.user = user
except:
pass
state.telephone = telephone
state.save()
def reverse_func(apps, schema_editor):
State = apps.get_model("ivrs", "State")
states = State.objects.all()
for state in states:
telephone = "0" + state.telephone
state.telephone = telephone
state.user = None
state.save()
class Migration(migrations.Migration):
dependencies = [
('ivrs', '0025_state_user'),
]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| mit | 7,417,438,785,882,545,000 | 24.886364 | 60 | 0.604917 | false |
google/starthinker | dags/test_dag.py | 1 | 4439 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
Test Script
Used by tests.
- This should be called by the tests scripts only.
- When run will generate a say hello log.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {}
RECIPE = {
'setup': {
'day': [
'Mon',
'Tue',
'Wed',
'Thu',
'Fri',
'Sat',
'Sun'
],
'hour': [
1,
3,
23
]
},
'tasks': [
{
'hello': {
'auth': 'user',
'hour': [
1
],
'say': 'Hello At 1',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
3
],
'say': 'Hello At 3',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
],
'say': 'Hello Manual',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
23
],
'say': 'Hello At 23 Sleep',
'sleep': 30
}
},
{
'hello': {
'auth': 'user',
'say': 'Hello At Anytime',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
1,
3,
23
],
'say': 'Hello At 1, 3, 23',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
3
],
'say': 'Hello At 3 Reordered',
'sleep': 0
}
}
]
}
dag_maker = DAG_Factory('test', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
| apache-2.0 | -252,957,889,067,501,380 | 24.079096 | 145 | 0.514305 | false |
DongjunLee/kino-bot | kino/slack/plot.py | 1 | 2684 | from matplotlib import pyplot as plt
import matplotlib.dates as dt
import seaborn
seaborn.set()
import datetime
class Plot(object):
def __init__(self):
pass
def make_bar(
x,
y,
f_name,
title=None,
legend=None,
x_label=None,
y_label=None,
x_ticks=None,
y_ticks=None,
):
fig = plt.figure()
if title is not None:
plt.title(title, fontsize=16)
if x_label is not None:
plt.ylabel(x_label)
if y_label is not None:
plt.xlabel(y_label)
if x_ticks is not None:
plt.xticks(x, x_ticks)
if y_ticks is not None:
plt.yticks(y_ticks)
plt.bar(x, y, align="center")
if legend is not None:
plt.legend(legend)
plt.savefig(f_name)
plt.close(fig)
def make_line(
x,
y,
f_name,
title=None,
legend=None,
x_label=None,
y_label=None,
x_ticks=None,
y_ticks=None,
):
fig = plt.figure()
if title is not None:
plt.title(title, fontsize=16)
if x_label is not None:
plt.ylabel(x_label)
if y_label is not None:
plt.xlabel(y_label)
if x_ticks is not None:
plt.xticks(x, x_ticks)
if y_ticks is not None:
plt.yticks(y_ticks)
if isinstance(y[0], list):
for data in y:
plt.plot(x, data)
else:
plt.plot(x, y)
if legend is not None:
plt.legend(legend)
plt.savefig(f_name)
plt.close(fig)
def make_efficiency_date(
total_data,
avg_data,
f_name,
title=None,
x_label=None,
y_label=None,
x_ticks=None,
y_ticks=None,
):
fig = plt.figure()
if title is not None:
plt.title(title, fontsize=16)
if x_label is not None:
plt.ylabel(x_label)
if y_label is not None:
plt.xlabel(y_label)
v_date = []
v_val = []
for data in total_data:
dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
to_int = round(float(data[1]))
plt.plot_date(dates, data[1], color=plt.cm.brg(to_int))
for data in avg_data:
dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
v_date.append(dates)
v_val.append(data[1])
plt.plot_date(v_date, v_val, "^y-", label="Average")
plt.legend()
plt.savefig(f_name)
plt.close(fig)
| mit | 2,999,971,998,501,694,500 | 22.137931 | 77 | 0.492921 | false |
sylvchev/mdla | examples/example_benchmark_performance.py | 1 | 6309 | """Benchmarking dictionary learning algorithms on random dataset"""
from multiprocessing import cpu_count
from time import time
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from numpy.linalg import norm
from numpy.random import permutation, rand, randint, randn
from mdla import MiniBatchMultivariateDictLearning, MultivariateDictLearning
# TODO:
# investigate perf break from pydico
def benchmarking_plot(figname, pst, plot_sep, minibatchRange, mprocessRange):
_ = plt.figure(figsize=(15, 10))
bar_width = 0.35
_ = plt.bar(
np.array([0]),
pst[0],
bar_width,
color="b",
label="Online, no multiprocessing (baseline)",
)
index = [0]
for i in range(1, plot_sep[1]):
if i == 1:
_ = plt.bar(
np.array([i + 1]),
pst[i],
bar_width,
color="r",
label="Online with minibatch",
)
else:
_ = plt.bar(np.array([i + 1]), pst[i], bar_width, color="r")
index.append(i + 1)
    for i in range(plot_sep[1], plot_sep[2]):
if i == plot_sep[1]:
_ = plt.bar(
np.array([i + 2]),
pst[i],
bar_width,
label="Batch with multiprocessing",
color="magenta",
)
else:
_ = plt.bar(np.array([i + 2]), pst[i], bar_width, color="magenta")
index.append(i + 2)
plt.ylabel("Time per iteration (s)")
plt.title("Processing time for online and batch processing")
tick = [""]
tick.extend(map(str, minibatchRange))
tick.extend(map(str, mprocessRange))
plt.xticks(index, tuple(tick))
plt.legend()
plt.savefig(figname + ".png")
def _generate_testbed(
kernel_init_len,
n_nonzero_coefs,
n_kernels,
n_samples=10,
n_features=5,
n_dims=3,
snr=1000,
):
"""Generate a dataset from a random dictionary
    Generate a random dictionary and a dataset, where samples are combinations of
    n_nonzero_coefs dictionary atoms. Noise is added, based on SNR value, with
    1000 indicating that no noise should be added.
    Return the dictionary, the dataset and an array indicating how atoms are combined
    to obtain each sample.
"""
print("Dictionary sampled from uniform distribution")
dico = [rand(kernel_init_len, n_dims) for i in range(n_kernels)]
for i in range(len(dico)):
dico[i] /= norm(dico[i], "fro")
signals = list()
decomposition = list()
for _ in range(n_samples):
s = np.zeros(shape=(n_features, n_dims))
d = np.zeros(shape=(n_nonzero_coefs, 3))
rk = permutation(range(n_kernels))
for j in range(n_nonzero_coefs):
k_idx = rk[j]
k_amplitude = 3.0 * rand() + 1.0
k_offset = randint(n_features - kernel_init_len + 1)
s[k_offset : k_offset + kernel_init_len, :] += k_amplitude * dico[k_idx]
d[j, :] = array([k_amplitude, k_offset, k_idx])
decomposition.append(d)
noise = randn(n_features, n_dims)
if snr == 1000:
alpha = 0
else:
ps = norm(s, "fro")
pn = norm(noise, "fro")
alpha = ps / (pn * 10 ** (snr / 20.0))
signals.append(s + alpha * noise)
signals = np.array(signals)
return dico, signals, decomposition
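# Rough shape of the generated data (illustrative call, using the defaults above):
#   dico, X, code = _generate_testbed(kernel_init_len=5, n_nonzero_coefs=3,
#                                     n_kernels=50, n_samples=10)
#   # dico: 50 arrays of shape (5, 3), each with unit Frobenius norm
#   # X:    array of shape (10, 5, 3) -- n_samples x n_features x n_dims
#   # code: 10 arrays of shape (3, 3), rows of [amplitude, offset, kernel index]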
rng_global = np.random.RandomState(1)
n_samples, n_dims = 1500, 1
n_features = kernel_init_len = 5
n_nonzero_coefs = 3
n_kernels, max_iter, learning_rate = 50, 10, 1.5
n_jobs, batch_size = -1, None
iter_time, plot_separator, it_separator = list(), list(), 0
generating_dict, X, code = _generate_testbed(
kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims
)
# Online without mini-batch
print(
"Processing ",
max_iter,
"iterations in online mode, " "without multiprocessing:",
end="",
)
batch_size, n_jobs = n_samples, 1
learned_dict = MiniBatchMultivariateDictLearning(
n_kernels=n_kernels,
batch_size=batch_size,
n_iter=max_iter,
n_nonzero_coefs=n_nonzero_coefs,
n_jobs=n_jobs,
learning_rate=learning_rate,
kernel_init_len=kernel_init_len,
verbose=1,
dict_init=None,
random_state=rng_global,
)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)
# Online with mini-batch
minibatch_range = [cpu_count()]
minibatch_range.extend([cpu_count() * i for i in range(3, 10, 2)])
n_jobs = -1
for mb in minibatch_range:
print(
"\nProcessing ",
max_iter,
"iterations in online mode, with ",
"minibatch size",
mb,
"and",
cpu_count(),
"processes:",
end="",
)
batch_size = mb
learned_dict = MiniBatchMultivariateDictLearning(
n_kernels=n_kernels,
batch_size=batch_size,
n_iter=max_iter,
n_nonzero_coefs=n_nonzero_coefs,
n_jobs=n_jobs,
learning_rate=learning_rate,
kernel_init_len=kernel_init_len,
verbose=1,
dict_init=None,
random_state=rng_global,
)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)
# Batch learning
mp_range = range(1, cpu_count() + 1)
for p in mp_range:
print(
"\nProcessing ",
max_iter,
"iterations in batch mode, with",
p,
"processes:",
end="",
)
n_jobs = p
learned_dict = MultivariateDictLearning(
n_kernels=n_kernels,
max_iter=max_iter,
verbose=1,
n_nonzero_coefs=n_nonzero_coefs,
n_jobs=n_jobs,
learning_rate=learning_rate,
kernel_init_len=kernel_init_len,
dict_init=None,
random_state=rng_global,
)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)
print("Done benchmarking")
figname = "minibatch-performance"
print("Plotting results in", figname)
benchmarking_plot(figname, iter_time, plot_separator, minibatch_range, mp_range)
print("Exiting.")
| gpl-3.0 | -6,148,818,530,008,922,000 | 27.547511 | 84 | 0.592645 | false |
wrznr/gramophone | tests/gp/test_alignment.py | 1 | 2132 | # -*- coding: utf-8 -*-
import sys, os, pytest
from distutils import dir_util
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../gramophone')))
from gramophone import gp
@pytest.fixture
def datadir(tmpdir, request):
'''
Fixture responsible for searching a folder with the same name of test
module and, if available, moving all contents to a temporary directory so
tests can use them freely.
'''
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return tmpdir
def test_constructor():
aligner = gp.Aligner()
assert(aligner != None)
def test_loading(datadir):
aligner = gp.Aligner(mapping=datadir.join('test_alignment.txt'))
assert(aligner.status == 1)
def test_chain(datadir):
aligner = gp.Aligner(mapping=datadir.join('test_alignment.txt'))
chain_fst = aligner.chain(u"aabb")
chain_fst.draw('/tmp/chain.dot')
assert(chain_fst.verify())
def test_segment(datadir):
aligner = gp.Aligner(mapping=datadir.join('test_alignment.txt'))
seg_fst = aligner.segment(u"aabb")
seg_fst.draw('/tmp/seg.dot')
assert(seg_fst.verify())
def test_scan(datadir):
aligner = gp.Aligner(mapping=datadir.join('test_alignment.txt'))
seg_fst = aligner.segment(u"aabb")
seg_fst.draw('/tmp/seg.dot')
assert(seg_fst.verify())
def test_expand(datadir):
aligner = gp.Aligner(mapping=datadir.join('test_alignment.txt'))
exp_fst = aligner.expand(u"aabb")
exp_fst.draw('/tmp/exp.dot')
assert(exp_fst.verify())
def test_scan(datadir):
aligner = gp.Aligner(mapping=datadir.join('test_alignment.txt'))
segmentations = aligner.scan(u"aabb")
assert(segmentations == [['a', 'a', 'b', 'b'], ['aa', 'b', 'b']])
def test_align(datadir):
aligner = gp.Aligner(mapping=datadir.join('test_alignment.txt'))
alignment = aligner.align(u"aabb",u"abbbb")
assert(alignment[0] == ['aa', 'b', 'b'])
assert(alignment[1] == ['a', 'bb', 'bb'])
if __name__ == '__main__':
    pytest.main([__file__])
| lgpl-3.0 | -4,167,074,787,178,573,300 | 25.65 | 93 | 0.652908 | false |
tferr/ASA | scripting-examples/3D_Analysis_ImageStack.py | 1 | 2179 | #@ImagePlus imp
#@LogService log
'''
This script uses an outdated API. For a modern replacement, have a look at
https://github.com/morphonets/SNT/tree/master/src/main/resources/script_templates/Neuroanatomy
'''
from sholl import Sholl_Analysis
from sholl import Options
from os.path import expanduser
def spacedDistances(start, end, step):
"""Retrieves a list of Sholl sampling distances"""
leng = (end - start) / step + 1
return [start + i * step for i in range(leng)]
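# Illustrative example of the helper above (Jython 2.x integer division):
#   spacedDistances(10, 100, 10) -> [10, 20, 30, ..., 90, 100]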
# x,y,z coordinates of center of analysis
xc, yc, zc = 100, 100, 10
# Threshold values for segmentation
lower_t, upper_t = 88, 255
# Definitions for sampling distances
start_radius, end_radius, step_size, = 10, 100, 10
# Destination directory for saving plots and tables
export_path = expanduser("~")
sa = Sholl_Analysis()
if sa.validateImage(imp):
# Specify plugin settings
sa.setDescription(imp.getTitle(), True)
sa.setExportPath(export_path, True)
sa.setInteractiveMode(False)
# Customize output options
so = Options()
so.setMetric(Options.MEDIAN_INTERS, False) # "Sholl Results" table
so.setPlotOutput(Options.NO_PLOTS) # Which plots should be generated?
so.setPromptChoice(Options.HIDE_SAVED_FILES, True) # Main prompt option
so.setPromptChoice(Options.OVERLAY_SHELLS, True) # Main prompt option
sa.setOptions(so)
# Specify analysis settings
sa.setCenter(xc, yc, zc)
sa.setThreshold(lower_t, upper_t)
# Retrieve intersection counts
distances = spacedDistances(start_radius, end_radius, step_size)
counts = sa.analyze3D(xc, yc, zc, distances, imp)
if all(c == 0 for c in counts):
log.warn("All intersection counts were zero")
else:
# Do something with sampled data if analysis was successful
for idx, inters in enumerate(counts):
log.info("r=%s: %s inters." % (distances[idx],inters))
# Retrieve metrics
sa.analyzeProfile(distances, counts, True)
log.info("Analysis finished. Files saved to %s" % export_path)
log.info("Sholl Results Table has not been saved")
else:
log.error(imp.getTitle() + " is not a valid image")
| gpl-3.0 | -3,595,713,342,764,342,300 | 28.849315 | 98 | 0.693437 | false |
jschaul/ComplexNetworkSim | examples/getting started code/first_visualisation.py | 1 | 1471 | '''
Complete code file only from ComplexNetworkSim's "getting started" documentation section, for visualising a simulation. For explanations refer to the documentation page.
Current link: http://complexnetworksim.0sites.net/start.html (documentation hosting may change place - see the PyPi index page.)
@author: Joe Schaul <[email protected]>
'''
from ComplexNetworkSim import PlotCreator, AnimationCreator
directory = 'test' #location of simulation result files
myName = "SIR" #name that you wish to give your image output files
title = "Simulation of agent-based simple SIR"
#define three simulation-specific constants:
SUSCEPTIBLE = 0
INFECTED = 1
RECOVERED = 2
statesToMonitor = [INFECTED, SUSCEPTIBLE] #even if we have states 0,1,2,3,... plot only 1 and 0
colours = ["r", "g"] #state 1 in red, state 0 in green
labels = ["Infected", "Susceptible"] #state 1 named 'Infected', 0 named 'Susceptible'
mapping = {SUSCEPTIBLE:"w", INFECTED:"r", RECOVERED:"0.4"}
trialToVisualise = 0
p = PlotCreator(directory, myName, title, statesToMonitor, colours, labels)
p.plotSimulation(show=False)
#show=True shows the graph directly,
#otherwise only a png file is created in the directory defined above.
visualiser = AnimationCreator(directory, myName, title, mapping, trial=trialToVisualise)
#gif speed can be changed by giving a parameter 'delay' (default=100) to AnimationCreator
visualiser.create_gif(verbose=True) | bsd-2-clause | 2,120,398,871,978,929,200 | 44.03125 | 170 | 0.743712 | false |
jonfoster/pyxb1 | pyxb/__init__.py | 1 | 10123 | """PyXB stands for Python U{W3C XML
Schema<http://www.w3.org/XML/Schema>} Bindings, and is pronounced
"pixbee". It enables translation between XML instance documents and
Python objects following rules specified by an XML Schema document.
This is the top-level entrypoint to the PyXB system. Importing this
gets you all the L{exceptions<pyxb.exceptions_.PyXBException>}, and
L{pyxb.namespace}. For more functionality, delve into these
submodules:
- L{pyxb.xmlschema} Module holding the
L{structures<pyxb.xmlschema.structures>} that convert XMLSchema
from a DOM model to a Python class model based on the XMLSchema
components. Use this when you need to operate on the component
model.
- L{pyxb.binding} Module used to generate the bindings and at runtime
to support the generated bindings. Use this if you need to use the
binding model or content model.
- L{pyxb.utils} Common utilities used in parsing, generating, and
executing. The submodules must be imported separately.
"""
import logging
_log = logging.getLogger(__name__)
class cscRoot (object):
"""This little bundle of joy exists because in Python 2.6 it
became an error to invoke C{object.__init__} with parameters (unless
you also override C{__new__}, in which case it's only a warning.
Whatever.). Since I'm bloody not going to check in every class
whether C{super(Myclass,self)} refers to C{object} (even if I could
figure out how to do that, 'cuz the obvious solutions don't work),
we'll just make this thing the root of all U{cooperative super
calling<http://www.geocities.com/foetsch/python/new_style_classes.htm#super>}
hierarchies. The standard syntax in PyXB for this pattern is::
def method_csc (self, *args, **kw):
super_fn = getattr(super(ThisClass, self), 'method_csc', lambda *a,**kw: self)
return super_fn(*args, **kw)
"""
def __init__ (self, *args, **kw):
# Oh gross. If this class descends from list (and probably dict), we
# get here when object is *not* our direct superclass. In that case,
# we have to pass the arguments on up, or the strings don't get
# created right. Below is the only way I've figured out to detect the
# situation.
#
# Note that we might also get here if you mix-in a class that used
# object as a parent instead of cscRoot. Don't do that. Printing the
# mro() is a decent way of identifying the problem.
if issubclass(self.__class__.mro()[-2], ( list, dict )):
super(cscRoot, self).__init__(*args)
__version__ = '1.1.5-DEV'
"""The version of PyXB"""
__url__ = 'http://pyxb.sourceforge.net'
"""The URL for PyXB's homepage"""
__license__ = 'Apache License 2.0'
# Bring in the exception hierarchy
from exceptions_ import *
# Bring in namespace stuff
import namespace
class BIND (object):
"""Bundle data for automated binding generation.
Instances of this class capture positional and keyword arguments that are
used to create binding instances based on context. For example, if C{w}
is an instance of a complex type whose C{option} element is declared to be
an anonymous class with simple content of type integer and an attribute of
C{units}, a correct assignment to that element could be achieved with::
w.option = BIND(54, units="m")
"""
__args = None
__kw = None
def __init__ (self, *args, **kw):
"""Cache parameters for subsequent binding creation.
Invoke just as you would the factory for a binding class."""
self.__args = args
self.__kw = kw
def createInstance (self, factory, **kw):
"""Invoke the given factory method.
Position arguments to the factory are those cached in this instance.
Keyword arguments are the ones on the command line, updated from the
ones in this instance."""
kw.update(self.__kw)
return factory(*self.__args, **kw)
XMLStyle_minidom = 0
"""Use xml.dom.minidom for XML processing. This is the fastest, but does not
provide location information. It produces DOM instances."""
XMLStyle_saxdom = 1
"""Use pyxb.utils.saxdom for XML processing. This is the slowest, but both
provides location information and generates a DOM instance."""
XMLStyle_saxer = 2
"""Use pyxb.binding.saxer when converting documents to binding instances.
This style supports location information in the bindings. It produces binding
instances directly, without going through a DOM stage, so is faster than
XMLStyle_saxdom. However, since the pyxb.xmlschema.structures classes require
a DOM model, XMLStyle_saxdom will be used for pyxb.utils.domutils.StringToDOM
if this style is selected."""
_XMLStyle = XMLStyle_saxer
"""The current XML processing style."""
_XMLStyleMap = { 'minidom' : XMLStyle_minidom,
'saxdom' : XMLStyle_saxdom,
'saxer' : XMLStyle_saxer }
_XMLStyleMapReverse = dict([ (_v, _k) for (_k, _v) in _XMLStyleMap.items() ])
_XMLStyle_envvar = 'PYXB_XML_STYLE'
def _SetXMLStyle (style=None):
"""Set the interface used to parse XML content.
This can be invoked within code. The system default of L{XMLStyle_saxer}
can also be overridden at runtime by setting the environment variable
C{PYXB_XML_STYLE} to one of C{minidom}, C{saxdom}, or C{saxer}.
@param style: One of L{XMLStyle_minidom}, L{XMLStyle_saxdom},
L{XMLStyle_saxer}. If not provided, the system default is used.
"""
global _XMLStyle
if style is None:
import os
style_name = os.environ.get(_XMLStyle_envvar)
if style_name is None:
style_name = 'saxer'
style = _XMLStyleMap.get(style_name)
if style is None:
raise PyXBException('Bad value "%s" for %s' % (style_name, _XMLStyle_envvar))
if _XMLStyleMapReverse.get(style) is None:
raise PyXBException('Bad value %s for _SetXMLStyle' % (style,))
_XMLStyle = style
#_log.debug("XML style %s", _XMLStyleMapReverse.get(_XMLStyle))
_SetXMLStyle()
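# Illustrative override (commented out; not part of the original module):
#   import pyxb
#   pyxb._SetXMLStyle(pyxb.XMLStyle_saxdom)  # use the slower DOM-building parser
# or, from the environment: PYXB_XML_STYLE=minidom python myscript.py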
# Global flag that we can use to determine whether optimization is active in
# this session. There may be cases where we can bypass methods that just
# check for things we don't care about in an optimized context
_OptimizationActive = False
try:
assert False
_OptimizationActive = True
except:
pass
_CorruptionDetectionEnabled = not _OptimizationActive
"""If C{True}, blocks attempts to assign to attributes that are reserved for
PyXB methods.
Applies only at compilation time; dynamic changes are ignored.
"""
_GenerationRequiresValid = True
def RequireValidWhenGenerating (value=None):
"""Query or set a flag that controls validation checking in XML generation.
Normally any attempts to convert a binding instance to a DOM or XML
representation requires that the binding validate against the content
model, since only in this way can the content be generated in the correct
order. In some cases it may be necessary or useful to generate a document
from a binding that is incomplete. If validation is not required, the
generated documents may not validate even if the content validates,
because ordering constraints will be ignored.
@keyword value: If absent or C{None}, no change is made; otherwise, this
enables (C{True}) or disables (C{False}) the requirement that instances
validate before being converted to XML.
@type value: C{bool}
@return: C{True} iff attempts to generate XML for a binding that does not
validate should raise an exception. """
global _GenerationRequiresValid
if value is None:
return _GenerationRequiresValid
if not isinstance(value, bool):
raise TypeError(value)
_GenerationRequiresValid = value
return _GenerationRequiresValid
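# Illustrative usage (assumed typical call site, commented out):
#   pyxb.RequireValidWhenGenerating(False)  # permit XML output from incomplete bindings
#   pyxb.RequireValidWhenGenerating()       # query the current setting -> False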
_ParsingRequiresValid = True
def RequireValidWhenParsing (value=None):
"""Query or set a flag that controls validation checking in XML parsing.
Normally any attempts to convert XML to a binding instance to a binding
instance requires that the document validate against the content model.
In some cases it may be necessary or useful to process a document that is
incomplete. If validation is not required, the generated documents may
not validate even if the content validates, because ordering constraints
will be ignored.
@keyword value: If absent or C{None}, no change is made; otherwise, this
enables (C{True}) or disables (C{False}) the requirement that documents
validate when being converted to bindings.
@type value: C{bool}
@return: C{True} iff attempts to generate bindings for a document that
does not validate should raise an exception."""
global _ParsingRequiresValid
if value is None:
return _ParsingRequiresValid
if not isinstance(value, bool):
raise TypeError(value)
_ParsingRequiresValid = value
return _ParsingRequiresValid
_PreserveInputTimeZone = False
def PreserveInputTimeZone (value=None):
"""Control whether time values are converted to UTC during input.
The U{specification <http://www.w3.org/TR/xmlschema-2/#dateTime>} makes
clear that timezoned times are in UTC and that times in other timezones
are to be translated to UTC when converted from literal to value form.
Provide an option to bypass this step, so the input timezone is preserved.
@note: Naive processing of unnormalized times--i.e., ignoring the
C{tzinfo} field--may result in errors."""
global _PreserveInputTimeZone
if value is None:
return _PreserveInputTimeZone
if not isinstance(value, bool):
raise TypeError(value)
_PreserveInputTimeZone = value
return _PreserveInputTimeZone
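# Illustrative effect (hypothetical literal): by default the value form of
# '2009-06-15T17:00:00-07:00' is normalized to UTC (2009-06-16T00:00:00Z);
# after PreserveInputTimeZone(True) the -07:00 offset is kept on the value.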
_OutputEncoding = 'utf-8'
"""Default unicode encoding to use when creating output.
Material being written to an XML parser is not output."""
_InputEncoding = 'utf-8'
"""Default unicode encoding to assume when decoding input.
Material being written to an XML parser is treated as input."""
## Local Variables:
## fill-column:78
## End:
| apache-2.0 | -4,845,504,037,243,957,000 | 38.236434 | 89 | 0.708683 | false |
Pikecillo/genna | external/4Suite-XML-1.0.2/test/Xml/XPointer/test_parser.py | 1 | 2641 |
#From the spec (and a few more)
pointers = [('xpointer(id("list37")/item)',
'xpointer(id("list37")/child::item)'),
('xpointer(id("list37")/item[1]/range-to(following-sibling::item[2]))',
'xpointer(id("list37")/child::item[1]/range-to(following-sibling::item[2]))'),
('xpointer(id("chap1"))xpointer(//*[@id="chap1"])',
'xpointer(id("chap1")) xpointer(//child::*[attribute::id = "chap1"])'),
('intro',
'intro'),
('xpointer(id("intro"))',
'xpointer(id("intro"))'),
('element(intro/14/3)',
'element(intro/14/3)'),
('element(/1/2/5/14/3)',
'element(/1/2/5/14/3)'),
('xpointer(//x:a)',
'xpointer(//child::x:a)'),
('xmlns(x=http://example.com/foo) xpointer(//x:a)',
'xmlns(x=http://example.com/foo) xpointer(//child::x:a)'),
('xmlns(x=http://example.com/foo) xmlns(y=http://examples.org/bar) xpointer(//x:a/y:a)',
'xmlns(x=http://example.com/foo) xmlns(y=http://examples.org/bar) xpointer(//child::x:a/child::y:a)'),
('xpointer(id("chap1")/range-to(id("chap2")))',
'xpointer(id("chap1")/range-to(id("chap2")))'),
('xpointer(descendant::REVST/range-to(following::REVEND[1]))',
'xpointer(descendant::REVST/range-to(following::REVEND[1]))'),
('xpointer(string-range(//title,"Thomas Pynchon")[17])',
'xpointer(string-range(//child::title, "Thomas Pynchon")[17])'),
('xpointer(string-range(//title,"Thomas Pynchon",8,0)[3])',
'xpointer(string-range(//child::title, "Thomas Pynchon", 8, 0)[3])'),
('xpointer(string-range(string-range(//P,"Thomas Pynchon")[3],"P",1,0))',
'xpointer(string-range(string-range(//child::P, "Thomas Pynchon")[3], "P", 1, 0))'),
('xpointer(string-range(/,"!",1,2)[5])',
'xpointer(string-range(/, "!", 1, 2)[5])'),
('xpointer(here()/ancestor::slide[1]/preceding::slide[1])',
'xpointer(here()/ancestor::slide[1]/preceding::slide[1])'),
('xmlns(x=foo.com) xpointer(1)',
'xmlns(x=foo.com) xpointer(1)'),
('xpointer(range-to(range-to(foo)))',
'xpointer(range-to(range-to(child::foo)))'),
]
def Test(tester):
from Ft.Xml.XPointer import XPointerParser
for ptr, expected in pointers:
tester.startTest(ptr)
result = XPointerParser.new().parse(ptr)
tester.compare(expected, repr(result))
tester.testDone()
return
| gpl-2.0 | -288,796,644,264,339,600 | 49.788462 | 115 | 0.525559 | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/widgets/analogclock.py | 1 | 3131 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from PySide import QtCore, QtGui
class AnalogClock(QtGui.QWidget):
hourHand = QtGui.QPolygon([
QtCore.QPoint(7, 8),
QtCore.QPoint(-7, 8),
QtCore.QPoint(0, -40)
])
minuteHand = QtGui.QPolygon([
QtCore.QPoint(7, 8),
QtCore.QPoint(-7, 8),
QtCore.QPoint(0, -70)
])
hourColor = QtGui.QColor(127, 0, 127)
minuteColor = QtGui.QColor(0, 127, 127, 191)
def __init__(self, parent=None):
super(AnalogClock, self).__init__(parent)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update)
timer.start(1000)
self.setWindowTitle("Analog Clock")
self.resize(200, 200)
def paintEvent(self, event):
side = min(self.width(), self.height())
time = QtCore.QTime.currentTime()
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.translate(self.width() / 2, self.height() / 2)
painter.scale(side / 200.0, side / 200.0)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(AnalogClock.hourColor)
painter.save()
painter.rotate(30.0 * ((time.hour() + time.minute() / 60.0)))
painter.drawConvexPolygon(AnalogClock.hourHand)
painter.restore()
painter.setPen(AnalogClock.hourColor)
for i in range(12):
painter.drawLine(88, 0, 96, 0)
painter.rotate(30.0)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(AnalogClock.minuteColor)
painter.save()
painter.rotate(6.0 * (time.minute() + time.second() / 60.0))
painter.drawConvexPolygon(AnalogClock.minuteHand)
painter.restore()
painter.setPen(AnalogClock.minuteColor)
for j in range(60):
if (j % 5) != 0:
painter.drawLine(92, 0, 96, 0)
painter.rotate(6.0)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
clock = AnalogClock()
clock.show()
sys.exit(app.exec_())
| epl-1.0 | -2,085,368,576,522,494,700 | 30 | 77 | 0.608432 | false |
google/clusterfuzz | src/local/butler/reproduce_tool/android.py | 1 | 3980 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android emulator installation and management."""
import os
import time
from local.butler.reproduce_tool import errors
from local.butler.reproduce_tool import prompts
from platforms.android import adb
from platforms.android import device
from system import environment
from system import new_process
ADB_DEVICES_SEPARATOR_STRING = 'List of devices attached'
EMULATOR_RELATIVE_PATH = os.path.join('local', 'bin', 'android-sdk', 'emulator',
'emulator')
def start_emulator():
"""Return a ProcessRunner configured to start the Android emulator."""
root_dir = environment.get_value('ROOT_DIR')
runner = new_process.ProcessRunner(
os.path.join(root_dir, EMULATOR_RELATIVE_PATH),
['-avd', 'TestImage', '-writable-system', '-partition-size', '2048'])
emulator_process = runner.run()
# If we run adb commands too soon after the emulator starts, we may see
# flake or errors. Delay a short while to account for this.
# TODO(mbarbella): This is slow and flaky, but wait-for-device isn't usable if
# another device is connected (as we don't know the serial yet). Find a better
# solution.
time.sleep(30)
return emulator_process
def get_devices():
"""Get a list of all connected Android devices."""
adb_runner = new_process.ProcessRunner(adb.get_adb_path())
result = adb_runner.run_and_wait(additional_args=['devices'])
if result.return_code:
raise errors.ReproduceToolUnrecoverableError('Unable to run adb.')
# Ignore non-device lines (those before "List of devices attached").
store_devices = False
devices = []
for line in result.output.splitlines():
if line == ADB_DEVICES_SEPARATOR_STRING:
store_devices = True
continue
if not store_devices or not line:
continue
devices.append(line.split()[0])
return devices
def prepare_environment(disable_android_setup):
"""Additional environment overrides needed to run on an Android device."""
environment.set_value('OS_OVERRIDE', 'ANDROID')
# Bail out if we can't determine which Android device to use.
serial = environment.get_value('ANDROID_SERIAL')
if not serial:
devices = get_devices()
if len(devices) == 1:
serial = devices[0]
environment.set_value('ANDROID_SERIAL', serial)
elif not devices:
raise errors.ReproduceToolUnrecoverableError(
'No connected Android devices were detected. Run with the -e '
'argument to use an emulator.')
else:
raise errors.ReproduceToolUnrecoverableError(
'You have multiple Android devices or emulators connected. Please '
'set the ANDROID_SERIAL environment variable and try again.\n\n'
'Attached devices: ' + ', '.join(devices))
print('Warning: this tool will make changes to settings on the connected '
'Android device with serial {serial} that could result in data '
'loss.'.format(serial=serial))
willing_to_continue = prompts.get_boolean(
'Are you sure you want to continue?')
if not willing_to_continue:
raise errors.ReproduceToolUnrecoverableError(
'Bailing out to avoid changing settings on the connected device.')
# Push the test case and build APK to the device.
apk_path = environment.get_value('APP_PATH')
device.update_build(
apk_path, should_initialize_device=not disable_android_setup)
device.push_testcases_to_device()
| apache-2.0 | -1,405,921,316,545,967,000 | 35.851852 | 80 | 0.711809 | false |
lostinplace/filtered-intervaltree | setup.py | 1 | 1509 | from setuptools import setup, find_packages
from codecs import open
from os import path
import os
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'readme.md'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, '.library-version'), encoding='utf-8') as f:
existing_version = f.read()
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
requirements = f.read().split('\n')
env_version = os.environ.get('LIBVER')
version = env_version or existing_version
setup(
name='filtered-intervaltree',
version=version,
description='an intervaltree with early exit bloom filters',
long_description=long_description,
url='https://github.com/lostinplace/filtered-intervaltree',
author='cwheeler',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
],
requires=[],
keywords='rbtree intervaltree bloomfilter',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['test']),
install_requires=requirements,
extras_require={
'test': ['coverage'],
}
) | mit | 3,768,461,388,915,031,600 | 26.962963 | 72 | 0.666667 | false |
lstephen/construi | setup.py | 1 | 1635 | import codecs
import os
from setuptools import find_packages, setup
def read(*parts):
path = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(path, encoding="utf-8") as fobj:
return fobj.read()
def find_version():
with open("VERSION") as f:
return f.read().strip()
def install_requires():
with open("requirements.txt") as f:
return [r.strip() for r in f.readlines()]
requires = {
"install": install_requires(),
"setup": ["pytest-runner == 2.6.2"],
"tests": ["pytest == 2.8.5", "pytest-cov == 2.2.0"],
}
summary = "Use Docker to define your build environment"
setup(
name="construi",
version=find_version(),
url="https://github.com/lstephen/construi",
license="Apache License 2.0",
description=summary,
long_description=summary,
author="Levi Stephen",
author_email="[email protected]",
zip_safe=True,
packages=find_packages(),
install_requires=requires["install"],
setup_requires=requires["setup"],
tests_require=requires["tests"],
entry_points={"console_scripts": ["construi=construi.cli:main"]},
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
)
| apache-2.0 | -3,990,177,488,345,775,600 | 27.189655 | 69 | 0.616514 | false |
Willyham/tchannel-python | tchannel/tornado/timeout.py | 1 | 1748 | # Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import tornado
import tornado.ioloop
import contextlib2
from ..errors import TimeoutError
@contextlib2.contextmanager
def timeout(future, seconds=0, io_loop=None):
# TODO: This is probably too heavy to attach to every request, should do
# this in the background.
io_loop = io_loop or tornado.ioloop.IOLoop.current()
def raise_timeout(*args, **kwargs):
if future.running():
future.set_exception(TimeoutError())
if not seconds:
# No timeout if seconds is set to 0.
yield
else:
io_loop.call_later(seconds, raise_timeout)
yield
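# Illustrative usage sketch (hypothetical names, not part of the original module):
# guard a Tornado future so it is failed with TimeoutError when it has not
# completed within the given number of seconds.
#
#     with timeout(response_future, seconds=5, io_loop=io_loop):
#         response = yield response_future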
| mit | 1,548,382,656,835,013,000 | 37.844444 | 79 | 0.741419 | false |
Kpler/scrapy-logentries-extension | scrapylogentries/extension.py | 1 | 1989 | import logging
import os
from scrapy import signals
from scrapy.exceptions import NotConfigured
from logentries import LogentriesHandler
from logentriesadapter import LogentriesAdapter, ScrapingHubFilter
logger = logging.getLogger(__name__)
class LogentriesExtension(object):
def __init__(self, token):
self.token = token
root = logging.getLogger()
self.handler = LogentriesHandler(token)
spider_id = os.environ.get('SCRAPY_SPIDER_ID')
project_id = os.environ.get('SCRAPY_PROJECT_ID')
job_id = os.environ.get('SCRAPY_JOB_ID')
formatted = False
if job_id is not None:
formatted = True
filter = ScrapingHubFilter({
'project_id': project_id,
'spider_id': spider_id,
'job_id': job_id,
})
format = "%(name)s - %(levelname)s - [project_id=%(project_id)s spider_id=%(spider_id)s job_id=%(job_id)s] %(message)s"
formatter = logging.Formatter(format)
self.handler.addFilter(filter)
self.handler.setFormatter(formatter)
root.addHandler(self.handler)
# NCA: not sure we want sensitive information like the token in the logs
# Maybe use debug log level instead
if formatted:
logger.info('Logentries activated with token {} and custom SH format'.format(token))
else:
logger.info('Logentries activated with token {} and no custom SH format'.format(token))
@classmethod
def from_crawler(cls, crawler):
# first check if the extension should be enabled and raise
# NotConfigured otherwise
token = crawler.settings.get('LOGENTRIES_TOKEN')
if not token:
raise NotConfigured
# instantiate the extension object
ext = cls(token)
# return the extension object
return ext
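# Illustrative usage sketch (assumed project settings, not part of this module):
# enabling the extension in a Scrapy project's settings.py. Only LOGENTRIES_TOKEN
# is required; without it from_crawler() raises NotConfigured and the extension
# stays disabled.
#
#     EXTENSIONS = {
#         'scrapylogentries.extension.LogentriesExtension': 500,
#     }
#     LOGENTRIES_TOKEN = '<your-logentries-token>'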
# vim: syntax=python:sws=4:sw=4:et:
| mit | 7,340,216,088,831,452,000 | 30.078125 | 131 | 0.612871 | false |
cbertinato/pandas | pandas/tests/frame/test_duplicates.py | 1 | 14578 | import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas.util.testing as tm
@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])
def test_duplicated_with_misspelled_column_name(subset):
# GH 19730
df = DataFrame({'A': [0, 0, 1],
'B': [0, 0, 1],
'C': [0, 0, 1]})
with pytest.raises(KeyError):
df.duplicated(subset)
with pytest.raises(KeyError):
df.drop_duplicates(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes():
# gh-21524
# Given the wide dataframe with a lot of columns
# with different (important!) values
data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000)
for i in range(100)}
df = DataFrame(data).T
result = df.duplicated()
# Then duplicates produce the bool Series as a result and don't fail during
    # calculation. Actual values don't matter here, though usually it's all
# False in this case
assert isinstance(result, Series)
assert result.dtype == np.bool
@pytest.mark.parametrize('keep, expected', [
('first', Series([False, False, True, False, True])),
('last', Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True]))
])
def test_duplicated_keep(keep, expected):
df = DataFrame({'A': [0, 1, 1, 2, 0], 'B': ['a', 'b', 'b', 'c', 'a']})
result = df.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal")
@pytest.mark.parametrize('keep, expected', [
('first', Series([False, False, True, False, True])),
('last', Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True]))
])
def test_duplicated_nan_none(keep, expected):
df = DataFrame({'C': [np.nan, 3, 3, None, np.nan]}, dtype=object)
result = df.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
@pytest.mark.parametrize('subset', [None, ['A', 'B'], 'A'])
def test_duplicated_subset(subset, keep):
df = DataFrame({'A': [0, 1, 1, 2, 0],
'B': ['a', 'b', 'b', 'c', 'a'],
'C': [np.nan, 3, 3, None, np.nan]})
if subset is None:
subset = list(df.columns)
elif isinstance(subset, str):
# need to have a DataFrame, not a Series
# -> select columns with singleton list, not string
subset = [subset]
expected = df[subset].duplicated(keep=keep)
result = df.duplicated(keep=keep, subset=subset)
tm.assert_series_equal(result, expected)
def test_drop_duplicates():
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.loc[[]]
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep='last')
expected = df.loc[[0, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep=False)
expected = df.loc[[0]]
tm.assert_frame_equal(result, expected)
# consider everything
df2 = df.loc[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep='last')
expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
tm.assert_frame_equal(result, expected)
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
df['E'] = df['C'].astype('int8')
result = df.drop_duplicates('E')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('E', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
# GH 11376
df = DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
'y': [0, 6, 5, 5, 9, 1, 2]})
expected = df.loc[df.index != 3]
tm.assert_frame_equal(df.drop_duplicates(), expected)
df = DataFrame([[1, 0], [0, 2]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = DataFrame([[-2, 0], [0, -4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
x = np.iinfo(np.int64).max / 3 * 2
df = DataFrame([[-x, x], [0, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = DataFrame([[-x, x], [x, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
# GH 11864
df = DataFrame([i] * 9 for i in range(16))
df = df.append([[1] + [0] * 8], ignore_index=True)
for keep in ['first', 'last', False]:
assert df.duplicated(keep=keep).sum() == 0
def test_duplicated_on_empty_frame():
# GH 25184
df = DataFrame(columns=['a', 'b'])
dupes = df.duplicated('a')
result = df[dupes]
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_with_duplicate_column_names():
# GH17836
df = DataFrame([
[1, 2, 5],
[3, 4, 6],
[3, 4, 7]
], columns=['a', 'a', 'b'])
result0 = df.drop_duplicates()
tm.assert_frame_equal(result0, df)
result1 = df.drop_duplicates('a')
expected1 = df[:2]
tm.assert_frame_equal(result1, expected1)
def test_drop_duplicates_for_take_all():
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
tm.assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple():
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep=False)
expected = df.loc[[]] # empty df
assert len(result) == 0
tm.assert_frame_equal(result, expected)
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('df', [
DataFrame(),
DataFrame(columns=[]),
DataFrame(columns=['A', 'B', 'C']),
DataFrame(index=[]),
DataFrame(index=['A', 'B', 'C'])
])
def test_drop_duplicates_empty(df):
# GH 20516
result = df.drop_duplicates()
tm.assert_frame_equal(result, df)
result = df.copy()
result.drop_duplicates(inplace=True)
tm.assert_frame_equal(result, df)
def test_drop_duplicates_NA():
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': range(8),
})
# single column
result = df.drop_duplicates('A')
expected = df.loc[[0, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.loc[[1, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.loc[[0, 2, 3, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep='last')
expected = df.loc[[1, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep=False)
expected = df.loc[[6]]
tm.assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': range(8),
})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.loc[[3, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.loc[[0, 1, 2, 4]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep='last')
expected = df.loc[[1, 3, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep=False)
expected = df.loc[[1]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all():
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
tm.assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace():
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep='last', inplace=True)
expected = orig.loc[[6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep=False, inplace=True)
expected = orig.loc[[]]
result = df
tm.assert_frame_equal(result, expected)
assert len(df) == 0
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.loc[[0, 1, 2, 3]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
expected = orig.loc[[0, 5, 6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
expected = orig.loc[[0]]
result = df
tm.assert_frame_equal(result, expected)
# consider everything
orig2 = orig.loc[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep='last', inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep='last')
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep=False)
result = df2
tm.assert_frame_equal(result, expected)
| bsd-3-clause | 8,847,764,871,148,110,000 | 30.691304 | 79 | 0.551996 | false |
teracyhq/flask-classy | test_classful/test_decorators.py | 2 | 7323 | from flask import Flask
from .view_classes import DecoratedView
from .view_classes import DecoratedBoldListView
from .view_classes import DecoratedBoldItalicsListView
from .view_classes import DecoratedListMemberView
from .view_classes import DecoratedListFunctionAttributesView
from .view_classes import DecoratedListMemberFunctionAttributesView
from .view_classes import DecoratedAppendClassAttributeView
from nose.tools import eq_
app = Flask("decorated")
DecoratedView.register(app)
DecoratedBoldListView.register(app)
DecoratedBoldItalicsListView.register(app)
DecoratedListMemberView.register(app)
DecoratedListFunctionAttributesView.register(app)
DecoratedListMemberFunctionAttributesView.register(app)
DecoratedAppendClassAttributeView.register(app)
client = app.test_client()
def test_func_decorator_index():
resp = client.get('/decorated/')
eq_(b"Index", resp.data)
resp = client.get('/decorated')
eq_(resp.status_code, 308)
def test_func_decorator_get():
resp = client.get('/decorated/1234/')
eq_(b"Get 1234", resp.data)
resp = client.get('/decorated/1234')
eq_(resp.status_code, 308)
def test_recursive_decorator_post():
resp = client.post('/decorated/')
eq_(b"Post", resp.data)
resp = client.post('/decorated')
eq_(resp.status_code, 308)
def test_more_recursive_decorator_get():
resp = client.get('/decorated/get_some/')
eq_(b"Get Some", resp.data)
resp = client.get('/decorated/get_some')
eq_(resp.status_code, 308)
def test_multiple_recursive_decorators_get():
resp = client.get('/decorated/get_this/')
eq_(b"Get This", resp.data)
resp = client.get('/decorated/get_this')
eq_(resp.status_code, 308)
def test_routes_with_recursive_decorators():
resp = client.get('/decorated/mixitup/')
eq_(b"Mix It Up", resp.data)
resp = client.get('/decorated/mixitup')
eq_(resp.status_code, 308)
def test_recursive_with_parameter():
resp = client.get('/decorated/someval/1234/')
eq_(b"Someval 1234", resp.data)
def test_recursive_with_route_with_parameter():
resp = client.get('/decorated/anotherval/1234/')
eq_(b"Anotherval 1234", resp.data)
def test_params_decorator():
resp = client.get('/decorated/params_decorator_method/')
eq_(b"Params Decorator", resp.data)
def test_params_decorator_delete():
resp = client.delete('/decorated/1234/')
eq_(b"Params Decorator Delete 1234", resp.data)
resp = client.delete('/decorated/1234')
eq_(resp.status_code, 308)
def test_decorator_bold_list_get():
"""Tests that the get route is wrapped in bold"""
resp = client.get('/decorated_bold_list_view/1234/')
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b>Get 1234</b>', resp.data)
resp = client.get('/decorated_bold_list_view/1234')
eq_(resp.status_code, 308)
def test_decorator_bold_list_index():
"""Tests that the index route is wrapped in bold"""
resp = client.get('/decorated_bold_list_view/')
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b>Index</b>', resp.data)
def test_decorator_bold_italics_list_get():
"""Tests that the get route is wrapped in bold and italics"""
resp = client.get('/decorated_bold_italics_list_view/1234/')
eq_(b'<i>' in resp.data, True)
eq_(b'</i>' in resp.data, True)
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b><i>Get 1234</i></b>', resp.data)
resp = client.get('/decorated_bold_italics_list_view/1234')
eq_(resp.status_code, 308)
def test_decorator_bold_italics_list_index():
"""Tests that the index route is wrapped in bold and italics"""
resp = client.get('/decorated_bold_italics_list_view/')
eq_(b'<i>' in resp.data, True)
eq_(b'</i>' in resp.data, True)
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b><i>Index</i></b>', resp.data)
def test_decorator_list_member_index():
"""
    Tests that the index route is wrapped in bold and
    italics, but not in a paragraph
"""
resp = client.get('/decorated_list_member_view/')
eq_(b'<i>' in resp.data, True)
eq_(b'</i>' in resp.data, True)
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<p>' not in resp.data, True)
eq_(b'</p>' not in resp.data, True)
eq_(b'<b><i>Index</i></b>', resp.data)
def test_decorator_list_member_get():
"""Tests the ordering of decorators"""
resp = client.get('/decorated_list_member_view/1234/')
eq_(b'<b>', resp.data[:3])
eq_(b'<i>', resp.data[3:6])
eq_(b'<p>', resp.data[6:9])
eq_(b'</p>', resp.data[-12:-8])
eq_(b'</i>', resp.data[-8:-4])
eq_(b'</b>', resp.data[-4:])
eq_(b'<b><i><p>Get 1234</p></i></b>', resp.data)
resp = client.get('/decorated_list_member_view/1234')
eq_(resp.status_code, 308)
def test_decorator_list_function_attributes_get():
"""
    Verify that a list of decorators with attributes modifies all functions in the FlaskView
"""
resp = client.get('/decorated_list_function_attributes_view/1234/')
eq_(b'Get 1234' in resp.data, True)
eq_(b'<i><b>Get 1234</b></i>', resp.data)
eq_(hasattr(
app.view_functions['DecoratedListFunctionAttributesView:get'],
'eggs'),
True)
eq_('scrambled',
app.view_functions['DecoratedListFunctionAttributesView:get'].eggs)
resp = client.get('/decorated_list_function_attributes_view/1234')
eq_(resp.status_code, 308)
def test_decorator_list_function_attributes_index():
"""
    Verify that a list of decorators with attributes modifies all functions in the FlaskView
"""
resp = client.get('/decorated_list_function_attributes_view/')
eq_(b'Index' in resp.data, True)
eq_(b'<i>Index</i>', resp.data)
eq_(hasattr(
app.view_functions['DecoratedListFunctionAttributesView:index'],
'eggs'),
True)
eq_('scrambled',
app.view_functions['DecoratedListFunctionAttributesView:index'].eggs)
def test_decorator_list_member_function_attributes_get():
"""Verify decorator with attributes does not modify other members"""
resp = client.get('/decorated_list_member_function_attributes_view/4321/')
eq_(b'Get 4321' in resp.data, True)
eq_(b'<i><b>Get 4321</b></i>', resp.data)
eq_(
hasattr(
app.view_functions[
'DecoratedListMemberFunctionAttributesView:get'
], 'eggs'),
False)
resp = client.get('/decorated_list_member_function_attributes_view/4321')
eq_(resp.status_code, 308)
def test_decorator_list_member_function_attributes_index():
"""Verify decorator with attributes modify decorated memeber functions"""
resp = client.get('/decorated_list_member_function_attributes_view/')
eq_(b'Index' in resp.data, True)
eq_(b'<i>Index</i>', resp.data)
eq_(hasattr(
app.view_functions[
'DecoratedListMemberFunctionAttributesView:index'
], 'eggs'),
True)
eq_('scrambled',
app.view_functions[
'DecoratedListMemberFunctionAttributesView:index'
].eggs)
def test_decorator_append_class_attribute_index():
resp = client.get('/decorated_append_class_attribute_view/')
eq_(b'Index (this is a test)', resp.data)
| bsd-3-clause | 495,681,920,520,354,400 | 32.286364 | 79 | 0.654377 | false |
fjtc/gtest-tools | gtestgen/tests/test_core.py | 1 | 7457 | # Copyright (c) 2015, FJTC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of gtest-tool nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gtestgen.core
import unittest
import os.path
from gtestgen.core import FileExistsException
class TemplateFileTest(unittest.TestCase):
def test_constructor(self):
t = gtestgen.core.TemplateFile('TemplateFileTest.tpl')
self.assertIsNotNone(t.template)
self.assertIsNotNone(t.file_name)
def test_constructor_failed(self):
try:
t = gtestgen.core.TemplateFile('this_file_does_not_exist')
self.fail()
except gtestgen.core.TemplateNotFoundException:
pass
def test_template(self):
t = gtestgen.core.TemplateFile('TemplateFileTest.tpl')
self.assertEquals('This is a simple ${VALUE} test.\n', t.template)
def test_file_name(self):
t = gtestgen.core.TemplateFile('TemplateFileTest.tpl')
self.assertEquals('TemplateFileTest.tpl', t.file_name)
def test_process(self):
t = gtestgen.core.TemplateFile('TemplateFileTest.tpl')
v = t.process({'VALUE': 'TemplateFileTest'})
self.assertEquals('This is a simple TemplateFileTest test.\n', v)
def test_processMissingParameter(self):
t = gtestgen.core.TemplateFile('TemplateFileTest.tpl')
v = t.process({'VALUES': 'TemplateFileTest'})
self.assertEquals('This is a simple ${VALUE} test.\n', v)
class TestTitleTest(unittest.TestCase):
def test_constructor(self):
t = gtestgen.core.TestTitle('title')
self.assertIsNotNone(t)
def test_name(self):
t = gtestgen.core.TestTitle('title')
self.assertEqual('title', t.name)
def test_is_valid_identifier(self):
t = gtestgen.core.TestTitle('title')
self.assertTrue(t.is_valid_identifier)
t = gtestgen.core.TestTitle('tItle')
self.assertTrue(t.is_valid_identifier)
t = gtestgen.core.TestTitle('t0tle')
self.assertTrue(t.is_valid_identifier)
t = gtestgen.core.TestTitle('Title')
self.assertTrue(t.is_valid_identifier)
t = gtestgen.core.TestTitle('_title')
self.assertTrue(t.is_valid_identifier)
t = gtestgen.core.TestTitle('t_itle')
self.assertTrue(t.is_valid_identifier)
t = gtestgen.core.TestTitle('9title')
self.assertFalse(t.is_valid_identifier)
t = gtestgen.core.TestTitle(' title')
self.assertFalse(t.is_valid_identifier)
t = gtestgen.core.TestTitle('t.itle')
self.assertFalse(t.is_valid_identifier)
def test_header_name(self):
t = gtestgen.core.TestTitle('title')
self.assertEqual('title.h', t.header_name)
def test_source_name(self):
t = gtestgen.core.TestTitle('title')
self.assertEqual('title.cpp', t.source_name)
def test_macro_name(self):
t = gtestgen.core.TestTitle('title')
self.assertEqual('__TITLE_H__', t.macro_name)
class EngineTest(unittest.TestCase):
def get_output_dir(self):
outdir = os.path.join('.', 'tmp')
if not os.path.isdir(outdir):
os.mkdir(outdir)
return outdir
def clear_output_dir(self):
outdir = self.get_output_dir()
for f in os.listdir(outdir):
path = os.path.join(outdir, f)
os.remove(path)
def test_constructor(self):
engine = gtestgen.core.Engine(None, None)
self.assertIsNotNone(engine.output_dir)
self.assertIsNotNone(engine.template_dir)
def test_output_dir(self):
engine = gtestgen.core.Engine(None, None)
self.assertEqual(os.path.abspath('.'), engine.output_dir)
engine.output_dir = '.'
self.assertEqual(os.path.abspath('.'), engine.output_dir)
engine.output_dir = '/opt'
self.assertEqual('/opt', engine.output_dir)
engine.output_dir = __file__
self.assertEqual(os.path.dirname(os.path.abspath(__file__)), engine.output_dir)
def test_template_dir(self):
engine = gtestgen.core.Engine(None, None)
self.assertEqual(os.path.dirname(gtestgen.core.__file__), engine.template_dir)
engine.template_dir = None
self.assertEqual(os.path.dirname(gtestgen.core.__file__), engine.template_dir)
engine.template_dir = '.'
self.assertEqual(os.path.abspath('.'), engine.template_dir)
try:
engine.template_dir = __file__
self.fail()
except gtestgen.core.TemplateNotFoundException:
pass
def test_generate_main(self):
outdir = self.get_output_dir()
self.clear_output_dir()
engine = gtestgen.core.Engine(outdir, os.path.dirname(__file__))
engine.generate_main()
# Test duplicated file
try:
engine.generate_main()
self.fail()
except gtestgen.core.FileExistsException:
pass
def test_generate_test(self):
outdir = self.get_output_dir()
self.clear_output_dir()
engine = gtestgen.core.Engine(outdir, os.path.dirname(__file__))
engine.generate_test('test_name')
# Test duplicated file
try:
engine.generate_test('test_name')
self.fail()
except gtestgen.core.FileExistsException:
pass
def test_generate_test_invalid_name(self):
outdir = self.get_output_dir()
self.clear_output_dir()
engine = gtestgen.core.Engine(outdir, os.path.dirname(__file__))
try:
engine.generate_test('00000')
self.fail()
except ValueError:
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,354,923,069,388,113,000 | 34.679426 | 87 | 0.624112 | false |
iwm911/plaso | plaso/formatters/mcafeeav.py | 1 | 1238 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for the McAfee AV Logs files."""
from plaso.lib import eventdata
class McafeeAccessProtectionLogEventFormatter(eventdata.EventFormatter):
"""Class that formats the McAfee Access Protection Log events."""
DATA_TYPE = 'av:mcafee:accessprotectionlog'
# The format string.
FORMAT_STRING = (u'File Name: {filename} User: {username} {trigger_location} '
u'{status} {rule} {action}')
FORMAT_STRING_SHORT = u'{filename} {action}'
SOURCE_LONG = 'McAfee Access Protection Log'
SOURCE_SHORT = 'LOG'
| apache-2.0 | -6,182,131,290,445,977,000 | 35.411765 | 80 | 0.731018 | false |
mollie/mollie-api-python | tests/test_client.py | 1 | 18333 | import re
import sys
import time
from datetime import datetime
import pytest
import requests.adapters
from mollie.api.client import Client, generate_querystring
from mollie.api.error import (
DataConsistencyError,
IdentifierError,
NotFoundError,
RequestError,
RequestSetupError,
ResponseError,
ResponseHandlingError,
UnauthorizedError,
UnprocessableEntityError,
)
from mollie.api.objects.method import Method
from mollie.api.objects.organization import Organization
from .utils import assert_list_object
@pytest.mark.parametrize(
"params, querystring",
[
({}, None),
({"locale": "nl_NL"}, "locale=nl_NL"),
({"locale": "nl_NL", "hoeba": "kek"}, "hoeba=kek&locale=nl_NL"),
({"amount": {"value": "100.00", "currency": "USD"}}, "amount%5Bcurrency%5D=USD&amount%5Bvalue%5D=100.00"),
],
)
def test_generate_querystring(params, querystring):
"""Verify that we can generate querystring that are correctly quoted."""
result = generate_querystring(params)
assert result == querystring
def test_client_querystring(client, response):
"""Verify that we are triggering the correct URL when using querystring with square brackets."""
response.add(
response.GET,
"https://api.mollie.com/v2/methods?amount[currency]=USD&amount[value]=100.00",
body=response._get_body("methods_list"),
match_querystring=True,
)
params = {"amount": {"currency": "USD", "value": "100.00"}}
methods = client.methods.list(**params)
assert_list_object(methods, Method)
def test_client_api_key():
"""Setting up a valid api key or access token should be possible."""
client = Client()
client.set_access_token("access_123")
assert client.api_key == "access_123"
client.set_api_key("live_123")
assert client.api_key == "live_123"
client.set_api_key("test_123")
assert client.api_key == "test_123"
def test_client_no_api_key():
"""A Request without an API key should raise an error."""
client = Client()
with pytest.raises(RequestSetupError, match="You have not set an API key."):
client.customers.list()
def test_client_invalid_api_key():
"""Setting up an invalid api key raises an error."""
client = Client()
with pytest.raises(RequestSetupError, match="Invalid API key: 'invalid'"):
client.set_api_key("invalid")
with pytest.raises(RequestSetupError, match="Invalid API key: 'access_123'"):
client.set_api_key("access_123")
with pytest.raises(RequestSetupError, match="Invalid access token: 'invalid'"):
client.set_access_token("invalid")
with pytest.raises(RequestSetupError, match="Invalid access token: 'live_123'"):
client.set_access_token("live_123")
with pytest.raises(RequestSetupError, match="Invalid access token: 'test_123'"):
client.set_access_token("test_123")
def test_client_broken_cert_bundle(monkeypatch):
"""A request should raise an error when the certificate bundle is not available.
    Under some circumstances it is possible that the certifi package is not correctly installed, broken,
or just plain too old. Connecting to the Mollie API should fail with an error when the certificate
cannot be verified.
"""
monkeypatch.setenv("REQUESTS_CA_BUNDLE", "/does/not/exist")
client = Client()
client.set_api_key("test_test")
with pytest.raises(OSError) as excinfo:
client.customers.list()
assert "Could not find a suitable TLS CA certificate bundle, invalid path: /does/not/exist" in str(excinfo.value)
def test_client_generic_request_error(response):
"""When the remote server refuses connections or other request issues arise, an error should be raised.
The 'response' fixture blocks all outgoing connections, also when no actual responses are configured.
"""
client = Client()
client.set_api_key("test_test")
client.set_api_endpoint("https://api.mollie.invalid/")
with pytest.raises(RequestError, match="Unable to communicate with Mollie: Connection refused"):
client.customers.list()
def test_client_invalid_create_data(client):
"""Invalid data for a create command should raise an error."""
data = datetime.now()
with pytest.raises(RequestSetupError, match="Error encoding parameters into JSON"):
client.customers.create(data=data)
def test_client_invalid_update_data(client):
"""Invalid data for a create command should raise an error."""
data = datetime.now()
with pytest.raises(RequestSetupError, match="Error encoding parameters into JSON"):
client.customers.update("cst_12345", data=data)
@pytest.mark.parametrize(
"endpoint, errorstr",
[
("customers", "Invalid customer ID: 'invalid'. A customer ID should start with 'cst_'."),
("payments", "Invalid payment ID: 'invalid'. A payment ID should start with 'tr_'."),
("refunds", "Invalid refund ID: 'invalid'. A refund ID should start with 're_'."),
("orders", "Invalid order ID: 'invalid'. An order ID should start with 'ord_'."),
],
)
def test_client_get_invalid_id(client, endpoint, errorstr):
"""An invalid formatted object ID should raise an error."""
with pytest.raises(IdentifierError, match=errorstr):
getattr(client, endpoint).get("invalid")
@pytest.mark.parametrize(
"endpoint, errorstr",
[
("customer_mandates", "Invalid mandate ID: 'invalid'. A mandate ID should start with 'mdt_'."),
("customer_payments", "Invalid payment ID: 'invalid'. A payment ID should start with 'tr_'."),
("customer_subscriptions", "Invalid subscription ID: 'invalid'. A subscription ID should start with 'sub_'."),
],
)
def test_client_get_customer_related_invalid_id(client, endpoint, errorstr):
"""An invalid formatted object ID should raise an error."""
with pytest.raises(IdentifierError, match=errorstr):
getattr(client, endpoint).with_parent_id("cst_12345").get("invalid")
@pytest.mark.parametrize(
"endpoint, errorstr",
[
("payment_chargebacks", "Invalid chargeback ID: 'invalid'. A chargeback ID should start with 'chb_'."),
("payment_refunds", "Invalid refund ID: 'invalid'. A refund ID should start with 're_'."),
],
)
def test_client_get_payment_related_invalid_id(client, endpoint, errorstr):
"""An invalid formatted object ID should raise an error."""
with pytest.raises(IdentifierError, match=errorstr):
getattr(client, endpoint).with_parent_id("tr_12345").get("invalid")
def test_client_invalid_json_response(client, response):
"""An invalid json response should raise an error."""
response.get("https://api.mollie.com/v2/customers", "invalid_json")
with pytest.raises(ResponseHandlingError, match=r"Unable to decode Mollie API response \(status code: 200\)"):
client.customers.list()
@pytest.mark.parametrize(
"resp_payload, resp_status, exception, errormsg",
[
("error_unauthorized", 401, UnauthorizedError, "Missing authentication, or failed to authenticate"),
("customer_doesnotexist", 404, NotFoundError, "No customer exists with token cst_doesnotexist."),
("payment_rejected", 422, UnprocessableEntityError, "The amount is higher than the maximum"),
("error_teapot", 418, ResponseError, "Just an example error that is not explicitly supported"),
],
)
def test_client_get_received_error_response(client, response, resp_payload, resp_status, exception, errormsg):
"""An error response from the API should raise a matching error."""
response.get("https://api.mollie.com/v2/customers/cst_doesnotexist", resp_payload, status=resp_status)
with pytest.raises(exception, match=errormsg) as excinfo:
client.customers.get("cst_doesnotexist")
assert excinfo.value.status == resp_status
@pytest.mark.parametrize(
"resp_payload, resp_status, exception, errormsg",
[
("error_unauthorized", 401, UnauthorizedError, "Missing authentication, or failed to authenticate"),
("customer_doesnotexist", 404, NotFoundError, "No customer exists with token cst_doesnotexist."),
("error_teapot", 418, ResponseError, "Just an example error that is not explicitly supported"),
],
)
def test_client_delete_received_error_response(client, response, resp_payload, resp_status, exception, errormsg):
"""When deleting, an error response from the API should raise a matching error."""
response.delete("https://api.mollie.com/v2/customers/cst_doesnotexist", resp_payload, status=resp_status)
with pytest.raises(exception, match=errormsg) as excinfo:
client.customers.delete("cst_doesnotexist")
assert excinfo.value.status == resp_status
def test_client_response_404_but_no_payload(response):
"""An error response from the API should raise an error.
When the response returns an error, but no valid error data is available in the response,
we should still raise an error. The API v1 formatted error in the test is missing the required 'status' field.
"""
response.get("https://api.mollie.com/v3/customers", "v1_api_error", status=404)
client = Client()
client.api_version = "v3"
client.set_api_key("test_test")
with pytest.raises(ResponseHandlingError, match="Invalid API version"):
client.customers.list()
def test_client_error_including_field_response(client, response):
"""An error response containing a 'field' value should be reflected in the raised error."""
response.post("https://api.mollie.com/v2/payments", "payment_rejected", status=422)
params = {
"amount": {
"value": "10000000.00",
"currency": "EUR",
},
"method": "ideal",
"description": "My order",
"redirectUrl": "https://webshop.example.org/order/12345/",
"webhookUrl": "https://webshop.example.org/payments/webhook/",
}
with pytest.raises(UnprocessableEntityError, match="The amount is higher than the maximum") as excinfo:
client.payments.create(**params)
assert excinfo.value.field == "amount"
@pytest.mark.skipif(sys.version_info.major == 2, reason="output differs for python 2")
def test_client_unicode_error_py3(client, response):
"""An error response containing Unicode characters should also be processed correctly."""
response.post("https://api.mollie.com/v2/orders", "order_error", status=422)
with pytest.raises(UnprocessableEntityError) as err:
# actual POST data for creating an order can be found in test_orders.py
client.orders.create({})
# handling the error should work even when utf-8 characters (€) are in the response.
exception = err.value
expected = (
"Order line 1 is invalid. VAT amount is off. "
"Expected VAT amount to be €3.47 (21.00% over €20.00), got €3.10"
)
assert str(exception) == expected
def test_client_request_timeout(mocker, client):
"""Mock requests.request in the client to be able to read if the timeout is in the request call args."""
mocked_request = mocker.patch("mollie.api.client.requests.Session.request")
# Create a mocked response for the request
response = mocker.Mock(status_code=200)
response.headers.get.return_value = "application/hal+json"
response.json.return_value = {}
mocked_request.return_value = response
client.set_timeout(300)
client.payments.list()
assert mocked_request.call_args[1]["timeout"] == 300
def test_client_request_timed_out(mocker, client):
"""Timeout should raise a RequestError."""
mocker.patch(
"mollie.api.client.requests.Session.request",
side_effect=requests.exceptions.ReadTimeout(
"HTTPSConnectionPool(host='api.mollie.com', port=443): Read timed out. (read timeout=10)"
),
)
with pytest.raises(RequestError, match="Read timed out."):
client.payments.list()
def test_client_will_propagate_retry_setting(response):
response.get("https://api.mollie.com/v2/methods", "methods_list")
client = Client(retry=3)
client.set_api_key("test_test")
client.methods.list()
adapter = client._client.adapters["https://"]
assert adapter.max_retries.connect == 3
def test_client_data_consistency_error(client, response):
"""When the API sends us data we did not expect raise an consistency error."""
order_id = "ord_kEn1PlbGa"
line_id = "odl_12345"
response.get(f"https://api.mollie.com/v2/orders/{order_id}", "order_single")
response.patch(f"https://api.mollie.com/v2/orders/{order_id}/lines/{line_id}", "order_single")
order = client.orders.get(order_id)
data = {
"name": "LEGO 71043 Hogwarts™ Castle",
}
    # Update a nonexistent order line. This raises a data consistency error.
with pytest.raises(DataConsistencyError, match=r"Line id .* not found in response."):
order.update_line(line_id, data)
def test_client_default_user_agent(client, response):
"""Default user-agent should contain some known values."""
regex = re.compile(r"^Mollie/[\d\.]+ Python/[\w\.\+]+ OpenSSL/[\w\.]+$")
assert re.match(regex, client.user_agent)
    # perform a request and inspect the actually used headers
response.get("https://api.mollie.com/v2/methods", "methods_list")
client.methods.list()
request = response.calls[0].request
assert re.match(regex, request.headers["User-Agent"])
def test_oauth_client_default_user_agent(oauth_client, response):
"""Default user-agent should contain some known values."""
regex = re.compile(r"^Mollie/[\d\.]+ Python/[\w\.\+]+ OpenSSL/[\w\.]+ OAuth/2.0$")
assert re.match(regex, oauth_client.user_agent)
    # perform a request and inspect the actually used headers
response.get("https://api.mollie.com/v2/organizations/me", "organization_current")
oauth_client.organizations.get("me")
request = response.calls[0].request
assert re.match(regex, request.headers["User-Agent"])
def test_client_user_agent_with_access_token():
"""When authenticating with an access token, the User-Agent should contain an OAuth component."""
client = Client()
assert "OAuth".lower() not in client.user_agent.lower()
client.set_access_token("access_123")
assert "OAuth/2.0" in client.user_agent
def test_client_set_user_agent_component(response):
"""We should be able to add useragent components.
Note: we don't use the fixture client because it is shared between tests, and we don't want it
to be clobbered with random User-Agent strings.
"""
client = Client()
assert "Hoeba" not in client.user_agent
client.set_user_agent_component("Hoeba", "1.0.0")
assert "Hoeba/1.0.0" in client.user_agent
response.get("https://api.mollie.com/v2/methods", "methods_list")
client.set_api_key("test_123")
client.methods.list()
request = response.calls[0].request
assert "Hoeba/1.0.0" in request.headers["User-Agent"]
@pytest.mark.parametrize(
"key, expected",
[
("lowercase", "Lowercase"),
("UPPERCASE", "Uppercase"),
("multiple words", "MultipleWords"),
("multiple spaces", "MultipleSpaces"),
("trailing space ", "TrailingSpace"),
],
)
def test_client_set_user_agent_component_correct_key_syntax(key, expected):
"""When we receive UA component keys that don't adhere to the proposed syntax, they are corrected."""
client = Client()
client.set_user_agent_component(key, "1.0.0")
assert f"{expected}/1.0.0" in client.user_agent
@pytest.mark.parametrize(
"value, expected",
[
("1.2.3", "1.2.3"),
("singleword", "singleword"),
("MiXedCaSe", "MiXedCaSe"), # should be preserved
("UPPERCASE", "UPPERCASE"), # should be preserved
("with space", "with_space"),
("multiple spaces", "multiple_spaces"),
("trailing space ", "trailing_space"),
],
)
def test_client_set_user_agent_component_correct_value_syntax(value, expected):
"""When we receive UA component values that don't adhere to the proposed syntax, they are corrected."""
client = Client()
client.set_user_agent_component("Something", value)
assert f"Something/{expected}" in client.user_agent
def test_client_update_user_agent_component():
"""We should be able to update the User-Agent component when using the same key."""
client = Client()
client.set_user_agent_component("Test", "1.0.0")
assert "Test/1.0.0" in client.user_agent
# now update the component using the same key
client.set_user_agent_component("Test", "2.0.0")
assert "Test/2.0.0" in client.user_agent
assert "Test/1.0.0" not in client.user_agent
# and update with a key that will be converted to the same value
client.set_user_agent_component("TEST", "3.0.0")
assert "Test/3.0.0" in client.user_agent
assert "Test/2.0.0" not in client.user_agent
assert "Test/1.0.0" not in client.user_agent
def test_oauth_client_will_refresh_token_automatically(mocker, oauth_token, response):
"""Initializing the client with an expired token will trigger a token refresh automatically."""
# expire the token: set expiration time in the past.
oauth_token["expires_at"] = time.time() - 5
set_token_mock = mocker.Mock()
client = Client()
client.setup_oauth(
client_id="client_id",
client_secret="client_secret",
redirect_uri="https://example.com/callback",
scope=("organizations.read",),
token=oauth_token,
set_token=set_token_mock,
)
# setup two request mocks: the token refresh and the actual data request
response.post("https://api.mollie.com/oauth2/tokens", "token_single")
response.get("https://api.mollie.com/v2/organizations/me", "organization_current")
organization = client.organizations.get("me")
assert isinstance(organization, Organization), "Unexpected result from request."
assert response.assert_all_requests_are_fired, "Not all expected requests have been performed."
# verify handling of the new token
set_token_mock.assert_called_once()
args, kwargs = set_token_mock.call_args
assert isinstance(args[0], dict), "set_token() did not receive a dictionary."
| bsd-2-clause | 6,219,822,788,418,959,000 | 39.094092 | 118 | 0.684549 | false |
poulpito/Flexget | flexget/plugins/daemon/irc.py | 1 | 41096 | from __future__ import unicode_literals, division, absolute_import, with_statement
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
from future.moves.urllib.parse import quote
import os
import re
import threading
import logging
from xml.etree.ElementTree import parse
import io
from uuid import uuid4
import time
from datetime import datetime, timedelta
from flexget.entry import Entry
from flexget.config_schema import register_config_key
from flexget.event import event
from flexget.manager import manager
from flexget.config_schema import one_or_more
from flexget.utils import requests
from flexget.utils.tools import get_config_hash
try:
from irc_bot.irc_bot import IRCBot, partial
from irc_bot import irc_bot
except ImportError as e:
irc_bot = None
IRCBot = object
log = logging.getLogger('irc')
MESSAGE_CLEAN = re.compile("\x0f|\x1f|\x02|\x03(?:[\d]{1,2}(?:,[\d]{1,2})?)?", re.MULTILINE | re.UNICODE)
URL_MATCHER = re.compile(r'(https?://[\da-z\.-]+\.[a-z\.]{2,6}[/\w\.-\?&]*/?)', re.MULTILINE | re.UNICODE)
channel_pattern = {
'type': 'string', 'pattern': '^([#&][^\x07\x2C\s]{0,200})',
'error_pattern': 'channel name must start with # or & and contain no commas and whitespace'
}
schema = {
'oneOf': [
{
'type': 'object',
'additionalProperties': {
'type': 'object',
'properties': {
'tracker_file': {'type': 'string'},
'server': {'type': 'string'},
'port': {'type': 'integer'},
'nickname': {'type': 'string'},
'channels': one_or_more(channel_pattern),
'nickserv_password': {'type': 'string'},
'invite_nickname': {'type': 'string'},
'invite_message': {'type': 'string'},
'task': one_or_more({
'type': 'string'
}),
'task_re': {
'type': 'object',
'additionalProperties': one_or_more({
'type': 'object',
'properties': {
'regexp': {'type': 'string'},
'field': {'type': 'string'}
},
'required': ['regexp', 'field'],
'additionalProperties': False
})
},
'queue_size': {'type': 'integer', 'default': 1},
'use_ssl': {'type': 'boolean', 'default': False},
'task_delay': {'type': 'integer'},
},
'anyOf': [
{'required': ['server', 'channels']},
{'required': ['tracker_file']}
],
'error_anyOf': 'Must specify a tracker file or server and channel(s)',
'oneOf': [
{'required': ['task']},
{'required': ['task_re']}
],
'error_oneOf': 'Must specify a task',
'required': ['port'],
'additionalProperties': {'type': 'string'},
}
},
{'type': 'boolean', 'enum': [False]},
]
}
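# Illustrative FlexGet config sketch matching the schema above (the tracker file,
# task name and nickname are hypothetical):
#
#     irc:
#       my_tracker:
#         tracker_file: 'SomeTracker.tracker'
#         port: 6667
#         nickname: my_irc_bot
#         task: grab_from_irc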
# Global that holds all the IRCConnection instances
irc_connections = {}
# The manager object and thread
irc_manager = None
# To avoid having to restart the connections whenever the config updated event is fired (which is apparently a lot)
config_hash = {}
def create_thread(name, conn):
"""
    Creates a new daemon thread for the given connection; the caller is responsible for starting it
    :param name: Name for the thread
    :param conn: IRCConnection or IRCConnectionManager object
:return: Thread
"""
thread = threading.Thread(target=conn.start, name=name)
thread.setDaemon(True)
return thread
def irc_prefix(var):
"""
    Prefix a string with 'irc_'
:param var: Variable to prefix
:return: Prefixed variable
"""
if isinstance(var, basestring):
return 'irc_%s' % var.lower()
def strip_whitespace(value):
"""
Remove leading and trailing whitespace from strings. Return value if not a string.
:param value:
:return: stripped string or value
"""
if isinstance(value, basestring):
return value.strip()
return value
class TrackerFileParseError(Exception):
"""Exception thrown when parsing the tracker file fails"""
class TrackerFileError(Exception):
"""Exception thrown when parsing the tracker file fails"""
class MissingConfigOption(Exception):
"""Exception thrown when a config option specified in the tracker file is not on the irc config"""
class IRCConnection(IRCBot):
def __init__(self, config, config_name):
self.config = config
self.connection_name = config_name
self.tracker_config = None
self.server_list = []
self.announcer_list = []
self.ignore_lines = []
self.message_regex = []
# If we have a tracker config file, load it
tracker_config_file = config.get('tracker_file')
if tracker_config_file:
self.tracker_config = self.retrieve_tracker_config(tracker_config_file)
channel_list = []
if self.tracker_config is not None:
# Validate config with the settings in the torrent file
for param in self.tracker_config.find('settings'):
# Handle textbox entries
if param.tag == 'textbox':
value_name = param.get('name')
else:
value_name = param.tag
# Strip the gazelle prefix
if value_name.startswith('gazelle_'):
value_name = value_name.replace('gazelle_', '')
# Skip descriptions
if 'description' in value_name:
continue
if self.config.get(value_name) is None:
raise MissingConfigOption('missing configuration option on irc config %s: %s' %
(self.connection_name, value_name))
# Get the tracker name, for use in the connection name
self.connection_name = self.tracker_config.get('longName', config_name)
# Extract the IRC server information
for server in self.tracker_config.find('servers'):
self.server_list.extend(server.get('serverNames').split(','))
channel_list.extend(server.get('channelNames').split(','))
self.announcer_list.extend(server.get('announcerNames').split(','))
# Process ignore lines
for regex_values in self.tracker_config.findall('parseinfo/ignore/regex'):
rx = re.compile(regex_values.get('value'), re.UNICODE | re.MULTILINE)
self.ignore_lines.append((rx, regex_values.get('expected') != 'false'))
# Parse patterns
self.multilinepatterns = self.parse_patterns(list(
self.tracker_config.findall('parseinfo/multilinepatterns/extract')))
self.linepatterns = self.parse_patterns(list(
self.tracker_config.findall('parseinfo/linepatterns/extract')))
# overwrite tracker config with flexget config
if self.config.get('server'):
self.server_list = [self.config['server']]
log.debug('Using server specified from config')
channels = config.get('channels')
if channels:
channel_list = channels if isinstance(channels, list) else [channels]
log.debug('Using channel(s) specified from config')
log.debug('Servers: %s', self.server_list)
log.debug('Channels: %s', channel_list)
log.debug('Announcers: %s', self.announcer_list)
log.debug('Ignore Lines: %d', len(self.ignore_lines))
log.debug('Message Regexs: %d', len(self.multilinepatterns) + len(self.linepatterns))
for rx, vals, optional in self.multilinepatterns:
msg = ' Multilinepattern "%s" extracts %s'
if optional:
msg += ' (optional)'
log.debug(msg, rx.pattern, vals)
for rx, vals, optional in self.linepatterns:
msg = ' Linepattern "%s" extracts %s'
if optional:
msg += ' (optional)'
log.debug(msg, rx.pattern, vals)
# Init the IRC Bot
ircbot_config = {'servers': self.server_list, 'port': config['port'], 'channels': channel_list,
'nickname': config.get('nickname', 'Flexget-%s' % str(uuid4())),
'invite_nickname': config.get('invite_nickname'),
'invite_message': config.get('invite_message'),
'nickserv_password': config.get('nickserv_password'),
'use_ssl': config.get('use_ssl')}
IRCBot.__init__(self, ircbot_config)
self.inject_before_shutdown = False
self.entry_queue = []
self.line_cache = {}
self.processing_message = False # if set to True, it means there's a message processing queued
self.thread = create_thread(self.connection_name, self)
@classmethod
def read_tracker_config(cls, path):
"""
Attempts to open and parse the .tracker file specified in path
:param path: path to .tracker file
:return: the parsed XML
"""
try:
tracker_config = parse(path).getroot()
except Exception as e:
raise TrackerFileParseError('Unable to parse tracker config file %s: %s' % (path, e))
else:
return tracker_config
@classmethod
def retrieve_tracker_config(cls, tracker_config_file):
"""
Will attempt to retrieve the .tracker file from disk or github. Returns the parsed XML.
:param tracker_config_file: URL or path to .tracker file
:return: parsed XML
"""
base_url = 'https://raw.githubusercontent.com/autodl-community/autodl-trackers/master/'
tracker_config_file = os.path.expanduser(tracker_config_file)
# First we attempt to find the file locally as-is
if os.path.exists(tracker_config_file):
log.debug('Found tracker file: %s', tracker_config_file)
return cls.read_tracker_config(tracker_config_file)
if not tracker_config_file.endswith('.tracker'):
tracker_config_file += '.tracker'
# Maybe the file is missing extension?
if os.path.exists(tracker_config_file):
log.debug('Found tracker file: %s', tracker_config_file)
            return cls.read_tracker_config(tracker_config_file)
# Check that containing dir exists, otherwise default to flexget_config_dir/trackers
if os.path.exists(os.path.dirname(tracker_config_file)):
base_dir = os.path.dirname(tracker_config_file)
else:
base_dir = os.path.abspath(os.path.join(manager.config_base, 'trackers'))
# Find the filenames for easy use later
tracker_name = os.path.basename(tracker_config_file)
tracker_name_no_ext = os.path.splitext(tracker_name)[0]
# One last try with case insensitive search!
if os.path.exists(base_dir):
files = os.listdir(base_dir)
for f in files:
if tracker_name_no_ext.lower() in f.lower():
found_path = os.path.join(base_dir, f)
log.debug('Found tracker file: %s', found_path)
return cls.read_tracker_config(found_path)
# Download from Github instead
if not os.path.exists(base_dir): # will only try to create the default `trackers` dir
try:
os.mkdir(base_dir)
except IOError as e:
raise TrackerFileError(e)
log.info('Tracker file not found on disk. Attempting to fetch tracker config file from Github.')
tracker = None
try:
tracker = requests.get(base_url + tracker_config_file)
except (requests.RequestException, IOError):
pass
if not tracker:
try:
log.debug('Trying to search list of tracker files on Github')
# Try to see if it's not found due to case sensitivity
trackers = requests.get('https://api.github.com/repos/autodl-community/'
'autodl-trackers/git/trees/master?recursive=1').json().get('tree', [])
for t in trackers:
name = t.get('path', '')
if not name.endswith('.tracker') or name.lower() != tracker_name.lower():
continue
tracker = requests.get(base_url + name)
tracker_name = name
break
except (requests.RequestException, IOError) as e:
raise TrackerFileError(e)
if not tracker:
raise TrackerFileError('Unable to find %s on disk or Github. Did you spell it correctly?' %
tracker_config_file)
# If we got this far, let's save our work :)
save_path = os.path.join(base_dir, tracker_name)
with io.open(save_path, 'wb') as tracker_file:
for chunk in tracker.iter_content(8192):
tracker_file.write(chunk)
return cls.read_tracker_config(save_path)
def is_alive(self):
return self.thread and self.thread.is_alive()
def parse_patterns(self, patterns):
"""
        Parses the patterns and creates a list of tuples containing the compiled regex pattern,
        the variables it produces and whether matching the pattern is optional
        :param patterns: list of regex patterns as .tracker XML
        :return: list of (regex, variables, optional) tuples
"""
result = []
for pattern in patterns:
rx = re.compile(pattern.find('regex').get('value'), re.UNICODE | re.MULTILINE)
            vals = [var.get('name') for var in pattern.find('vars')]
            optional = pattern.get('optional', 'false').lower() == 'true'
result.append((rx, vals, optional))
return result
def quit(self):
"""
Quit the IRC bot
:return:
"""
if self.inject_before_shutdown and self.entry_queue:
self.run_tasks()
IRCBot.quit(self)
def run_tasks(self):
"""
Passes entries to the target task(s) configured for this connection
:return:
"""
tasks = self.config.get('task')
tasks_re = self.config.get('task_re')
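        # Hypothetical 'task_re' shape, inferred from the parsing below:
        #   task_re:
        #     some-task:
        #       - regexp: '1080p'
        #         field: irc_torrentname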
if tasks:
if isinstance(tasks, basestring):
tasks = [tasks]
log.debug('Injecting %d entries into tasks %s', len(self.entry_queue), ', '.join(tasks))
manager.execute(options={'tasks': tasks, 'cron': True, 'inject': self.entry_queue, 'allow_manual': True},
priority=5)
if tasks_re:
tasks_entry_map = {}
for entry in self.entry_queue:
matched = False
for task, config in tasks_re.items():
if isinstance(config, dict):
config = [config]
for c in config:
if re.search(c['regexp'], entry.get(c['field'], ''), re.IGNORECASE):
matched = True
if not tasks_entry_map.get(task):
tasks_entry_map[task] = []
tasks_entry_map[task].append(entry)
if not matched:
log.debug('Entry "%s" did not match any task regexp.', entry['title'])
for task, entries in tasks_entry_map.items():
log.debug('Injecting %d entries into task "%s"', len(entries), task)
manager.execute(options={'tasks': [task], 'cron': True, 'inject': entries, 'allow_manual': True},
priority=5)
self.entry_queue = []
def queue_entry(self, entry):
"""
        Stores an entry in the connection's entry queue; once the queue reaches the configured size limit,
        the queued entries are submitted to the target task(s)
:param entry: Entry to be queued
:return:
"""
self.entry_queue.append(entry)
log.debug('Entry: %s', entry)
if len(self.entry_queue) >= self.config['queue_size']:
if self.config.get('task_delay'):
self.schedule.queue_command(self.config['task_delay'], self.run_tasks, unique=False)
else:
self.run_tasks()
def match_message_patterns(self, patterns, msg):
"""
        Tries to match the message against the list of patterns. Supports multiline messages.
        :param patterns: list of (regex, variables, optional) tuples
        :param msg: The parsed IRC message
        :return: A dict of the variables and their extracted values
"""
result = {}
for rx, vals, _ in patterns:
log.debug('Using pattern %s to parse message vars', rx.pattern)
match = rx.search(msg)
if match:
val_names = [irc_prefix(val.lower()) for val in vals]
val_values = [strip_whitespace(x) or '' for x in match.groups()]
result.update(dict(zip(val_names, val_values)))
log.debug('Found: %s', dict(zip(val_names, val_values)))
break
else:
log.debug('No matches found for %s in %s', rx.pattern, msg)
return result
def process_tracker_config_rules(self, entry, rules=None):
"""
Processes an Entry object with the linematched rules defined in a tracker config file
:param entry: Entry to be updated
:param rules: Ruleset to use.
:return:
"""
ignore_optionals = []
if rules is None:
rules = self.tracker_config.find('parseinfo/linematched')
# Make sure all irc fields from entry are in `fields`
fields = {key: val for key, val in entry.items() if key.startswith('irc_')}
for rule in rules:
            log.debug('Processing rule %s', rule.tag)
# Var - concat a var from other vars
if rule.tag == 'var':
result = ''
for element in rule:
if element.tag == 'string':
result += element.get('value')
elif element.tag in ['var', 'varenc']:
varname = element.get('name')
if irc_prefix(varname) in fields:
value = fields[irc_prefix(varname)]
elif self.config.get(varname):
value = self.config.get(varname)
else:
log.error('Missing variable %s from config, skipping rule', irc_prefix(varname))
break
if element.tag == 'varenc':
value = quote(value.encode('utf-8'))
result += value
else:
log.error('Unsupported var operation %s, skipping rule', element.tag)
break
else:
# Only set the result if we processed all elements
log.debug('Result for rule %s: %s=%s', rule.tag, rule.get('name'), result)
fields[irc_prefix(rule.get('name'))] = result
# Var Replace - replace text in a var
elif rule.tag == 'varreplace':
source_var = irc_prefix(rule.get('srcvar'))
target_var = irc_prefix(rule.get('name'))
regex = rule.get('regex')
replace = rule.get('replace')
if source_var and target_var and regex is not None and replace is not None and source_var in fields:
fields[target_var] = re.sub(regex, replace, fields[source_var])
log.debug('varreplace: %s=%s', target_var, fields[target_var])
else:
log.error('Invalid varreplace options, skipping rule')
# Extract - create multiple vars from a single regex
elif rule.tag == 'extract':
source_var = irc_prefix(rule.get('srcvar'))
if source_var not in fields:
if rule.get('optional', 'false') == 'false':
log.error('Error processing extract rule, non-optional value %s missing!', source_var)
ignore_optionals.append(source_var)
continue
if rule.find('regex') is not None:
regex = rule.find('regex').get('value')
else:
log.error('Regex option missing on extract rule, skipping rule')
continue
group_names = [irc_prefix(x.get('name')) for x in rule.find('vars') if x.tag == 'var']
match = re.search(regex, fields[source_var])
if match:
fields.update(dict(zip(group_names, match.groups())))
else:
log.debug('No match found for rule extract')
# Extract Tag - set a var if a regex matches a tag in a var
elif rule.tag == 'extracttags':
source_var = irc_prefix(rule.get('srcvar'))
split = rule.get('split')
if source_var in ignore_optionals:
continue
values = [strip_whitespace(x) for x in fields[source_var].split(split)]
for element in rule:
if element.tag == 'setvarif':
target_var = irc_prefix(element.get('varName'))
regex = element.get('regex')
value = element.get('value')
new_value = element.get('newValue')
if regex is not None:
found_match = False
for val in values:
match = re.match(regex, val)
if match:
fields[target_var] = val
found_match = True
if not found_match:
log.debug('No matches found for regex %s', regex)
elif value is not None and new_value is not None:
if value in values:
fields[target_var] = new_value
else:
log.debug('No match found for value %s in %s', value, source_var)
else:
log.error('Missing regex/value/newValue for setvarif command, ignoring')
# Extract One - extract one var from a list of regexes
elif rule.tag == 'extractone':
for element in rule:
if element.tag == 'extract':
source_var = irc_prefix(element.get('srcvar'))
if element.find('regex') is not None:
regex = element.find('regex').get('value')
else:
log.error('Regex option missing on extract rule, skipping.')
continue
if element.find('vars') is not None:
vars = [irc_prefix(var.get('name')) for var in element.find('vars')]
else:
log.error('No variable bindings found in extract rule, skipping.')
continue
match = re.match(regex, fields.get(source_var, ''))
if match:
fields.update(dict(zip(vars, match.groups())))
else:
log.debug('No match for extract with regex: %s', regex)
else:
log.error('Unsupported extractone tag: %s', element.tag)
# Set Regex - set a var if a regex matches
elif rule.tag == 'setregex':
source_var = irc_prefix(rule.get('srcvar'))
regex = rule.get('regex')
target_var = irc_prefix(rule.get('varName'))
target_val = rule.get('newValue')
if source_var and regex and target_var and target_val:
if source_var in fields and re.search(regex, fields[source_var]):
fields[target_var] = target_val
else:
log.error('Option missing on setregex, skipping rule')
# If statement
elif rule.tag == 'if':
source_var = irc_prefix(rule.get('srcvar'))
regex = rule.get('regex')
if source_var and regex:
if source_var in fields and re.match(regex, fields[source_var]):
fields.update(self.process_tracker_config_rules(fields, rule))
else:
log.error('Option missing for if statement, skipping rule')
else:
log.warning('Unsupported linematched tag: %s', rule.tag)
return fields
def on_privmsg(self, msg):
"""
Appends messages for the specific channel in the line cache. Schedules a message processing after 1s to
handle multiline announcements.
:param msg: IRCMessage object
:return:
"""
nickname = msg.from_nick
channel = msg.arguments[0]
if not irc_bot.is_channel(channel):
log.debug('Received msg is not a channel msg: %s', msg)
return
# set some defaults
self.line_cache.setdefault(channel, {})
self.line_cache[channel].setdefault(nickname, [])
self.line_cache[channel][nickname].append(msg.arguments[1])
if not self.processing_message:
# Schedule a parse of the message in 1 second (for multilines)
self.schedule.queue_command(1, partial(self.process_message, nickname, channel))
self.processing_message = True
def process_message(self, nickname, channel):
"""
Pops lines from the line cache and passes them to be parsed
:param str nickname: Nickname of who sent the message
:param str channel: Channel where the message originated from
:return: None
"""
# If we have announcers defined, ignore any messages not from them
if self.announcer_list and nickname not in self.announcer_list:
log.debug('Ignoring message: from non-announcer %s', nickname)
return
# Clean up the messages
lines = [MESSAGE_CLEAN.sub('', line) for line in self.line_cache[channel][nickname]]
log.debug('Received line(s): %s', u'\n'.join(lines))
# Generate some entries
if self.linepatterns:
entries = self.entries_from_linepatterns(lines)
elif self.multilinepatterns:
entries, lines = self.entries_from_multilinepatterns(lines)
else:
entries = self.entries_from_lines(lines)
for entry in entries:
# Process the generated entry through the linematched rules
if self.tracker_config is not None and entry:
entry.update(self.process_tracker_config_rules(entry))
elif self.tracker_config is not None:
log.error('Failed to parse message(s).')
return
entry['title'] = entry.get('irc_torrentname')
entry['url'] = entry.get('irc_torrenturl')
log.debug('Entry after processing: %s', dict(entry))
if not entry['url'] or not entry['title']:
log.error('Parsing message failed. Title=%s, url=%s.', entry['title'], entry['url'])
continue
log.verbose('IRC message in %s generated an entry: %s', channel, entry)
self.queue_entry(entry)
# reset the line cache
if self.multilinepatterns and lines:
self.line_cache[channel][nickname] = lines
log.debug('Left over lines: %s', '\n'.join(lines))
else:
self.line_cache[channel][nickname] = []
self.processing_message = False
def entries_from_linepatterns(self, lines):
"""
:param lines: list of lines from irc
:return list: list of entries generated from lines
"""
entries = []
for line in lines:
            # If it's listed in ignore lines, skip it
            if any(rx.match(line) and expected for rx, expected in self.ignore_lines):
                log.debug('Ignoring message: matched ignore line')
                continue
entry = Entry(irc_raw_message=line)
match = self.match_message_patterns(self.linepatterns, line)
# Generate the entry and process it through the linematched rules
if not match:
log.error('Failed to parse message. Skipping.')
continue
entry.update(match)
entries.append(entry)
return entries
def entries_from_multilinepatterns(self, lines):
"""
        :param lines: list of lines
        :return: tuple of (entries generated from the lines, leftover lines that did not form a complete match)
"""
entries = []
rest = [] # contains the rest of the lines
while len(lines) > 0:
entry = Entry()
raw_message = ''
matched_lines = []
for idx, (rx, vals, optional) in enumerate(self.multilinepatterns):
log.debug('Using pattern %s to parse message vars', rx.pattern)
# find the next candidate line
line = ''
for l in list(lines):
# skip ignored lines
for ignore_rx, expected in self.ignore_lines:
if ignore_rx.match(l) and expected:
log.debug('Ignoring message: matched ignore line')
lines.remove(l)
break
else:
line = l
break
raw_message += '\n' + line
match = self.match_message_patterns([(rx, vals, optional)], line)
if match:
entry.update(match)
matched_lines.append(line)
lines.remove(line)
elif optional:
log.debug('No match for optional extract pattern found.')
elif not line:
rest = matched_lines + lines
break
elif idx == 0: # if it's the first regex that fails, then it's probably just garbage
log.error('No matches found for pattern %s', rx.pattern)
lines.remove(line)
rest = lines
break
else:
log.error('No matches found for pattern %s', rx.pattern)
rest = lines
break
else:
entry['irc_raw_message'] = raw_message
entries.append(entry)
continue
return entries, rest
def entries_from_lines(self, lines):
"""
:param lines: list of lines
:return list: list of entries generated from lines
"""
entries = []
for line in lines:
entry = Entry(irc_raw_message=line)
# Use the message as title
entry['title'] = line
# find a url...
url_match = URL_MATCHER.findall(line)
if url_match:
# We have a URL(s)!, generate an entry
urls = list(url_match)
url = urls[-1]
entry.update({
'urls': urls,
'url': url,
})
if not entry.get('url'):
log.error('Parsing message failed. No url found.')
continue
entries.append(entry)
return entries
def is_connected(self):
return self.connected
def stop(self, wait):
if self.is_connected() and wait:
self.inject_before_shutdown = True
self.quit()
class IRCConnectionManager(object):
def __init__(self, config):
self.config = config
self.shutdown_event = threading.Event()
self.wait = False
self.delay = 30
self.thread = create_thread('irc_manager', self)
self.thread.start()
def is_alive(self):
return self.thread and self.thread.is_alive()
def start(self):
"""
        Monitors the IRC connection threads and attempts to restart any that died unexpectedly. If a connection
        appears to be throttled, its restart is postponed by an extra 30 seconds.
:return:
"""
global irc_connections
self.start_connections()
schedule = {} # used to keep track of reconnection schedules
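        # schedule maps a connection name to the earliest time a restart may be attempted;
        # throttled connections get pushed back by an extra `self.delay` seconds.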
while not self.shutdown_event.is_set():
for conn_name, conn in irc_connections.items():
# Don't want to revive if connection was closed cleanly
if not conn.running:
continue
now = datetime.now()
# Attempt to revive the thread if it has died. conn.running will be True if it died unexpectedly.
if not conn and self.config.get(conn_name):
try:
self.restart_connection(conn_name, self.config[conn_name])
except IOError as e:
log.error(e)
elif not conn.is_alive() and conn.running:
if conn_name not in schedule:
schedule[conn_name] = now + timedelta(seconds=5)
# add extra time if throttled
if conn.throttled:
schedule[conn_name] += timedelta(seconds=self.delay)
# is it time yet?
if schedule[conn_name] <= now:
log.error('IRC connection for %s has died unexpectedly. Restarting it.', conn_name)
try:
self.restart_connection(conn_name, conn.config)
except IOError as e:
log.error(e)
# remove it from the schedule
del schedule[conn_name]
time.sleep(1)
self.stop_connections(self.wait)
irc_connections = {}
def restart_connections(self, name=None):
if name:
self.restart_connection(name)
else:
for name, connection in irc_connections.items():
self.restart_connection(name, connection.config)
def restart_connection(self, name, config=None):
if not config:
config = irc_connections[name].config
if irc_connections[name].is_alive():
self.stop_connection(name)
irc_connections[name] = IRCConnection(config, name)
irc_connections[name].thread.start()
def start_connections(self):
"""
Start all the irc connections. Stop the daemon if there are failures.
:return:
"""
# First we validate the config for all connections including their .tracker files
for conn_name, config in self.config.items():
try:
log.info('Starting IRC connection for %s', conn_name)
conn = IRCConnection(config, conn_name)
irc_connections[conn_name] = conn
config_hash['names'][conn_name] = get_config_hash(config)
except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
log.error(e)
if conn_name in irc_connections:
del irc_connections[conn_name] # remove it from the list of connections
# Now we can start
for conn_name, connection in irc_connections.items():
connection.thread.start()
def stop_connections(self, wait, name=None):
if name:
self.stop_connection(name, wait)
else:
for name in irc_connections.keys():
self.stop_connection(name, wait)
def stop_connection(self, name, wait=False):
if irc_connections[name].is_alive():
irc_connections[name].stop(wait)
irc_connections[name].thread.join(11)
def stop(self, wait):
self.wait = wait
self.shutdown_event.set()
def status(self, name=None):
status = []
if name:
if name not in irc_connections:
raise ValueError('%s is not a valid irc connection' % name)
else:
status.append(self.status_dict(name))
else:
for n in irc_connections.keys():
status.append(self.status_dict(n))
return status
def status_dict(self, name):
status = {name: {}}
connection = irc_connections[name]
status[name]['alive'] = connection.is_alive()
status[name]['channels'] = [{key: value} for key, value in connection.channels.items()]
status[name]['connected_channels'] = connection.connected_channels
status[name]['server'] = connection.servers[0]
status[name]['port'] = connection.port
return status
def update_config(self, config):
new_irc_connections = {}
removed_connections = set(self.config.keys()) - set(config.keys())
for name, conf in config.items():
hash = get_config_hash(conf)
if name in self.config and config_hash['names'].get(name) == hash:
continue
try:
new_irc_connections[name] = IRCConnection(conf, name)
config_hash['names'][name] = hash
except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
log.error('Failed to update config. Error when updating %s: %s', name, e)
return
# stop connections that have been removed from config
for name in removed_connections:
self.stop_connection(name)
del irc_connections[name]
# and (re)start the new ones
for name, connection in new_irc_connections.items():
if name in irc_connections:
self.stop_connection(name)
irc_connections[name] = connection
connection.thread.start()
self.config = config
@event('manager.daemon.started')
def irc_start(manager):
irc_update_config(manager)
@event('manager.config_updated')
def irc_update_config(manager):
global irc_manager, config_hash
# Exit if we're not running daemon mode
if not manager.is_daemon:
return
config = manager.config.get('irc')
# No config, no connections
if not config:
log.debug('No irc connections defined in the config')
stop_irc(manager)
return
if irc_bot is None:
log.error('ImportError: irc_bot module not found. Shutting down daemon.')
stop_irc(manager)
manager.shutdown(finish_queue=False)
return
config_hash.setdefault('names', {})
new_config_hash = get_config_hash(config)
if config_hash.get('config') == new_config_hash:
log.verbose('IRC config has not been changed. Not reloading any connections.')
return
    config_hash['config'] = new_config_hash
if irc_manager is not None and irc_manager.is_alive():
irc_manager.update_config(config)
else:
irc_manager = IRCConnectionManager(config)
@event('manager.shutdown_requested')
def shutdown_requested(manager):
stop_irc(manager, wait=True)
@event('manager.shutdown')
def stop_irc(manager, wait=False):
if irc_manager is not None and irc_manager.is_alive():
log.info('Shutting down IRC.')
irc_manager.stop(wait)
# this check is necessary for when the irc manager is the one shutting down the daemon
# a thread can't join itself
if not threading.current_thread() == irc_manager.thread:
# It's important to give the threads time to shut down to avoid socket issues later (eg. quick restart)
irc_manager.thread.join(len(irc_connections.keys()) * 11)
@event('config.register')
def register_plugin():
register_config_key('irc', schema)
| mit | -8,097,727,778,710,655,000 | 39.448819 | 117 | 0.540515 | false |
jarrodmcc/OpenFermion | src/openfermion/utils/_sparse_tools_test.py | 1 | 55877 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sparse_tools.py."""
from __future__ import absolute_import, division
import numpy
import unittest
from numpy.linalg import multi_dot
from scipy.linalg import eigh, norm
from scipy.sparse import csc_matrix
from scipy.special import comb
from openfermion.hamiltonians import (fermi_hubbard, jellium_model,
wigner_seitz_length_scale)
from openfermion.ops import FermionOperator, up_index, down_index
from openfermion.transforms import (get_fermion_operator, get_sparse_operator,
jordan_wigner)
from openfermion.utils import (
Grid, fourier_transform, normal_ordered, number_operator)
from openfermion.utils._jellium_hf_state import (
lowest_single_particle_energy_states)
from openfermion.utils._linear_qubit_operator import LinearQubitOperator
from openfermion.utils._slater_determinants_test import (
random_quadratic_hamiltonian)
from openfermion.utils._sparse_tools import *
class SparseOperatorTest(unittest.TestCase):
def test_kronecker_operators(self):
self.assertAlmostEqual(
0, numpy.amax(numpy.absolute(
kronecker_operators(3 * [identity_csc]) -
kronecker_operators(3 * [pauli_x_csc]) ** 2)))
def test_qubit_jw_fermion_integration(self):
# Initialize a random fermionic operator.
fermion_operator = FermionOperator(((3, 1), (2, 1), (1, 0), (0, 0)),
-4.3)
fermion_operator += FermionOperator(((3, 1), (1, 0)), 8.17)
fermion_operator += 3.2 * FermionOperator()
# Map to qubits and compare matrix versions.
qubit_operator = jordan_wigner(fermion_operator)
qubit_sparse = get_sparse_operator(qubit_operator)
qubit_spectrum = sparse_eigenspectrum(qubit_sparse)
fermion_sparse = jordan_wigner_sparse(fermion_operator)
fermion_spectrum = sparse_eigenspectrum(fermion_sparse)
self.assertAlmostEqual(0., numpy.amax(
numpy.absolute(fermion_spectrum - qubit_spectrum)))
class JordanWignerSparseTest(unittest.TestCase):
def test_jw_sparse_0create(self):
expected = csc_matrix(([1], ([1], [0])), shape=(2, 2))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^')).A,
expected.A))
def test_jw_sparse_1annihilate(self):
expected = csc_matrix(([1, -1], ([0, 2], [1, 3])), shape=(4, 4))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('1')).A,
expected.A))
def test_jw_sparse_0create_2annihilate(self):
expected = csc_matrix(([-1j, 1j],
([4, 6], [1, 3])),
shape=(8, 8))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^ 2', -1j)).A,
expected.A))
def test_jw_sparse_0create_3annihilate(self):
expected = csc_matrix(([-1j, 1j, 1j, -1j],
([8, 10, 12, 14], [1, 3, 5, 7])),
shape=(16, 16))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^ 3', -1j)).A,
expected.A))
def test_jw_sparse_twobody(self):
expected = csc_matrix(([1, 1], ([6, 14], [5, 13])), shape=(16, 16))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('2^ 1^ 1 3')).A,
expected.A))
def test_qubit_operator_sparse_n_qubits_too_small(self):
with self.assertRaises(ValueError):
qubit_operator_sparse(QubitOperator('X3'), 1)
def test_qubit_operator_sparse_n_qubits_not_specified(self):
expected = csc_matrix(([1, 1, 1, 1], ([1, 0, 3, 2], [0, 1, 2, 3])),
shape=(4, 4))
self.assertTrue(numpy.allclose(
qubit_operator_sparse(QubitOperator('X1')).A,
expected.A))
def test_get_linear_qubit_operator_diagonal_wrong_n(self):
"""Testing with wrong n_qubits."""
with self.assertRaises(ValueError):
get_linear_qubit_operator_diagonal(QubitOperator('X3'), 1)
def test_get_linear_qubit_operator_diagonal_0(self):
"""Testing with zero term."""
qubit_operator = QubitOperator.zero()
vec_expected = numpy.zeros(8)
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator, 3), vec_expected))
def test_get_linear_qubit_operator_diagonal_zero(self):
"""Get zero diagonals from get_linear_qubit_operator_diagonal."""
qubit_operator = QubitOperator('X0 Y1')
vec_expected = numpy.zeros(4)
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
def test_get_linear_qubit_operator_diagonal_non_zero(self):
"""Get non zero diagonals from get_linear_qubit_operator_diagonal."""
qubit_operator = QubitOperator('Z0 Z2')
vec_expected = numpy.array([1, -1, 1, -1, -1, 1, -1, 1])
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
def test_get_linear_qubit_operator_diagonal_cmp_zero(self):
"""Compare get_linear_qubit_operator_diagonal with
get_linear_qubit_operator."""
qubit_operator = QubitOperator('Z1 X2 Y5')
vec_expected = numpy.diag(LinearQubitOperator(qubit_operator) *
numpy.eye(2 ** 6))
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
def test_get_linear_qubit_operator_diagonal_cmp_non_zero(self):
"""Compare get_linear_qubit_operator_diagonal with
get_linear_qubit_operator."""
qubit_operator = QubitOperator('Z1 Z2 Z5')
vec_expected = numpy.diag(LinearQubitOperator(qubit_operator) *
numpy.eye(2 ** 6))
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
class ComputationalBasisStateTest(unittest.TestCase):
def test_computational_basis_state(self):
comp_basis_state = jw_configuration_state([0, 2, 5], 7)
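        # Occupying modes 0, 2 and 5 of 7 qubits gives the bit string 1010010,
        # i.e. computational basis index 64 + 16 + 2 = 82.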
self.assertAlmostEqual(comp_basis_state[82], 1.)
self.assertAlmostEqual(sum(comp_basis_state), 1.)
class JWHartreeFockStateTest(unittest.TestCase):
def test_jw_hartree_fock_state(self):
hartree_fock_state = jw_hartree_fock_state(3, 7)
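        # Filling the lowest 3 of 7 modes gives the bit string 1110000,
        # i.e. computational basis index 64 + 32 + 16 = 112.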
self.assertAlmostEqual(hartree_fock_state[112], 1.)
self.assertAlmostEqual(sum(hartree_fock_state), 1.)
class JWNumberIndicesTest(unittest.TestCase):
def test_jw_sparse_index(self):
"""Test the indexing scheme for selecting specific particle numbers"""
expected = [1, 2]
calculated_indices = jw_number_indices(1, 2)
self.assertEqual(expected, calculated_indices)
expected = [3]
calculated_indices = jw_number_indices(2, 2)
self.assertEqual(expected, calculated_indices)
def test_jw_number_indices(self):
n_qubits = numpy.random.randint(1, 12)
n_particles = numpy.random.randint(n_qubits + 1)
number_indices = jw_number_indices(n_particles, n_qubits)
subspace_dimension = len(number_indices)
self.assertEqual(subspace_dimension, comb(n_qubits, n_particles))
for index in number_indices:
binary_string = bin(index)[2:].zfill(n_qubits)
n_ones = binary_string.count('1')
self.assertEqual(n_ones, n_particles)
class JWSzIndicesTest(unittest.TestCase):
def test_jw_sz_indices(self):
"""Test the indexing scheme for selecting specific sz value"""
def sz_integer(bitstring):
"""Computes the total number of occupied up sites
minus the total number of occupied down sites."""
n_sites = len(bitstring) // 2
n_up = len([site for site in range(n_sites)
if bitstring[up_index(site)] == '1'])
n_down = len([site for site in range(n_sites)
if bitstring[down_index(site)] == '1'])
return n_up - n_down
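        # (Assumes OpenFermion's interleaved spin ordering: up_index(i) == 2*i and
        # down_index(i) == 2*i + 1, so even/odd qubits hold up/down spin-orbitals.)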
def jw_sz_indices_brute_force(sz_value, n_qubits):
"""Computes the correct indices by brute force."""
indices = []
for bitstring in itertools.product(['0', '1'], repeat=n_qubits):
if (sz_integer(bitstring) ==
int(2 * sz_value)):
indices.append(int(''.join(bitstring), 2))
return indices
# General test
n_sites = numpy.random.randint(1, 10)
n_qubits = 2 * n_sites
sz_int = ((-1) ** numpy.random.randint(2) *
numpy.random.randint(n_sites + 1))
sz_value = sz_int / 2.
correct_indices = jw_sz_indices_brute_force(sz_value, n_qubits)
subspace_dimension = len(correct_indices)
calculated_indices = jw_sz_indices(sz_value, n_qubits)
self.assertEqual(len(calculated_indices), subspace_dimension)
for index in calculated_indices:
binary_string = bin(index)[2:].zfill(n_qubits)
self.assertEqual(sz_integer(binary_string), sz_int)
# Test fixing particle number
n_particles = abs(sz_int)
correct_indices = [index for index in correct_indices
if bin(index)[2:].count('1') == n_particles]
subspace_dimension = len(correct_indices)
calculated_indices = jw_sz_indices(sz_value, n_qubits,
n_electrons=n_particles)
self.assertEqual(len(calculated_indices), subspace_dimension)
for index in calculated_indices:
binary_string = bin(index)[2:].zfill(n_qubits)
self.assertEqual(sz_integer(binary_string), sz_int)
self.assertEqual(binary_string.count('1'), n_particles)
# Test exceptions
with self.assertRaises(ValueError):
indices = jw_sz_indices(3, 3)
with self.assertRaises(ValueError):
indices = jw_sz_indices(3.1, 4)
with self.assertRaises(ValueError):
indices = jw_sz_indices(1.5, 8, n_electrons=6)
with self.assertRaises(ValueError):
indices = jw_sz_indices(1.5, 8, n_electrons=1)
class JWNumberRestrictOperatorTest(unittest.TestCase):
def test_jw_restrict_operator(self):
"""Test the scheme for restricting JW encoded operators to number"""
# Make a Hamiltonian that cares mostly about number of electrons
n_qubits = 6
target_electrons = 3
penalty_const = 100.
number_sparse = jordan_wigner_sparse(number_operator(n_qubits))
bias_sparse = jordan_wigner_sparse(
sum([FermionOperator(((i, 1), (i, 0)), 1.0) for i
in range(n_qubits)], FermionOperator()))
hamiltonian_sparse = penalty_const * (
number_sparse - target_electrons *
scipy.sparse.identity(2**n_qubits)).dot(
number_sparse - target_electrons *
scipy.sparse.identity(2**n_qubits)) + bias_sparse
restricted_hamiltonian = jw_number_restrict_operator(
hamiltonian_sparse, target_electrons, n_qubits)
true_eigvals, _ = eigh(hamiltonian_sparse.A)
test_eigvals, _ = eigh(restricted_hamiltonian.A)
self.assertAlmostEqual(norm(true_eigvals[:20] - test_eigvals[:20]),
0.0)
def test_jw_restrict_operator_hopping_to_1_particle(self):
hop = FermionOperator('3^ 1') + FermionOperator('1^ 3')
hop_sparse = jordan_wigner_sparse(hop, n_qubits=4)
hop_restrict = jw_number_restrict_operator(hop_sparse, 1, n_qubits=4)
expected = csc_matrix(([1, 1], ([0, 2], [2, 0])), shape=(4, 4))
self.assertTrue(numpy.allclose(hop_restrict.A, expected.A))
def test_jw_restrict_operator_interaction_to_1_particle(self):
interaction = FermionOperator('3^ 2^ 4 1')
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
interaction_restrict = jw_number_restrict_operator(
interaction_sparse, 1, n_qubits=6)
expected = csc_matrix(([], ([], [])), shape=(6, 6))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_operator_interaction_to_2_particles(self):
interaction = (FermionOperator('3^ 2^ 4 1') +
FermionOperator('4^ 1^ 3 2'))
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
interaction_restrict = jw_number_restrict_operator(
interaction_sparse, 2, n_qubits=6)
dim = 6 * 5 // 2 # shape of new sparse array
# 3^ 2^ 4 1 maps 2**4 + 2 = 18 to 2**3 + 2**2 = 12 and vice versa;
# in the 2-particle subspace (1, 4) and (2, 3) are 7th and 9th.
expected = csc_matrix(([-1, -1], ([7, 9], [9, 7])), shape=(dim, dim))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_operator_hopping_to_1_particle_default_nqubits(self):
interaction = (FermionOperator('3^ 2^ 4 1') +
FermionOperator('4^ 1^ 3 2'))
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
# n_qubits should default to 6
interaction_restrict = jw_number_restrict_operator(
interaction_sparse, 2)
dim = 6 * 5 // 2 # shape of new sparse array
# 3^ 2^ 4 1 maps 2**4 + 2 = 18 to 2**3 + 2**2 = 12 and vice versa;
# in the 2-particle subspace (1, 4) and (2, 3) are 7th and 9th.
expected = csc_matrix(([-1, -1], ([7, 9], [9, 7])), shape=(dim, dim))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_jellium_ground_state_integration(self):
n_qubits = 4
grid = Grid(dimensions=1, length=n_qubits, scale=1.0)
jellium_hamiltonian = jordan_wigner_sparse(
jellium_model(grid, spinless=False))
# 2 * n_qubits because of spin
number_sparse = jordan_wigner_sparse(number_operator(2 * n_qubits))
restricted_number = jw_number_restrict_operator(number_sparse, 2)
restricted_jellium_hamiltonian = jw_number_restrict_operator(
jellium_hamiltonian, 2)
energy, ground_state = get_ground_state(restricted_jellium_hamiltonian)
number_expectation = expectation(restricted_number, ground_state)
self.assertAlmostEqual(number_expectation, 2)
class JWSzRestrictOperatorTest(unittest.TestCase):
def test_restrict_interaction_hamiltonian(self):
"""Test restricting a coulomb repulsion Hamiltonian to a specified
Sz manifold."""
x_dim = 3
y_dim = 2
interaction_term = fermi_hubbard(x_dim, y_dim, 0., 1.)
interaction_sparse = get_sparse_operator(interaction_term)
sz_value = 2
interaction_restricted = jw_sz_restrict_operator(interaction_sparse,
sz_value)
restricted_interaction_values = set([
int(value.real) for value in interaction_restricted.diagonal()])
# Originally the eigenvalues run from 0 to 6 but after restricting,
# they should run from 0 to 2
self.assertEqual(restricted_interaction_values, {0, 1, 2})
class JWNumberRestrictStateTest(unittest.TestCase):
def test_jw_number_restrict_state(self):
n_qubits = numpy.random.randint(1, 12)
n_particles = numpy.random.randint(0, n_qubits)
number_indices = jw_number_indices(n_particles, n_qubits)
subspace_dimension = len(number_indices)
# Create a vector that has entry 1 for every coordinate with
# the specified particle number, and 0 everywhere else
vector = numpy.zeros(2**n_qubits, dtype=float)
vector[number_indices] = 1
# Restrict the vector
restricted_vector = jw_number_restrict_state(vector, n_particles)
# Check that it has the correct shape
self.assertEqual(restricted_vector.shape[0], subspace_dimension)
# Check that it has the same norm as the original vector
self.assertAlmostEqual(inner_product(vector, vector),
inner_product(restricted_vector,
restricted_vector))
class JWSzRestrictStateTest(unittest.TestCase):
def test_jw_sz_restrict_state(self):
n_sites = numpy.random.randint(1, 10)
n_qubits = 2 * n_sites
sz_int = ((-1) ** numpy.random.randint(2) *
numpy.random.randint(n_sites + 1))
sz_value = sz_int / 2
sz_indices = jw_sz_indices(sz_value, n_qubits)
subspace_dimension = len(sz_indices)
# Create a vector that has entry 1 for every coordinate in
# the specified subspace, and 0 everywhere else
vector = numpy.zeros(2**n_qubits, dtype=float)
vector[sz_indices] = 1
# Restrict the vector
restricted_vector = jw_sz_restrict_state(vector, sz_value)
# Check that it has the correct shape
self.assertEqual(restricted_vector.shape[0], subspace_dimension)
# Check that it has the same norm as the original vector
self.assertAlmostEqual(inner_product(vector, vector),
inner_product(restricted_vector,
restricted_vector))
class JWGetGroundStatesByParticleNumberTest(unittest.TestCase):
def test_jw_get_ground_state_at_particle_number_herm_conserving(self):
# Initialize a particle-number-conserving Hermitian operator
ferm_op = FermionOperator('0^ 1') + FermionOperator('1^ 0') + \
FermionOperator('1^ 2') + FermionOperator('2^ 1') + \
FermionOperator('1^ 3', -.4) + FermionOperator('3^ 1', -.4)
jw_hamiltonian = jordan_wigner(ferm_op)
sparse_operator = get_sparse_operator(jw_hamiltonian)
n_qubits = 4
num_op = get_sparse_operator(number_operator(n_qubits))
# Test each possible particle number
for particle_number in range(n_qubits):
# Get the ground energy and ground state at this particle number
energy, state = jw_get_ground_state_at_particle_number(
sparse_operator, particle_number)
# Check that it's an eigenvector with the correct eigenvalue
self.assertTrue(
numpy.allclose(sparse_operator.dot(state), energy * state))
# Check that it has the correct particle number
num = expectation(num_op, state)
self.assertAlmostEqual(num, particle_number)
def test_jw_get_ground_state_at_particle_number_hubbard(self):
model = fermi_hubbard(2, 2, 1.0, 4.0)
sparse_operator = get_sparse_operator(model)
n_qubits = count_qubits(model)
num_op = get_sparse_operator(number_operator(n_qubits))
# Test each possible particle number
for particle_number in range(n_qubits):
# Get the ground energy and ground state at this particle number
energy, state = jw_get_ground_state_at_particle_number(
sparse_operator, particle_number)
# Check that it's an eigenvector with the correct eigenvalue
self.assertTrue(
numpy.allclose(sparse_operator.dot(state), energy * state))
# Check that it has the correct particle number
num = expectation(num_op, state)
self.assertAlmostEqual(num, particle_number)
def test_jw_get_ground_state_at_particle_number_jellium(self):
grid = Grid(2, 2, 1.0)
model = jellium_model(grid, spinless=True, plane_wave=False)
sparse_operator = get_sparse_operator(model)
n_qubits = count_qubits(model)
num_op = get_sparse_operator(number_operator(n_qubits))
# Test each possible particle number
for particle_number in range(n_qubits):
# Get the ground energy and ground state at this particle number
energy, state = jw_get_ground_state_at_particle_number(
sparse_operator, particle_number)
# Check that it's an eigenvector with the correct eigenvalue
self.assertTrue(
numpy.allclose(sparse_operator.dot(state), energy * state))
# Check that it has the correct particle number
num = expectation(num_op, state)
self.assertAlmostEqual(num, particle_number)
class JWGetGaussianStateTest(unittest.TestCase):
def setUp(self):
self.n_qubits_range = range(2, 10)
def test_ground_state_particle_conserving(self):
"""Test getting the ground state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, True)
# Compute the true ground state
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
ground_energy, ground_state = get_ground_state(sparse_operator)
# Compute the ground state using the circuit
circuit_energy, circuit_state = jw_get_gaussian_state(
quadratic_hamiltonian)
# Check that the energies match
self.assertAlmostEqual(ground_energy, circuit_energy)
# Check that the state obtained using the circuit is a ground state
difference = (sparse_operator * circuit_state -
ground_energy * circuit_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_ground_state_particle_nonconserving(self):
"""Test getting the ground state of a Hamiltonian that does not
conserve particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a non-particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, False)
# Compute the true ground state
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
ground_energy, ground_state = get_ground_state(sparse_operator)
# Compute the ground state using the circuit
circuit_energy, circuit_state = (
jw_get_gaussian_state(quadratic_hamiltonian))
# Check that the energies match
self.assertAlmostEqual(ground_energy, circuit_energy)
# Check that the state obtained using the circuit is a ground state
difference = (sparse_operator * circuit_state -
ground_energy * circuit_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_excited_state_particle_conserving(self):
"""Test getting an excited state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, True)
# Pick some orbitals to occupy
num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1)
occupied_orbitals = numpy.random.choice(
range(n_qubits), num_occupied_orbitals, False)
# Compute the Gaussian state
circuit_energy, gaussian_state = jw_get_gaussian_state(
quadratic_hamiltonian, occupied_orbitals)
# Compute the true energy
orbital_energies, constant = (
quadratic_hamiltonian.orbital_energies())
energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant
# Check that the energies match
self.assertAlmostEqual(energy, circuit_energy)
# Check that the state obtained using the circuit is an eigenstate
# with the correct eigenvalue
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
difference = (sparse_operator * gaussian_state -
energy * gaussian_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_excited_state_particle_nonconserving(self):
"""Test getting an excited state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a non-particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, False)
# Pick some orbitals to occupy
num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1)
occupied_orbitals = numpy.random.choice(
range(n_qubits), num_occupied_orbitals, False)
# Compute the Gaussian state
circuit_energy, gaussian_state = jw_get_gaussian_state(
quadratic_hamiltonian, occupied_orbitals)
# Compute the true energy
orbital_energies, constant = (
quadratic_hamiltonian.orbital_energies())
energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant
# Check that the energies match
self.assertAlmostEqual(energy, circuit_energy)
# Check that the state obtained using the circuit is an eigenstate
# with the correct eigenvalue
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
difference = (sparse_operator * gaussian_state -
energy * gaussian_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_bad_input(self):
"""Test bad input."""
with self.assertRaises(ValueError):
energy, state = jw_get_gaussian_state('a')
class JWSparseGivensRotationTest(unittest.TestCase):
def test_bad_input(self):
with self.assertRaises(ValueError):
givens_matrix = jw_sparse_givens_rotation(0, 2, 1., 1., 5)
with self.assertRaises(ValueError):
givens_matrix = jw_sparse_givens_rotation(4, 5, 1., 1., 5)
class JWSlaterDeterminantTest(unittest.TestCase):
def test_hadamard_transform(self):
r"""Test creating the states
1 / sqrt(2) (a^\dagger_0 + a^\dagger_1) |vac>
and
1 / sqrt(2) (a^\dagger_0 - a^\dagger_1) |vac>.
"""
slater_determinant_matrix = numpy.array([[1., 1.]]) / numpy.sqrt(2.)
slater_determinant = jw_slater_determinant(slater_determinant_matrix)
self.assertAlmostEqual(slater_determinant[1],
slater_determinant[2])
self.assertAlmostEqual(abs(slater_determinant[1]),
1. / numpy.sqrt(2.))
self.assertAlmostEqual(abs(slater_determinant[0]), 0.)
self.assertAlmostEqual(abs(slater_determinant[3]), 0.)
slater_determinant_matrix = numpy.array([[1., -1.]]) / numpy.sqrt(2.)
slater_determinant = jw_slater_determinant(slater_determinant_matrix)
self.assertAlmostEqual(slater_determinant[1],
-slater_determinant[2])
self.assertAlmostEqual(abs(slater_determinant[1]),
1. / numpy.sqrt(2.))
self.assertAlmostEqual(abs(slater_determinant[0]), 0.)
self.assertAlmostEqual(abs(slater_determinant[3]), 0.)
class GroundStateTest(unittest.TestCase):
def test_get_ground_state_hermitian(self):
ground = get_ground_state(get_sparse_operator(
QubitOperator('Y0 X1') + QubitOperator('Z0 Z1')))
expected_state = csc_matrix(([1j, 1], ([1, 2], [0, 0])),
shape=(4, 1)).A
expected_state /= numpy.sqrt(2.0)
self.assertAlmostEqual(ground[0], -2)
self.assertAlmostEqual(
numpy.absolute(
expected_state.T.conj().dot(ground[1]))[0], 1.)
class ExpectationTest(unittest.TestCase):
def test_expectation_correct_sparse_matrix(self):
operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([0., 1.j, 0., 1.j])
self.assertAlmostEqual(expectation(operator, vector), 2.0)
density_matrix = scipy.sparse.csc_matrix(
numpy.outer(vector, numpy.conjugate(vector)))
self.assertAlmostEqual(expectation(operator, density_matrix), 2.0)
def test_expectation_correct_linear_operator(self):
operator = LinearQubitOperator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([0., 1.j, 0., 1.j])
self.assertAlmostEqual(expectation(operator, vector), 2.0)
def test_expectation_handles_column_vector(self):
operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([[0.], [1.j], [0.], [1.j]])
self.assertAlmostEqual(expectation(operator, vector), 2.0)
def test_expectation_correct_zero(self):
operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([1j, -1j, -1j, -1j])
self.assertAlmostEqual(expectation(operator, vector), 0.0)
class VarianceTest(unittest.TestCase):
def test_variance_row_vector(self):
X = pauli_matrix_map['X']
Z = pauli_matrix_map['Z']
zero = numpy.array([1., 0.])
plus = numpy.array([1., 1.]) / numpy.sqrt(2)
minus = numpy.array([1., -1.]) / numpy.sqrt(2)
self.assertAlmostEqual(variance(Z, zero), 0.)
self.assertAlmostEqual(variance(X, zero), 1.)
self.assertAlmostEqual(variance(Z, plus), 1.)
self.assertAlmostEqual(variance(X, plus), 0.)
self.assertAlmostEqual(variance(Z, minus), 1.)
self.assertAlmostEqual(variance(X, minus), 0.)
def test_variance_column_vector(self):
X = pauli_matrix_map['X']
Z = pauli_matrix_map['Z']
zero = numpy.array([[1.], [0.]])
plus = numpy.array([[1.], [1.]]) / numpy.sqrt(2)
minus = numpy.array([[1.], [-1.]]) / numpy.sqrt(2)
self.assertAlmostEqual(variance(Z, zero), 0.)
self.assertAlmostEqual(variance(X, zero), 1.)
self.assertAlmostEqual(variance(Z, plus), 1.)
self.assertAlmostEqual(variance(X, plus), 0.)
self.assertAlmostEqual(variance(Z, minus), 1.)
self.assertAlmostEqual(variance(X, minus), 0.)
class ExpectationComputationalBasisStateTest(unittest.TestCase):
def test_expectation_fermion_operator_single_number_terms(self):
operator = FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1')
state = csc_matrix(([1], ([15], [0])), shape=(16, 1))
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.9)
def test_expectation_fermion_operator_two_number_terms(self):
operator = (FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1') +
FermionOperator('2^ 1^ 2 1', -1.7))
state = csc_matrix(([1], ([6], [0])), shape=(16, 1))
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 3.6)
def test_expectation_identity_fermion_operator(self):
operator = FermionOperator.identity() * 1.1
state = csc_matrix(([1], ([6], [0])), shape=(16, 1))
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.1)
def test_expectation_state_is_list_single_number_terms(self):
operator = FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1')
state = [1, 1, 1, 1]
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.9)
def test_expectation_state_is_list_fermion_operator_two_number_terms(self):
operator = (FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1') +
FermionOperator('2^ 1^ 2 1', -1.7))
state = [0, 1, 1]
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 3.6)
def test_expectation_state_is_list_identity_fermion_operator(self):
operator = FermionOperator.identity() * 1.1
state = [0, 1, 1]
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.1)
def test_expectation_bad_operator_type(self):
with self.assertRaises(TypeError):
expectation_computational_basis_state(
'never', csc_matrix(([1], ([6], [0])), shape=(16, 1)))
def test_expectation_qubit_operator_not_implemented(self):
with self.assertRaises(NotImplementedError):
expectation_computational_basis_state(
QubitOperator(), csc_matrix(([1], ([6], [0])), shape=(16, 1)))
class ExpectationDualBasisOperatorWithPlaneWaveBasisState(unittest.TestCase):
def setUp(self):
grid_length = 4
dimension = 1
wigner_seitz_radius = 10.
self.spinless = True
self.n_spatial_orbitals = grid_length ** dimension
n_qubits = self.n_spatial_orbitals
self.n_particles = 3
# Compute appropriate length scale and the corresponding grid.
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, self.n_particles, dimension)
self.grid1 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid1, self.spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, self.n_particles))
self.hf_state_index1 = numpy.sum(2 ** occupied_states)
self.hf_state1 = numpy.zeros(2 ** n_qubits)
self.hf_state1[self.hf_state_index1] = 1.0
self.orbital_occupations1 = [digit == '1' for digit in
bin(self.hf_state_index1)[2:]][::-1]
self.occupied_orbitals1 = [index for index, occupied in
enumerate(self.orbital_occupations1)
if occupied]
self.reversed_occupied_orbitals1 = list(self.occupied_orbitals1)
for i in range(len(self.reversed_occupied_orbitals1)):
self.reversed_occupied_orbitals1[i] = -1 + int(numpy.log2(
self.hf_state1.shape[0])) - self.reversed_occupied_orbitals1[i]
self.reversed_hf_state_index1 = sum(
2 ** index for index in self.reversed_occupied_orbitals1)
def test_1body_hopping_operator_1D(self):
operator = FermionOperator('2^ 0')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_1body_number_operator_1D(self):
operator = FermionOperator('2^ 2')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_2body_partial_number_operator_high_1D(self):
operator = FermionOperator('2^ 1^ 2 0')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_2body_partial_number_operator_mid_1D(self):
operator = FermionOperator('1^ 0^ 1 2')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_3body_double_number_operator_1D(self):
operator = FermionOperator('3^ 2^ 1^ 3 1 0')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_2body_adjacent_number_operator_1D(self):
operator = FermionOperator('3^ 2^ 2 1')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_1d5_with_spin_10particles(self):
dimension = 1
grid_length = 5
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = False
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 10
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('6^ 0^ 1^ 3 5 4', 2) +
FermionOperator('7^ 6^ 5 4', -3.7j) +
FermionOperator('3^ 3', 2.1) +
FermionOperator('3^ 2', 1.7))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = 2.1
# Calculated from expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
def test_1d5_with_spin_7particles(self):
dimension = 1
grid_length = 5
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = False
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 7
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('6^ 0^ 1^ 3 5 4', 2) +
FermionOperator('7^ 2^ 4 1') +
FermionOperator('3^ 3', 2.1) +
FermionOperator('5^ 3^ 1 0', 7.3))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = 1.66 - 0.0615536707435j
# Calculated with expected = expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
def test_3d2_spinless(self):
dimension = 3
grid_length = 2
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = True
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 5
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('4^ 2^ 3^ 5 5 4', 2) +
FermionOperator('7^ 6^ 7 4', -3.7j) +
FermionOperator('3^ 7', 2.1))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = -0.2625 - 0.4625j
# Calculated with expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
def test_3d2_with_spin(self):
dimension = 3
grid_length = 2
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = False
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 9
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('4^ 2^ 3^ 5 5 4', 2) +
FermionOperator('7^ 6^ 7 4', -3.7j) +
FermionOperator('3^ 7', 2.1))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = -0.2625 - 0.578125j
# Calculated from expected = expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
class GetGapTest(unittest.TestCase):
def test_get_gap(self):
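        # get_gap returns the difference between the two lowest eigenvalues of the operator.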
operator = QubitOperator('Y0 X1') + QubitOperator('Z0 Z1')
self.assertAlmostEqual(get_gap(get_sparse_operator(operator)), 2.0)
def test_get_gap_nonhermitian_error(self):
operator = (QubitOperator('X0 Y1', 1 + 1j) +
QubitOperator('Z0 Z1', 1j) + QubitOperator((), 2 + 1j))
with self.assertRaises(ValueError):
get_gap(get_sparse_operator(operator))
class InnerProductTest(unittest.TestCase):
def test_inner_product(self):
state_1 = numpy.array([1., 1.j])
state_2 = numpy.array([1., -1.j])
self.assertAlmostEqual(inner_product(state_1, state_1), 2.)
self.assertAlmostEqual(inner_product(state_1, state_2), 0.)
class BosonSparseTest(unittest.TestCase):
def setUp(self):
self.hbar = 1.
self.d = 5
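        # Truncated annihilation operator in the d-dimensional Fock basis; bd is its
        # adjoint (creation operator), and q, p are the corresponding quadrature operators.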
self.b = numpy.diag(numpy.sqrt(numpy.arange(1, self.d)), 1)
self.bd = self.b.conj().T
self.q = numpy.sqrt(self.hbar/2)*(self.b + self.bd)
self.p = -1j*numpy.sqrt(self.hbar/2)*(self.b - self.bd)
self.Id = numpy.identity(self.d)
def test_boson_ladder_noninteger_trunc(self):
with self.assertRaises(ValueError):
b = boson_ladder_sparse(1, 0, 0, 0.1)
with self.assertRaises(ValueError):
b = boson_ladder_sparse(1, 0, 0, -1)
with self.assertRaises(ValueError):
b = boson_ladder_sparse(1, 0, 0, 0)
def test_boson_ladder_destroy_one_mode(self):
b = boson_ladder_sparse(1, 0, 0, self.d).toarray()
self.assertTrue(numpy.allclose(b, self.b))
def test_boson_ladder_create_one_mode(self):
bd = boson_ladder_sparse(1, 0, 1, self.d).toarray()
self.assertTrue(numpy.allclose(bd, self.bd))
def test_boson_ladder_single_adjoint(self):
b = boson_ladder_sparse(1, 0, 0, self.d).toarray()
bd = boson_ladder_sparse(1, 0, 1, self.d).toarray()
self.assertTrue(numpy.allclose(b.conj().T, bd))
def test_boson_ladder_two_mode(self):
res = boson_ladder_sparse(2, 0, 0, self.d).toarray()
expected = numpy.kron(self.b, self.Id)
self.assertTrue(numpy.allclose(res, expected))
res = boson_ladder_sparse(2, 1, 0, self.d).toarray()
expected = numpy.kron(self.Id, self.b)
self.assertTrue(numpy.allclose(res, expected))
def test_single_quad_noninteger_trunc(self):
with self.assertRaises(ValueError):
b = single_quad_op_sparse(1, 0, 'q', self.hbar, 0.1)
with self.assertRaises(ValueError):
b = single_quad_op_sparse(1, 0, 'q', self.hbar, -1)
with self.assertRaises(ValueError):
b = single_quad_op_sparse(1, 0, 'q', self.hbar, 0)
def test_single_quad_q_one_mode(self):
res = single_quad_op_sparse(1, 0, 'q', self.hbar, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.q))
self.assertTrue(numpy.allclose(res, res.conj().T))
def test_single_quad_p_one_mode(self):
res = single_quad_op_sparse(1, 0, 'p', self.hbar, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.p))
self.assertTrue(numpy.allclose(res, res.conj().T))
def test_single_quad_two_mode(self):
res = single_quad_op_sparse(2, 0, 'q', self.hbar, self.d).toarray()
expected = numpy.kron(self.q, self.Id)
self.assertTrue(numpy.allclose(res, expected))
res = single_quad_op_sparse(2, 1, 'p', self.hbar, self.d).toarray()
expected = numpy.kron(self.Id, self.p)
self.assertTrue(numpy.allclose(res, expected))
def test_boson_operator_sparse_trunc(self):
op = BosonOperator('0')
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, 0.1)
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, -1)
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, 0)
def test_boson_operator_invalid_op(self):
op = FermionOperator('0')
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, self.d)
def test_boson_operator_sparse_empty(self):
for op in (BosonOperator(), QuadOperator()):
res = boson_operator_sparse(op, self.d)
self.assertEqual(res, numpy.array([[0]]))
def test_boson_operator_sparse_identity(self):
for op in (BosonOperator(''), QuadOperator('')):
res = boson_operator_sparse(op, self.d)
self.assertEqual(res, numpy.array([[1]]))
def test_boson_operator_sparse_single(self):
op = BosonOperator('0')
res = boson_operator_sparse(op, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.b))
op = BosonOperator('0^')
res = boson_operator_sparse(op, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.bd))
op = QuadOperator('q0')
res = boson_operator_sparse(op, self.d, self.hbar).toarray()
self.assertTrue(numpy.allclose(res, self.q))
op = QuadOperator('p0')
res = boson_operator_sparse(op, self.d, self.hbar).toarray()
self.assertTrue(numpy.allclose(res, self.p))
def test_boson_operator_sparse_number(self):
op = BosonOperator('0^ 0')
res = boson_operator_sparse(op, self.d).toarray()
self.assertTrue(numpy.allclose(res, numpy.dot(self.bd, self.b)))
def test_boson_operator_sparse_multi_mode(self):
op = BosonOperator('0^ 1 1^ 2')
res = boson_operator_sparse(op, self.d).toarray()
b0 = boson_ladder_sparse(3, 0, 0, self.d).toarray()
b1 = boson_ladder_sparse(3, 1, 0, self.d).toarray()
b2 = boson_ladder_sparse(3, 2, 0, self.d).toarray()
expected = multi_dot([b0.T, b1, b1.T, b2])
self.assertTrue(numpy.allclose(res, expected))
op = QuadOperator('q0 p0 p1')
res = boson_operator_sparse(op, self.d, self.hbar).toarray()
expected = numpy.identity(self.d**2)
for term in op.terms:
for i, j in term:
expected = expected.dot(single_quad_op_sparse(
2, i, j, self.hbar, self.d).toarray())
self.assertTrue(numpy.allclose(res, expected))
def test_boson_operator_sparse_addition(self):
op = BosonOperator('0^ 1')
op += BosonOperator('0 0^')
res = boson_operator_sparse(op, self.d).toarray()
b0 = boson_ladder_sparse(2, 0, 0, self.d).toarray()
b1 = boson_ladder_sparse(2, 1, 0, self.d).toarray()
expected = numpy.dot(b0.T, b1) + numpy.dot(b0, b0.T)
self.assertTrue(numpy.allclose(res, expected))
| apache-2.0 | 8,698,967,164,351,995,000 | 40.668158 | 81 | 0.614493 | false |
rchuppala/usc_agent | src/usc-agent-dev/common/source/pyang/pyang/syntax.py | 1 | 11073 | """Description of YANG & YIN syntax."""
import re
### Regular expressions - constraints on arguments
# keywords and identifiers
identifier = r"[_A-Za-z][._\-A-Za-z0-9]*"
prefix = identifier
keyword = '((' + prefix + '):)?(' + identifier + ')'
# no group version of keyword
keyword_ng = '(?:(' + prefix + '):)?(?:' + identifier + ')'
re_keyword = re.compile(keyword)
re_keyword_start = re.compile('^' + keyword)
pos_integer = r"[1-9][0-9]*"
nonneg_integer = r"(0|[1-9])[0-9]*"
integer_ = r"[-+]?" + nonneg_integer
decimal_ = r"(\+|\-)?[0-9]+(\.[0-9]+)?"
length_str = '((min|max|[0-9]+)\s*' \
'(\.\.\s*' \
'(min|max|[0-9]+)\s*)?)'
length_expr = length_str + '(\|\s*' + length_str + ')*'
re_length_part = re.compile(length_str)
range_str = '((\-INF|min|max|((\+|\-)?[0-9]+(\.[0-9]+)?))\s*' \
'(\.\.\s*' \
'(INF|min|max|(\+|\-)?[0-9]+(\.[0-9]+)?)\s*)?)'
range_expr = range_str + '(\|\s*' + range_str + ')*'
re_range_part = re.compile(range_str)
re_identifier = re.compile("^" + identifier + "$")
# path and unique
node_id = keyword_ng
rel_path_keyexpr = r"(\.\./)+(" + node_id + "/)*" + node_id
path_key_expr = r"(current\s*\(\s*\)/" + rel_path_keyexpr + ")"
path_equality_expr = node_id + r"\s*=\s*" + path_key_expr
path_predicate = r"\s*\[\s*" + path_equality_expr + r"\s*\]\s*"
absolute_path_arg = "(?:/" + node_id + "(" + path_predicate + ")*)+"
descendant_path_arg = node_id + "(" + path_predicate + ")*" + \
"(?:" + absolute_path_arg + ")?"
relative_path_arg = r"(\.\./)*" + descendant_path_arg
deref_path_arg = r"deref\s*\(\s*(?:" + relative_path_arg + \
")\s*\)/\.\./" + relative_path_arg
path_arg = "(" + absolute_path_arg + "|" + relative_path_arg + "|" + \
deref_path_arg + ")"
absolute_schema_nodeid = "(/" + node_id + ")+"
descendant_schema_nodeid = node_id + "(" + absolute_schema_nodeid + ")?"
schema_nodeid = "("+absolute_schema_nodeid+"|"+descendant_schema_nodeid+")"
unique_arg = descendant_schema_nodeid + "(\s+" + descendant_schema_nodeid + ")*"
key_arg = node_id + "(\s+" + node_id + ")*"
re_schema_node_id_part = re.compile('/' + keyword)
# URI - RFC 3986, Appendix A
scheme = "[A-Za-z][-+.A-Za-z0-9]*"
unreserved = "[-._~A-Za-z0-9]"
pct_encoded = "%[0-9A-F]{2}"
sub_delims = "[!$&'()*+,;=]"
pchar = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|[:@])")
segment = pchar + "*"
segment_nz = pchar + "+"
userinfo = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|:)*")
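# dec_octet matches a decimal number in the range 0-255 (one IPv4 octet).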
dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
ipv4address = "(" + dec_octet + r"\.){3}" + dec_octet
h16 = "[0-9A-F]{1,4}"
ls32 = "(" + h16 + ":" + h16 + "|" + ipv4address + ")"
ipv6address = (
"((" + h16 + ":){6}" + ls32 +
"|::(" + h16 + ":){5}" + ls32 +
"|(" + h16 + ")?::(" + h16 + ":){4}" + ls32 +
"|((" + h16 + ":)?" + h16 + ")?::(" + h16 + ":){3}" + ls32 +
"|((" + h16 + ":){,2}" + h16 + ")?::(" + h16 + ":){2}" + ls32 +
"|((" + h16 + ":){,3}" + h16 + ")?::" + h16 + ":" + ls32 +
"|((" + h16 + ":){,4}" + h16 + ")?::" + ls32 +
"|((" + h16 + ":){,5}" + h16 + ")?::" + h16 +
"|((" + h16 + ":){,6}" + h16 + ")?::)")
ipvfuture = r"v[0-9A-F]+\.(" + unreserved + "|" + sub_delims + "|:)+"
ip_literal = r"\[(" + ipv6address + "|" + ipvfuture + r")\]"
reg_name = "(" + unreserved + "|" + pct_encoded + "|" + sub_delims + ")*"
host = "(" + ip_literal + "|" + ipv4address + "|" + reg_name + ")"
port = "[0-9]*"
authority = "(" + userinfo + "@)?" + host + "(:" + port + ")?"
path_abempty = "(/" + segment + ")*"
path_absolute = "/(" + segment_nz + "(/" + segment + ")*)?"
path_rootless = segment_nz + "(/" + segment + ")*"
path_empty = pchar + "{0}"
hier_part = ("(" + "//" + authority + path_abempty + "|" +
path_absolute + "|" + path_rootless + "|" + path_empty + ")")
query = "(" + pchar + "|[/?])*"
fragment = query
uri = (scheme + ":" + hier_part + r"(\?" + query + ")?" +
"(#" + fragment + ")?")
# Date
date = r"[1-2][0-9]{3}-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])"
re_nonneg_integer = re.compile("^" + nonneg_integer + "$")
re_integer = re.compile("^" + integer_ + "$")
re_decimal = re.compile("^" + decimal_ + "$")
re_uri = re.compile("^" + uri + "$")
re_boolean = re.compile("^(true|false)$")
re_version = re.compile("^1$")
re_date = re.compile("^" + date +"$")
re_status = re.compile("^(current|obsolete|deprecated)$")
re_key = re.compile("^" + key_arg + "$")
re_length = re.compile("^" + length_expr + "$")
re_range = re.compile("^" + range_expr + "$")
re_pos_integer = re.compile(r"^(unbounded|" + pos_integer + r")$")
re_ordered_by = re.compile(r"^(user|system)$")
re_node_id = re.compile("^" + node_id + "$")
re_path = re.compile("^" + path_arg + "$")
re_absolute_path = re.compile("^" + absolute_path_arg + "$")
re_unique = re.compile("^" + unique_arg + "$")
re_schema_nodeid = re.compile("^" + schema_nodeid + "$")
re_absolute_schema_nodeid = re.compile("^" + absolute_schema_nodeid + "$")
re_descendant_schema_nodeid = re.compile("^" + descendant_schema_nodeid + "$")
re_deviate = re.compile("^(add|delete|replace|not-supported)$")
arg_type_map = {
"identifier": lambda s: re_identifier.search(s) is not None,
"non-negative-integer": lambda s: re_nonneg_integer.search(s) is not None,
"integer": lambda s: re_integer.search(s) is not None,
"uri": lambda s: re_uri.search(s) is not None,
"boolean": lambda s: re_boolean.search(s) is not None,
"version": lambda s: re_version.search(s) is not None,
"date": lambda s: re_date.search(s) is not None,
"status-arg": lambda s: re_status.search(s) is not None,
"key-arg": lambda s: re_key.search(s) is not None,
"length-arg": lambda s: re_length.search(s) is not None,
"range-arg": lambda s: re_range.search(s) is not None,
"max-value": lambda s: re_pos_integer.search(s) is not None,
"ordered-by-arg": lambda s: re_ordered_by.search(s) is not None,
"identifier-ref": lambda s: re_node_id.search(s) is not None,
"path-arg": lambda s: re_path.search(s) is not None,
"absolute-path-arg": lambda s: re_absolute_path.search(s) is not None,
"unique-arg": lambda s: re_unique.search(s) is not None,
"absolute-schema-nodeid": lambda s: \
re_absolute_schema_nodeid.search(s) is not None,
"descendant-schema-nodeid": lambda s: \
re_descendant_schema_nodeid.search(s) is not None,
"schema-nodeid": lambda s: \
re_schema_nodeid.search(s) is not None,
"enum-arg": lambda s: chk_enum_arg(s),
"fraction-digits-arg": lambda s: chk_fraction_digits_arg(s),
"deviate-arg": lambda s: re_deviate.search(s) is not None,
}
"""Argument type definitions.
Validation functions (mostly regular-expression checks) for all argument
types except plain string, which is checked directly by the parser.
"""
def chk_enum_arg(s):
"""Checks if the string `s` is a valid enum string.
Return True or False."""
if len(s) == 0 or s[0].isspace() or s[-1].isspace():
return False
else:
return True
def chk_fraction_digits_arg(s):
"""Checks if the string `s` is a valid fraction-digits argument.
Return True or False."""
try:
v = int(s)
if v >= 1 and v <= 18:
return True
else:
return False
except ValueError:
return False
def add_arg_type(arg_type, regexp):
"""Add a new arg_type to the map.
Used by extension plugins to register their own argument types."""
arg_type_map[arg_type] = regexp
# keyword argument-name yin-element
yin_map = \
{'anyxml': ('name', False),
'argument': ('name', False),
'augment': ('target-node', False),
'base': ('name', False),
'belongs-to': ('module', False),
'bit': ('name', False),
'case': ('name', False),
'choice': ('name', False),
'config': ('value', False),
'contact': ('text', True),
'container': ('name', False),
'default': ('value', False),
'description': ('text', True),
'deviate': ('value', False),
'deviation': ('target-node', False),
'enum': ('name', False),
'error-app-tag': ('value', False),
'error-message': ('value', True),
'extension': ('name', False),
'feature': ('name', False),
'fraction-digits': ('value', False),
'grouping': ('name', False),
'identity': ('name', False),
'if-feature': ('name', False),
'import': ('module', False),
'include': ('module', False),
'input': (None, None),
'key': ('value', False),
'leaf': ('name', False),
'leaf-list': ('name', False),
'length': ('value', False),
'list': ('name', False),
'mandatory': ('value', False),
'max-elements': ('value', False),
'min-elements': ('value', False),
'module': ('name', False),
'must': ('condition', False),
'namespace': ('uri', False),
'notification': ('name', False),
'ordered-by': ('value', False),
'organization': ('text', True),
'output': (None, None),
'path': ('value', False),
'pattern': ('value', False),
'position': ('value', False),
'presence': ('value', False),
'prefix': ('value', False),
'range': ('value', False),
'reference': ('text', True),
'refine': ('target-node', False),
'require-instance': ('value', False),
'revision': ('date', False),
'revision-date': ('date', False),
'rpc': ('name', False),
'status': ('value', False),
'submodule': ('name', False),
'type': ('name', False),
'typedef': ('name', False),
'unique': ('tag', False),
'units': ('name', False),
'uses': ('name', False),
'value': ('value', False),
'when': ('condition', False),
'yang-version': ('value', False),
'yin-element': ('value', False),
}
"""Mapping of statements to the YIN representation of their arguments.
The values are pairs whose first component is the name of the attribute
or subelement carrying the argument and the second component specifies
whether the argument is stored in a subelement (True) or in an attribute
(False). See YANG specification.
"""
| gpl-2.0 | 2,142,903,817,177,840,400 | 41.588462 | 80 | 0.493362 | false |
bocaaust/FreshLife | django_project/fresh_life/fresh_life/wsgi.py | 1 | 1431 | """
WSGI config for fresh_life project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "fresh_life.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fresh_life.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| apache-2.0 | -5,162,366,673,369,774,000 | 43.71875 | 79 | 0.792453 | false |
Azure/azure-sdk-for-python | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_04_01_preview/aio/operations/_private_endpoint_connections_operations.py | 1 | 20474 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""PrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2021_04_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
vault_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> Optional["_models.PrivateEndpointConnection"]:
"""Gets the specified private endpoint connection associated with the key vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:param private_endpoint_connection_name: Name of the private endpoint connection associated
with the key vault.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def put(
self,
resource_group_name: str,
vault_name: str,
private_endpoint_connection_name: str,
properties: "_models.PrivateEndpointConnection",
**kwargs
) -> "_models.PrivateEndpointConnection":
"""Updates the specified private endpoint connection associated with the key vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:param private_endpoint_connection_name: Name of the private endpoint connection associated
with the key vault.
:type private_endpoint_connection_name: str
:param properties: The intended state of private endpoint connection.
:type properties: ~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.put.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(properties, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vault_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> Optional["_models.PrivateEndpointConnection"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 202:
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
vault_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
"""Deletes the specified private endpoint connection associated with the key vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:param private_endpoint_connection_name: Name of the private endpoint connection associated
with the key vault.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vault_name=vault_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def list_by_resource(
self,
resource_group_name: str,
vault_name: str,
**kwargs
) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
"""The List operation gets information about the private endpoint connections associated with the
vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.keyvault.v2021_04_01_preview.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections'} # type: ignore
| mit | 945,774,547,791,755,900 | 51.904393 | 231 | 0.66245 | false |
ngokevin/zamboni | mkt/operators/tests/test_authorization.py | 1 | 5750 | from nose.tools import ok_
from rest_framework.generics import GenericAPIView
from django.contrib.auth.models import AnonymousUser
from amo.tests import TestCase
from mkt.access.middleware import ACLMiddleware
from mkt.carriers import CARRIER_MAP as CARRIERS
from mkt.feed.constants import FEED_TYPE_SHELF
from mkt.feed.tests.test_models import FeedTestMixin
from mkt.operators.authorization import (OperatorAuthorization,
OperatorShelfAuthorization)
from mkt.operators.models import OperatorPermission
from mkt.regions import REGIONS_DICT as REGIONS
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
from test_utils import RequestFactory
class BaseTestOperatorAuthorization(FeedTestMixin, TestCase):
fixtures = fixture('user_2519') + FeedTestMixin.fixtures
def setUp(self):
super(BaseTestOperatorAuthorization, self).setUp()
self.auth = self.auth_class()
self.user = UserProfile.objects.get(pk=2519)
self.view = GenericAPIView()
def make_admin(self):
self.grant_permission(self.user, 'OperatorDashboard:*')
def give_objpermission(self, carrier, region):
carrier_id = CARRIERS[carrier].id
region_id = REGIONS[region].id
OperatorPermission.objects.create(user=self.user, region=region_id,
carrier=carrier_id)
def is_authorized(self, verb, anon=False, carrier='telefonica',
region='br'):
request = self.request(verb, anon=anon, carrier=carrier,
region=region)
return self.auth.has_permission(request, self.view)
def is_object_authorized(self, verb, obj, anon=False, carrier='telefonica',
region='br'):
request = self.request(verb, anon=anon, carrier=carrier,
region=region)
return self.auth.has_object_permission(request, self.view, obj)
def request(self, verb, anon=False, **kwargs):
request = getattr(RequestFactory(), verb.lower())('/', kwargs)
request.user = AnonymousUser() if anon else self.user
ACLMiddleware().process_request(request)
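        # Process the request through the ACL middleware, as would happen for a real request.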
return request
class TestOperatorAuthorization(BaseTestOperatorAuthorization):
auth_class = OperatorAuthorization
def test_safe(self):
ok_(self.is_authorized('GET', anon=True))
ok_(self.is_authorized('GET'))
def test_safe_permission(self):
self.make_admin()
ok_(self.is_authorized('GET'))
def test_safe_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_authorized('GET', carrier='telefonica', region='br'))
def test_safe_objpermission_mismatch(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_authorized('GET', carrier='america_movil', region='fr'))
def test_unsafe(self):
ok_(not self.is_authorized('POST', anon=True))
ok_(not self.is_authorized('POST'))
def test_unsafe_permission(self):
self.make_admin()
ok_(self.is_authorized('POST'))
def test_unsafe_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_authorized('POST'))
def test_unsafe_objpermission_mismatch(self):
self.give_objpermission('telefonica', 'br')
ok_(not self.is_authorized('POST', carrier='america_movil',
region='fr'))
class TestOperatorShelfAuthorization(BaseTestOperatorAuthorization):
auth_class = OperatorShelfAuthorization
def setUp(self):
super(TestOperatorShelfAuthorization, self).setUp()
self.feed_item = self.feed_item_factory(carrier=1, region=7, # TEF/BR
item_type=FEED_TYPE_SHELF)
self.shelf = self.feed_item.shelf
def test_safe_object(self):
ok_(self.is_object_authorized('GET', self.feed_item, anon=True))
ok_(self.is_object_authorized('GET', self.shelf, anon=True))
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
self.make_admin()
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
def test_safe_object_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
def test_safe_object_objpermission_mismatch(self):
self.give_objpermission('america_movil', 'fr')
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
def test_unsafe_object(self):
ok_(not self.is_object_authorized('POST', self.feed_item, anon=True))
ok_(not self.is_object_authorized('POST', self.shelf, anon=True))
ok_(not self.is_object_authorized('POST', self.feed_item))
ok_(not self.is_object_authorized('POST', self.shelf))
self.make_admin()
ok_(self.is_object_authorized('POST', self.feed_item))
ok_(self.is_object_authorized('POST', self.shelf))
def test_unsafe_object_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_object_authorized('POST', self.feed_item))
ok_(self.is_object_authorized('POST', self.shelf))
def test_unsafe_object_objpermission_mismatch(self):
self.give_objpermission('america_movil', 'fr')
ok_(not self.is_object_authorized('POST', self.feed_item))
ok_(not self.is_object_authorized('POST', self.shelf))
| bsd-3-clause | -7,164,898,191,117,203,000 | 39.20979 | 79 | 0.652522 | false |