# Dataset export of source files; original viewer columns: repo_name, path,
# copies, size, content, license, hash, line_mean, line_max, alpha_frac,
# autogenerated. One file per record follows.

# File: modules/google-earth-engine/docker/src/sepalinternal/mosaic/cloud_score.py
# Repo: openforis/sepal (license: mit)
import ee
# Based on scripts by Ian Hausman, which in turn is based on script by Matt Hancher
# https://groups.google.com/d/msg/google-earth-engine-developers/i63DS-Dg8Sg/_hgCBEYeBwAJ
def cloud_score(image):
def rescale(image, exp, thresholds):
return image.expression(exp, {'i': image}) \
.subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
    # Compute several indicators of cloudiness and take the minimum of them.
score = ee.Image(1)
blueCirrusScore = ee.Image(0)
# Clouds are reasonably bright in the blue or cirrus bands.
# Use .max as a pseudo OR conditional
blueCirrusScore = blueCirrusScore.max(rescale(image, 'i.blue', [0.1, 0.5]))
blueCirrusScore = blueCirrusScore.max(rescale(image, 'i.aerosol', [0.1, 0.5]))
blueCirrusScore = blueCirrusScore.max(rescale(image, 'i.cirrus', [0.1, 0.3]))
score = score.min(blueCirrusScore)
# Clouds are reasonably bright in all visible bands.
score = score.min(rescale(image, 'i.red + i.green + i.blue', [0.2, 0.8]))
# Clouds are reasonably bright in all infrared bands.
score = score.min(
rescale(image, 'i.nir + i.swir1 + i.swir2', [0.3, 0.8]))
# However, clouds are not snow.
ndsi = image.normalizedDifference(['green', 'swir1'])
score = score.min(rescale(ndsi, 'i', [0.8, 0.6]))
return score
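
# Illustrative usage sketch (assumes Earth Engine is initialised and that
# `image` carries the normalised band names referenced above: 'blue',
# 'aerosol', 'cirrus', 'red', 'green', 'nir', 'swir1', 'swir2', as produced
# by the surrounding SEPAL mosaic pipeline).
def mask_clouds(image, threshold=0.25):
    """Mask out pixels whose cloud score is at or above ``threshold``."""
    score = cloud_score(image)
    return image.updateMask(score.lt(threshold))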

# File: src/model/event.py
# Repo: pyrocko/pyrocko (license: gpl-3.0)
# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
from __future__ import absolute_import, division
import functools
import logging
import numpy as num
import hashlib
import base64
from pyrocko import util, moment_tensor
from pyrocko.guts import Float, String, Timestamp, Unicode, \
StringPattern, List, Dict, Any
from .location import Location
logger = logging.getLogger('pyrocko.model.event')
guts_prefix = 'pf'
d2r = num.pi / 180.
def cmp(a, b):
return (a > b) - (a < b)
def ehash(s):
return str(base64.urlsafe_b64encode(
hashlib.sha1(s.encode('utf8')).digest()).decode('ascii'))
def float_or_none_to_str(x, prec=9):
return 'None' if x is None else '{:.{prec}e}'.format(x, prec=prec)
class FileParseError(Exception):
pass
class EventExtrasDumpError(Exception):
pass
class EOF(Exception):
pass
class EmptyEvent(Exception):
pass
class Tag(StringPattern):
pattern = r'^[A-Za-z][A-Za-z0-9._]{0,128}(:[A-Za-z0-9._-]*)?$'
class Event(Location):
'''Seismic event representation
:param lat: latitude of hypocenter (default 0.0)
:param lon: longitude of hypocenter (default 0.0)
    :param time: origin time as system timestamp
:param name: event identifier as string (optional)
:param depth: source depth (optional)
:param magnitude: magnitude of event (optional)
:param region: source region (optional)
:param catalog: name of catalog that lists this event (optional)
:param moment_tensor: moment tensor as
:py:class:`moment_tensor.MomentTensor` instance (optional)
:param duration: source duration as float (optional)
:param tags: list of tags describing event (optional)
:param extras: dictionary for user defined event attributes (optional).
Keys must be strings, values must be YAML serializable.
'''
time = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
depth = Float.T(optional=True)
name = String.T(default='', optional=True, yamlstyle="'")
magnitude = Float.T(optional=True)
magnitude_type = String.T(optional=True, yamlstyle="'")
region = Unicode.T(optional=True, yamlstyle="'")
catalog = String.T(optional=True, yamlstyle="'")
moment_tensor = moment_tensor.MomentTensor.T(optional=True)
duration = Float.T(optional=True)
tags = List.T(Tag.T(), default=[])
extras = Dict.T(String.T(), Any.T(), default={})
def __init__(
self, lat=0., lon=0., north_shift=0., east_shift=0., time=0.,
name='', depth=None, elevation=None,
magnitude=None, magnitude_type=None, region=None, load=None,
loadf=None, catalog=None, moment_tensor=None, duration=None,
tags=None, extras=None):
if tags is None:
tags = []
if extras is None:
extras = {}
vals = None
if load is not None:
vals = Event.oldload(load)
elif loadf is not None:
vals = Event.oldloadf(loadf)
if vals:
lat, lon, north_shift, east_shift, time, name, depth, magnitude, \
magnitude_type, region, catalog, moment_tensor, duration, \
tags = vals
Location.__init__(
self, lat=lat, lon=lon,
north_shift=north_shift, east_shift=east_shift,
time=time, name=name, depth=depth,
elevation=elevation,
magnitude=magnitude, magnitude_type=magnitude_type,
region=region, catalog=catalog,
moment_tensor=moment_tensor, duration=duration, tags=tags,
extras=extras)
def time_as_string(self):
return util.time_to_str(self.time)
def set_name(self, name):
self.name = name
def olddump(self, filename):
file = open(filename, 'w')
self.olddumpf(file)
file.close()
def olddumpf(self, file):
if self.extras:
raise EventExtrasDumpError(
'Event user-defined extras attributes cannot be dumped in the '
'"basic" event file format. Use '
'dump_events(..., format="yaml").')
file.write('name = %s\n' % self.name)
file.write('time = %s\n' % util.time_to_str(self.time))
if self.lat != 0.0:
file.write('latitude = %.12g\n' % self.lat)
if self.lon != 0.0:
file.write('longitude = %.12g\n' % self.lon)
if self.north_shift != 0.0:
file.write('north_shift = %.12g\n' % self.north_shift)
if self.east_shift != 0.0:
file.write('east_shift = %.12g\n' % self.east_shift)
if self.magnitude is not None:
file.write('magnitude = %g\n' % self.magnitude)
file.write('moment = %g\n' %
moment_tensor.magnitude_to_moment(self.magnitude))
if self.magnitude_type is not None:
file.write('magnitude_type = %s\n' % self.magnitude_type)
if self.depth is not None:
file.write('depth = %.10g\n' % self.depth)
if self.region is not None:
file.write('region = %s\n' % self.region)
if self.catalog is not None:
file.write('catalog = %s\n' % self.catalog)
if self.moment_tensor is not None:
m = self.moment_tensor.m()
sdr1, sdr2 = self.moment_tensor.both_strike_dip_rake()
file.write((
'mnn = %g\nmee = %g\nmdd = %g\nmne = %g\nmnd = %g\nmed = %g\n'
'strike1 = %g\ndip1 = %g\nrake1 = %g\n'
'strike2 = %g\ndip2 = %g\nrake2 = %g\n') % (
(m[0, 0], m[1, 1], m[2, 2], m[0, 1], m[0, 2], m[1, 2]) +
sdr1 + sdr2))
if self.duration is not None:
file.write('duration = %g\n' % self.duration)
if self.tags:
file.write('tags = %s\n' % ', '.join(self.tags))
@staticmethod
def unique(events, deltat=10., group_cmp=(lambda a, b:
cmp(a.catalog, b.catalog))):
groups = Event.grouped(events, deltat)
events = []
for group in groups:
if group:
                group.sort(key=functools.cmp_to_key(group_cmp))
events.append(group[-1])
return events
@staticmethod
def grouped(events, deltat=10.):
events = list(events)
groups = []
for ia, a in enumerate(events):
groups.append([])
haveit = False
for ib, b in enumerate(events[:ia]):
if abs(b.time - a.time) < deltat:
groups[ib].append(a)
haveit = True
break
if not haveit:
groups[ia].append(a)
groups = [g for g in groups if g]
groups.sort(key=lambda g: sum(e.time for e in g) // len(g))
return groups
@staticmethod
def dump_catalog(events, filename=None, stream=None):
if filename is not None:
file = open(filename, 'w')
else:
file = stream
try:
i = 0
for ev in events:
ev.olddumpf(file)
file.write('--------------------------------------------\n')
i += 1
finally:
if filename is not None:
file.close()
@staticmethod
def oldload(filename):
with open(filename, 'r') as file:
return Event.oldloadf(file)
@staticmethod
def oldloadf(file):
d = {}
try:
for line in file:
if line.lstrip().startswith('#'):
continue
toks = line.split(' = ', 1)
if len(toks) == 2:
k, v = toks[0].strip(), toks[1].strip()
if k in ('name', 'region', 'catalog', 'magnitude_type'):
d[k] = v
if k in (('latitude longitude magnitude depth duration '
'north_shift east_shift '
'mnn mee mdd mne mnd med strike1 dip1 rake1 '
'strike2 dip2 rake2 duration').split()):
d[k] = float(v)
if k == 'time':
d[k] = util.str_to_time(v)
if k == 'tags':
d[k] = [x.strip() for x in v.split(',')]
if line.startswith('---'):
d['have_separator'] = True
break
except Exception as e:
raise FileParseError(e)
if not d:
raise EOF()
if 'have_separator' in d and len(d) == 1:
raise EmptyEvent()
mt = None
m6 = [d[x] for x in 'mnn mee mdd mne mnd med'.split() if x in d]
if len(m6) == 6:
mt = moment_tensor.MomentTensor(m=moment_tensor.symmat6(*m6))
else:
sdr = [d[x] for x in 'strike1 dip1 rake1'.split() if x in d]
if len(sdr) == 3:
moment = 1.0
if 'moment' in d:
moment = d['moment']
elif 'magnitude' in d:
moment = moment_tensor.magnitude_to_moment(d['magnitude'])
mt = moment_tensor.MomentTensor(
strike=sdr[0], dip=sdr[1], rake=sdr[2],
scalar_moment=moment)
return (
d.get('latitude', 0.0),
d.get('longitude', 0.0),
d.get('north_shift', 0.0),
d.get('east_shift', 0.0),
d.get('time', 0.0),
d.get('name', ''),
d.get('depth', None),
d.get('magnitude', None),
d.get('magnitude_type', None),
d.get('region', None),
d.get('catalog', None),
mt,
d.get('duration', None),
d.get('tags', []))
@staticmethod
def load_catalog(filename):
file = open(filename, 'r')
try:
while True:
try:
ev = Event(loadf=file)
yield ev
except EmptyEvent:
pass
except EOF:
pass
file.close()
def get_hash(self):
e = self
if isinstance(e.time, float):
stime = util.time_to_str(e.time, format='%Y-%m-%d %H:%M:%S.3FRAC')
else:
stime = util.time_to_str(e.time, format='%Y-%m-%d %H:%M:%S.6FRAC')
s = float_or_none_to_str
to_hash = ', '.join((
stime,
s(e.lat), s(e.lon), s(e.depth),
float_or_none_to_str(e.magnitude, 5),
str(e.catalog), str(e.name or ''),
str(e.region)))
return ehash(to_hash)
def human_str(self):
s = [
'Latitude [deg]: %g' % self.lat,
'Longitude [deg]: %g' % self.lon,
'Time [UTC]: %s' % util.time_to_str(self.time)]
if self.name:
s.append('Name: %s' % self.name)
if self.depth is not None:
s.append('Depth [km]: %g' % (self.depth / 1000.))
if self.magnitude is not None:
s.append('Magnitude [%s]: %3.1f' % (
self.magnitude_type or 'M?', self.magnitude))
if self.region:
s.append('Region: %s' % self.region)
if self.catalog:
s.append('Catalog: %s' % self.catalog)
if self.moment_tensor:
s.append(str(self.moment_tensor))
return '\n'.join(s)
def detect_format(filename):
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if not line or line.startswith('#') or line.startswith('%'):
continue
if line.startswith('--- !pf.Event'):
return 'yaml'
else:
return 'basic'
return 'basic'
def load_events(filename, format='detect'):
'''Read events file.
:param filename: name of file as str
:param format: file format: ``'detect'``, ``'basic'``, or ``'yaml'``
:returns: list of :py:class:`Event` objects
'''
if format == 'detect':
format = detect_format(filename)
if format == 'yaml':
from pyrocko import guts
events = [
ev for ev in guts.load_all(filename=filename)
if isinstance(ev, Event)]
return events
elif format == 'basic':
return list(Event.load_catalog(filename))
else:
from pyrocko.io.io_common import FileLoadError
raise FileLoadError('unknown event file format: %s' % format)
class OneEventRequired(Exception):
pass
def load_one_event(filename, format='detect'):
    events = load_events(filename, format)
if len(events) != 1:
raise OneEventRequired(
'exactly one event is required in "%s"' % filename)
return events[0]
def dump_events(events, filename=None, stream=None, format='basic'):
'''Write events file.
:param events: list of :py:class:`Event` objects
:param filename: name of file as str
:param format: file format: ``'basic'``, or ``'yaml'``
'''
if format == 'basic':
Event.dump_catalog(events, filename=filename, stream=stream)
elif format == 'yaml':
from pyrocko import guts
events = [ev for ev in events if isinstance(ev, Event)]
        guts.dump_all(object=events, filename=filename, stream=stream)
else:
from pyrocko.io.io_common import FileSaveError
raise FileSaveError('unknown event file format: %s' % format)
def load_kps_event_list(filename):
elist = []
f = open(filename, 'r')
for line in f:
toks = line.split()
if len(toks) < 7:
continue
tim = util.to_time_float(util.ctimegm(toks[0]+' '+toks[1]))
lat, lon, depth, magnitude = [float(x) for x in toks[2:6]]
duration = float(toks[10])
region = toks[-1]
name = util.gmctime_fn(tim)
e = Event(
            lat=lat, lon=lon, time=tim,
name=name,
depth=depth,
magnitude=magnitude,
duration=duration,
region=region)
elist.append(e)
f.close()
return elist
def load_gfz_event_list(filename):
from pyrocko import catalog
cat = catalog.Geofon()
elist = []
f = open(filename, 'r')
for line in f:
e = cat.get_event(line.strip())
elist.append(e)
f.close()
return elist
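
# Illustrative usage sketch: a minimal round trip through the readers and
# writers defined above. The file name and event values are arbitrary.
if __name__ == '__main__':
    ev = Event(
        lat=52.5, lon=13.4,
        time=util.str_to_time('2010-01-01 10:00:00'),
        name='ev01', magnitude=5.1)
    dump_events([ev], filename='events.txt')  # 'basic' format by default
    for loaded in load_events('events.txt'):  # format is auto-detected
        print(loaded.human_str())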

# File: game/views/api.py
# Repo: mikebryant/rapid-router (license: agpl-3.0)
# -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2015, Ocado Innovation Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from django.http import HttpResponse
from game.models import Level, Episode, LevelBlock, Block, Character, LevelDecor
from game.serializers import LevelListSerializer, EpisodeListSerializer, LevelDetailSerializer, EpisodeDetailSerializer, \
LevelBlockSerializer, BlockSerializer, CharacterSerializer, LevelMapDetailSerializer, \
LevelDecorSerializer, LevelModeSerializer, LevelMapListSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import viewsets
from game.decor import get_all_decor, get_decor_element_by_pk, get_decors_url
from game.theme import get_all_themes, get_theme_by_pk, get_themes_url
@api_view(('GET',))
def api_root(request, format=None):
return Response({
'blocks': reverse('block-list', request=request, format=format),
'characters': reverse('character-list', request=request, format=format),
'decors': reverse('decor-list', request=request, format=format),
'episodes': reverse('episode-list', request=request, format=format),
'levels': reverse('level-list', request=request, format=format),
'maps': reverse('map-list', request=request, format=format),
'themes': reverse('theme-list', request=request, format=format),
})
@api_view(('GET',))
def decor_list(request, format=None):
decors = get_all_decor()
    data = [get_decors_url(i.pk, request) for i in decors]
return Response(data)
@api_view(('GET',))
def decor_detail(request, pk, format=None):
try:
decor = get_decor_element_by_pk(pk=pk)
except KeyError:
return HttpResponse(status=404)
data = decor.__dict__.copy()
data['theme'] = get_themes_url(data['theme'].pk, request)
return Response(data)
@api_view(('GET',))
def level_list(request, format=None):
levels = Level.objects.sorted_levels()
serializer = LevelListSerializer(levels, many=True, context={'request': request})
return Response(serializer.data)
# pk is the episode id
@api_view(('GET',))
def level_for_episode(request, pk, format=None):
levels = Level.objects.filter(episode__id=pk)
serializer = LevelListSerializer(levels, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def level_detail(request, pk, format=None):
try:
level = Level.objects.get(pk=pk)
except Level.DoesNotExist:
return HttpResponse(status=404)
serializer = LevelDetailSerializer(level, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def map_list(request, format=None):
levels = Level.objects.sorted_levels()
serializer = LevelMapListSerializer(levels, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def map_for_level(request, pk, format=None):
try:
level = Level.objects.get(pk=pk)
except Level.DoesNotExist:
return HttpResponse(status=404)
serializer = LevelMapDetailSerializer(level, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def mode_for_level(request, pk, format=None):
try:
level = Level.objects.get(pk=pk)
except Level.DoesNotExist:
return HttpResponse(status=404)
serializer = LevelModeSerializer(level, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def episode_list(request, format=None):
episodes = Episode.objects.all()
serializer = EpisodeListSerializer(episodes, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def episode_detail(request, pk, format=None):
try:
episode = Episode.objects.get(pk=pk)
except Episode.DoesNotExist:
return HttpResponse(status=404)
serializer = EpisodeDetailSerializer(episode, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def levelblock_list(request, level, format=None):
blocks = LevelBlock.objects.filter(level__id=level)
serializer = LevelBlockSerializer(blocks, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def levelblock_for_level(request, pk, format=None):
levelblocks = LevelBlock.objects.filter(level__id=pk)
serializer = LevelBlockSerializer(levelblocks, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def levelblock_detail(request, pk, format=None):
try:
levelblock = LevelBlock.objects.get(pk=pk)
except LevelBlock.DoesNotExist:
return HttpResponse(status=404)
serializer = LevelBlockSerializer(levelblock, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def leveldecor_list(request, level, format=None):
leveldecors = LevelDecor.objects.filter(level__id=level)
serializer = LevelDecorSerializer(leveldecors, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def leveldecor_for_level(request, pk, format=None):
leveldecors = LevelDecor.objects.filter(level__id=pk)
serializer = LevelDecorSerializer(leveldecors, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def leveldecor_detail(request, pk, format=None):
try:
leveldecor = LevelDecor.objects.get(pk=pk)
except LevelDecor.DoesNotExist:
return HttpResponse(status=404)
serializer = LevelDecorSerializer(leveldecor, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def block_list(request, format=None):
block = Block.objects.all()
serializer = BlockSerializer(block, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def block_detail(request, pk, format=None):
try:
block = Block.objects.get(pk=pk)
except Block.DoesNotExist:
return HttpResponse(status=404)
serializer = BlockSerializer(block, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def theme_list(request, format=None):
themes = get_all_themes()
    data = [get_themes_url(i.pk, request) for i in themes]
return Response(data)
@api_view(('GET',))
def theme_detail(request, pk, format=None):
try:
theme = get_theme_by_pk(pk)
except KeyError:
return HttpResponse(status=404)
return Response(theme.__dict__)
@api_view(('GET',))
def character_list(request, format=None):
characters = Character.objects.all()
serializer = CharacterSerializer(characters, many=True, context={'request': request})
return Response(serializer.data)
@api_view(('GET',))
def character_detail(request, pk, format=None):
try:
character = Character.objects.get(pk=pk)
except Character.DoesNotExist:
return HttpResponse(status=404)
serializer = CharacterSerializer(character, context={'request': request})
return Response(serializer.data)
# May be used later when we need a viewset that requires multiple serializers.
class MultiSerializerViewSet(viewsets.ModelViewSet):
serializers = {
'default': None,
}
def get_serializer_class(self):
return self.serializers.get(self.action,
self.serializers['default'])
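
# Illustrative usage sketch for MultiSerializerViewSet: map DRF actions to
# serializers, falling back to 'default'. Hypothetical, since no viewset in
# this module uses it yet.
#
# class LevelViewSet(MultiSerializerViewSet):
#     queryset = Level.objects.all()
#     serializers = {
#         'default': LevelDetailSerializer,
#         'list': LevelListSerializer,
#     }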

# File: uber/models/mits.py
# Repo: magfest/ubersystem (license: agpl-3.0)
import os
from functools import wraps
from PIL import Image
from residue import CoerceUTF8 as UnicodeText, UTCDateTime, UUID
from sqlalchemy import and_
from sideboard.lib import on_startup
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Boolean, Integer
from sqlalchemy.ext.hybrid import hybrid_property
from uber.config import c
from uber.models import MagModel
from uber.models.types import default_relationship as relationship, utcnow, Choice, DefaultColumn as Column, MultiChoice
__all__ = ['MITSTeam', 'MITSApplicant', 'MITSGame', 'MITSPicture', 'MITSDocument', 'MITSTimes']
class MITSTeam(MagModel):
name = Column(UnicodeText)
days_available = Column(Integer, nullable=True)
hours_available = Column(Integer, nullable=True)
concurrent_attendees = Column(Integer, default=0)
panel_interest = Column(Boolean, nullable=True, admin_only=True)
showcase_interest = Column(Boolean, nullable=True, admin_only=True)
want_to_sell = Column(Boolean, default=False)
address = Column(UnicodeText)
submitted = Column(UTCDateTime, nullable=True)
waiver_signature = Column(UnicodeText)
waiver_signed = Column(UTCDateTime, nullable=True)
applied = Column(UTCDateTime, server_default=utcnow())
status = Column(Choice(c.MITS_APP_STATUS), default=c.PENDING, admin_only=True)
applicants = relationship('MITSApplicant', backref='team')
games = relationship('MITSGame', backref='team')
schedule = relationship('MITSTimes', uselist=False, backref='team')
panel_app = relationship('MITSPanelApplication', uselist=False, backref='team')
duplicate_of = Column(UUID, nullable=True)
deleted = Column(Boolean, default=False)
# We've found that a lot of people start filling out an application and
# then instead of continuing their application just start over fresh and
# fill out a new one. In these cases we mark the application as
# soft-deleted and then set the duplicate_of field so that when an
# applicant tries to log into the original application, we can redirect
# them to the correct application.
email_model_name = 'team'
@property
def accepted(self):
return self.status == c.ACCEPTED
@property
def email(self):
return [applicant.email for applicant in self.primary_contacts]
@property
def primary_contacts(self):
return [a for a in self.applicants if a.primary_contact]
@property
def salutation(self):
return ' and '.join(applicant.first_name for applicant in self.primary_contacts)
@property
def comped_badge_count(self):
return len([
a for a in self.applicants
if a.attendee_id and a.attendee.paid in [c.NEED_NOT_PAY, c.REFUNDED]])
@property
def total_badge_count(self):
return len([a for a in self.applicants if a.attendee_id])
@property
def can_add_badges(self):
uncomped_badge_count = len([
a for a in self.applicants
if a.attendee_id and a.attendee.paid not in [c.NEED_NOT_PAY, c.REFUNDED]])
claimed_badges = len(self.applicants) - uncomped_badge_count
return claimed_badges < c.MITS_BADGES_PER_TEAM
@property
def can_save(self):
return c.HAS_MITS_ADMIN_ACCESS or self.status in [c.ACCEPTED, c.WAITLISTED] or (
self.is_new
and c.BEFORE_MITS_SUBMISSION_DEADLINE
or c.BEFORE_MITS_EDITING_DEADLINE)
@property
def completed_panel_request(self):
return self.panel_interest is not None
@property
def completed_showcase_request(self):
return self.showcase_interest is not None
@property
def completed_hotel_form(self):
"""
This is "any" rather than "all" because teams are allowed to
add and remove members even after their application has been
submitted. Rather than suddenly downgrade their completion
percentage, it makes more sense to send such teams an
automated email indicating that they need to provide their
remaining hotel info.
"""
return any(a.declined_hotel_space or a.requested_room_nights for a in self.applicants)
@property
def no_hotel_space(self):
return all(a.declined_hotel_space for a in self.applicants)
@property
def steps_completed(self):
if not self.days_available:
return 1
elif not self.games:
return 2
elif not self.submitted:
return 3
else:
return 4
@property
def completion_percentage(self):
return 100 * self.steps_completed // c.MITS_APPLICATION_STEPS
class MITSApplicant(MagModel):
team_id = Column(ForeignKey('mits_team.id'))
attendee_id = Column(ForeignKey('attendee.id'), nullable=True)
primary_contact = Column(Boolean, default=False)
first_name = Column(UnicodeText)
last_name = Column(UnicodeText)
email = Column(UnicodeText)
cellphone = Column(UnicodeText)
contact_method = Column(Choice(c.MITS_CONTACT_OPTS), default=c.TEXTING)
declined_hotel_space = Column(Boolean, default=False)
requested_room_nights = Column(MultiChoice(c.MITS_ROOM_NIGHT_OPTS), default='')
email_model_name = 'applicant'
@property
def email_to_address(self):
if self.attendee:
return self.attendee.email
return self.email
@property
def full_name(self):
return self.first_name + ' ' + self.last_name
def has_requested(self, night):
return night in self.requested_room_nights_ints
class MITSGame(MagModel):
team_id = Column(ForeignKey('mits_team.id'))
name = Column(UnicodeText)
promo_blurb = Column(UnicodeText)
description = Column(UnicodeText)
genre = Column(UnicodeText)
phase = Column(Choice(c.MITS_PHASE_OPTS), default=c.DEVELOPMENT)
min_age = Column(Choice(c.MITS_AGE_OPTS), default=c.CHILD)
age_explanation = Column(UnicodeText)
min_players = Column(Integer, default=2)
max_players = Column(Integer, default=4)
copyrighted = Column(Choice(c.MITS_COPYRIGHT_OPTS), nullable=True)
personally_own = Column(Boolean, default=False)
unlicensed = Column(Boolean, default=False)
professional = Column(Boolean, default=False)
pictures = relationship('MITSPicture', backref='team')
documents = relationship('MITSDocument', backref='team')
@hybrid_property
def has_been_accepted(self):
return self.team.status == c.ACCEPTED
@has_been_accepted.expression
def has_been_accepted(cls):
return and_(MITSTeam.id == cls.team_id, MITSTeam.status == c.ACCEPTED)
@property
def guidebook_name(self):
return self.team.name
@property
def guidebook_subtitle(self):
return self.name
@property
def guidebook_desc(self):
return self.description
@property
def guidebook_location(self):
return ''
@property
def guidebook_image(self):
if not self.pictures:
return ''
for image in self.pictures:
if image.is_header:
return image.filename
return self.pictures[0].filename
@property
def guidebook_thumbnail(self):
if not self.pictures:
return ''
for image in self.pictures:
if image.is_thumbnail:
return image.filename
return self.pictures[1].filename if len(self.pictures) > 1 else self.pictures[0].filename
@property
def guidebook_images(self):
if not self.pictures:
return ['', '']
header = None
thumbnail = None
for image in self.pictures:
if image.is_header and not header:
header = image
if image.is_thumbnail and not thumbnail:
thumbnail = image
if not header:
header = self.pictures[0]
if not thumbnail:
thumbnail = self.pictures[1] if len(self.pictures) > 1 else self.pictures[0]
if header == thumbnail:
return [header.filename], [header]
else:
return [header.filename, thumbnail.filename], [header, thumbnail]
class MITSPicture(MagModel):
game_id = Column(UUID, ForeignKey('mits_game.id'))
filename = Column(UnicodeText)
content_type = Column(UnicodeText)
extension = Column(UnicodeText)
description = Column(UnicodeText)
@property
def url(self):
return '../mits/view_picture?id={}'.format(self.id)
@property
def filepath(self):
return os.path.join(c.MITS_PICTURE_DIR, str(self.id))
@property
def is_header(self):
try:
return Image.open(self.filepath).size == tuple(map(int, c.MITS_HEADER_SIZE))
except OSError:
# This probably isn't an image, so it's not a header image
return
@property
def is_thumbnail(self):
try:
return Image.open(self.filepath).size == tuple(map(int, c.MITS_THUMBNAIL_SIZE))
except OSError:
# This probably isn't an image, so it's not a thumbnail image
return
class MITSDocument(MagModel):
game_id = Column(UUID, ForeignKey('mits_game.id'))
filename = Column(UnicodeText)
description = Column(UnicodeText)
@property
def url(self):
return '../mits/download_doc?id={}'.format(self.id)
@property
def filepath(self):
return os.path.join(c.MITS_PICTURE_DIR, str(self.id))
class MITSTimes(MagModel):
team_id = Column(ForeignKey('mits_team.id'))
showcase_availability = Column(MultiChoice(c.MITS_SHOWCASE_SCHEDULE_OPTS))
availability = Column(MultiChoice(c.MITS_SCHEDULE_OPTS))
class MITSPanelApplication(MagModel):
team_id = Column(ForeignKey('mits_team.id'))
name = Column(UnicodeText)
description = Column(UnicodeText)
length = Column(Choice(c.PANEL_STRICT_LENGTH_OPTS), default=c.SIXTY_MIN)
participation_interest = Column(Boolean, default=False)
@on_startup
def add_applicant_restriction():
"""
We use convenience functions for our form handling, e.g. to
instantiate an attendee from an id or from form data we use the
session.attendee() method. This method runs on startup and overrides
the methods which are used for the game application forms to add a
new "applicant" parameter. If truthy, this triggers three
additional behaviors:
1) We check that there is currently a logged in team, and redirect
to the initial application form if there is not.
2) We check that the item being edited belongs to the
currently-logged-in team and raise an exception if it does not.
This check is bypassed for new things which have not yet been
saved to the database.
3) We set the "team" relationship on the model to the
logged-in team.
"""
from uber.models import Session
def override_getter(method_name):
orig_getter = getattr(Session.SessionMixin, method_name)
@wraps(orig_getter)
def with_applicant(self, *args, **kwargs):
applicant = kwargs.pop('applicant', False)
instance = orig_getter(self, *args, **kwargs)
if applicant:
team = self.logged_in_mits_team()
assert instance.is_new or team == instance.team
instance.team = team
return instance
setattr(Session.SessionMixin, method_name, with_applicant)
for name in [
'mits_applicant', 'mits_game', 'mits_times', 'mits_picture', 'mits_document', 'mits_panel_application'
]:
override_getter(name)
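
# Illustrative usage sketch for the `applicant` parameter wired in above;
# `session` is assumed to be an uber.models.Session serving a request from a
# logged-in MITS team.
#
# game = session.mits_game(id, applicant=True)
# # Raises AssertionError if the game belongs to another team; otherwise
# # game.team is now session.logged_in_mits_team().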

# File: tempest/api/compute/servers/test_virtual_interfaces.py
# Repo: nuagenetworks/tempest (license: apache-2.0)
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators
from tempest import test
CONF = config.CONF
class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
@classmethod
def setup_credentials(cls):
# This test needs a network and a subnet
cls.set_network_resources(network=True, subnet=True)
super(VirtualInterfacesTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(VirtualInterfacesTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
super(VirtualInterfacesTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@decorators.skip_because(bug="1183436",
condition=CONF.service_available.neutron)
@test.idempotent_id('96c4e2ef-5e4d-4d7f-87f5-fed6dca18016')
@test.services('network')
def test_list_virtual_interfaces(self):
# Positive test:Should be able to GET the virtual interfaces list
# for a given server_id
output = self.client.list_virtual_interfaces(self.server_id)
self.assertIsNotNone(output)
virt_ifaces = output
self.assertNotEqual(0, len(virt_ifaces['virtual_interfaces']),
'Expected virtual interfaces, got 0 interfaces.')
for virt_iface in virt_ifaces['virtual_interfaces']:
mac_address = virt_iface['mac_address']
self.assertTrue(netaddr.valid_mac(mac_address),
"Invalid mac address detected. mac address: %s"
% mac_address)

# File: grr/server/grr_response_server/gui/api_plugins/timeline.py
# Repo: dunkhong/grr (license: apache-2.0)
#!/usr/bin/env python
"""A module with API handlers related to the timeline colllection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from typing import Optional
from typing import Text
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import body
from grr_response_proto.api import timeline_pb2
from grr_response_server import access_control
from grr_response_server import data_store
from grr_response_server.flows.general import timeline
from grr_response_server.gui import api_call_handler_base
from grr_response_server.gui.api_plugins import client as api_client
from grr_response_server.gui.api_plugins import flow as api_flow
class ApiGetCollectedTimelineArgs(rdf_structs.RDFProtoStruct):
"""An RDF wrapper class for the arguments of timeline exporter arguments."""
protobuf = timeline_pb2.ApiGetCollectedTimelineArgs
rdf_deps = [
api_client.ApiClientId,
api_flow.ApiFlowId,
]
class ApiGetCollectedTimelineHandler(api_call_handler_base.ApiCallHandler):
"""An API handler for the timeline exporter."""
args_type = ApiGetCollectedTimelineArgs
  def Handle(
      self,
      args: ApiGetCollectedTimelineArgs,
      token: Optional[access_control.ACLToken] = None,
  ):
"""Handles requests for the timeline export API call."""
client_id = str(args.client_id)
flow_id = str(args.flow_id)
flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
if flow_obj.flow_class_name != timeline.TimelineFlow.__name__:
message = "Flow '{}' is not a timeline flow".format(flow_id)
raise ValueError(message)
if args.format == ApiGetCollectedTimelineArgs.Format.BODY: # pytype: disable=attribute-error
return self._StreamBody(client_id=client_id, flow_id=flow_id)
if args.format == ApiGetCollectedTimelineArgs.Format.RAW_GZCHUNKED: # pytype: disable=attribute-error
return self._StreamRawGzchunked(client_id=client_id, flow_id=flow_id)
message = "Incorrect timeline export format: {}".format(args.format)
raise ValueError(message)
  def _StreamBody(
      self,
      client_id: Text,
      flow_id: Text,
  ):
entries = timeline.Entries(client_id=client_id, flow_id=flow_id)
content = body.Stream(entries)
filename = "timeline_{}.body".format(flow_id)
return api_call_handler_base.ApiBinaryStream(filename, content)
  def _StreamRawGzchunked(
      self,
      client_id: Text,
      flow_id: Text,
  ):
content = timeline.Blobs(client_id=client_id, flow_id=flow_id)
filename = "timeline_{}.gzchunked".format(flow_id)
return api_call_handler_base.ApiBinaryStream(filename, content)
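
# Illustrative usage sketch: invoking the handler directly. The client and
# flow ids are placeholders.
#
# args = ApiGetCollectedTimelineArgs(
#     client_id='C.0123456789abcdef',
#     flow_id='ABCDEF42',
#     format=ApiGetCollectedTimelineArgs.Format.BODY)
# binary_stream = ApiGetCollectedTimelineHandler().Handle(args)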

# File: ersep_regulations/__openerp__.py
# Repo: barct/odoo-coop (license: gpl-3.0)
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Fernando Hidalgo (http://www.hidalgofernando.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'ERSeP Regulations',
'version': '9.0.0.0.1',
'category': 'Tools',
'sequence': 1,
'summary': '',
'description': """
ERSeP Regulations
=================
This module is a regionalization of Córdoba province for odoo-coop
Based on the experience of the "Cooperativa Anisacate" cooperative.
Uses the Argentine tax & legal regulations and particularly those of the province of "Córdoba"
through the regulator ERSeP.
""",
'author': 'Fernando Hidalgo',
'website': 'www.hidalgofernando.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'electric_utility',
'base',
'l10n_ar_chart',
],
'external_dependencies': {
# 'python': ['dbfread', 'hashlib'],
},
'data': [
'data/account_chart.xml',
'data/account_tax.xml',
'data/service_category.xml',
],
'demo': [
],
'test': [
],
}

# File: imagetrac_docker/taskmanager/migrations/0002_auto_20170122_1808.py
# Repo: arsenalstriker14/imagetraccloud (license: mit)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 18:08
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('taskmanager', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='inboxentry',
name='attachment',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment10',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment2',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment3',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment4',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment5',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment6',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment7',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment8',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
migrations.AlterField(
model_name='inboxentry',
name='attachment9',
field=models.FileField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/uploads', location='/app/uploads'), upload_to='attachments/'),
),
]

# File: gearshift/visit/api.py
# Repo: dbrattli/python-gearshift (license: mit)
import logging
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
import threading
import time
from random import random
from datetime import timedelta, datetime
import cherrypy
from cherrypy import request
from gearshift import config
from gearshift.util import load_class
from gearshift.identity.base import verify_identity_status
log = logging.getLogger("gearshift.visit")
# Global VisitManager
_manager = None
# Global list of plugins for the Visit Tracking framework
_plugins = list()
# Accessor functions for getting and setting the current visit information.
def current():
"""Retrieve the current visit record from the cherrypy request."""
return getattr(cherrypy.request, "tg_visit", None)
def set_current(visit):
"""Set the current visit record on the cherrypy request being processed."""
cherrypy.request.tg_visit = visit
def _create_visit_manager(timeout):
"""Create a VisitManager based on the plugin specified in the config file."""
plugin_name = config.get("tools.visit.manager",
"gearshift.visit.sovisit.SqlObjectVisitManager")
try:
plugin = load_class(plugin_name)
except Exception, e:
log.error("Error loading visit plugin '%s': %s", plugin_name, e)
raise RuntimeError("VisitManager plugin missing: %s" % plugin_name)
log.debug("Loading visit manager from plugin: %s", plugin_name)
return plugin(timeout)
# Interface for the TurboGears extension
def shutdown_extension():
# Bail out if this extension is not running.
global _manager
if not _manager:
return
log.info("Visit Tracking shutting down")
_manager.shutdown()
_manager = None
def create_extension_model():
"""Create the data model of the VisitManager if one exists."""
if _manager:
_manager.create_model()
def enable_visit_plugin(plugin):
"""Register a visit tracking plugin.
These plugins will be called for each request.
"""
_plugins.append(plugin)
class Visit(object):
"""Basic container for visit related data."""
def __init__(self, key, is_new):
self.key = key
self.is_new = is_new
class VisitTool(cherrypy.Tool):
"""A tool that automatically tracks visitors."""
def __init__(self):
log.debug("Visit tool initialised")
## start_extension()
# Raise priority since we need the VisitTool to run as early as
# possible
return super(VisitTool, self).__init__(point="before_handler",
callable=self.before_handler,
priority=20)
def start_extension(self):
# Bail out if the application hasn't enabled this extension
if not config.get("tools.visit.on", False):
return
# Bail out if this extension is already running
global _manager
if _manager:
return
log.info("Visit Tracking starting")
# How long may the visit be idle before a new visit ID is assigned?
# The default is 20 minutes.
timeout = timedelta(minutes=config.get("tools.visit.timeout", 20))
# Create the thread that manages updating the visits
_manager = _create_visit_manager(timeout)
def before_handler(self, **kw):
"""Check whether submitted request belongs to an existing visit."""
get = kw.get
# Where to look for the session key in the request and in which order
source = [s.strip().lower() for s in
kw.get('source', 'cookie').split(',')]
if set(source).difference(('cookie', 'form')):
log.warning("Unsupported 'tools.visit.source' '%s' in configuration.")
# Get the name to use for the identity cookie.
self.cookie_name = get("cookie.name", "tg-visit")
# and the name of the request param. MUST NOT contain dashes or dots,
# otherwise the NestedVariablesFilter will chocke on it.
visit_key_param = get("form.name", "tg_visit")
# TODO: The path should probably default to whatever
# the root is masquerading as in the event of a
# virtual path filter.
self.cookie_path = get("cookie.path", "/")
# The secure bit should be set for HTTPS only sites
self.cookie_secure = get("cookie.secure", False)
# By default, I don't specify the cookie domain.
self.cookie_domain = get("cookie.domain", None)
assert self.cookie_domain != "localhost", "localhost" \
" is not a valid value for visit.cookie.domain. Try None instead."
# Use max age only if the cookie shall explicitly be permanent
self.cookie_max_age = get("cookie.permanent",
False) and int(get("timeout", "20")) * 60 or None
cpreq = cherrypy.request
visit = current()
if not visit:
visit_key = None
for source in source:
if source == 'cookie':
visit_key = cpreq.cookie.get(self.cookie_name)
if visit_key:
visit_key = visit_key.value
log.debug("Retrieved visit key '%s' from cookie '%s'.",
visit_key, self.cookie_name)
elif source == 'form':
visit_key = cpreq.params.pop(visit_key_param, None)
log.debug(
"Retrieved visit key '%s' from request param '%s'.",
visit_key, visit_key_param)
if visit_key:
visit = _manager.visit_for_key(visit_key)
break
if visit:
log.debug("Using visit from request with key: %s", visit_key)
else:
visit_key = self._generate_key()
visit = _manager.new_visit_with_key(visit_key)
log.debug("Created new visit with key: %s", visit_key)
self.send_cookie(visit_key)
set_current(visit)
# Inform all the plugins that a request has been made for the current
# visit. This gives plugins the opportunity to track click-path or
# retrieve the visitor's identity.
try:
for plugin in _plugins:
plugin.record_request(visit)
except cherrypy.InternalRedirect, e:
# Can't allow an InternalRedirect here because CherryPy is dumb,
# instead change cherrypy.request.path_info to the url desired.
cherrypy.request.path_info = e.path
def _generate_key():
"""Return a (pseudo)random hash based on seed."""
# Adding remote.ip and remote.port doesn't make this any more secure,
# but it makes people feel secure... It's not like I check to make
# certain you're actually making requests from that host and port. So
# it's basically more noise.
key_string = '%s%s%s%s' % (random(), datetime.now(),
cherrypy.request.remote.ip, cherrypy.request.remote.port)
return sha1(key_string).hexdigest()
_generate_key = staticmethod(_generate_key)
def clear_cookie(self):
"""Clear any existing visit ID cookie."""
cookies = cherrypy.response.cookie
# clear the cookie
log.debug("Clearing visit ID cookie")
cookies[self.cookie_name] = ''
cookies[self.cookie_name]['path'] = self.cookie_path
cookies[self.cookie_name]['expires'] = ''
cookies[self.cookie_name]['max-age'] = 0
def send_cookie(self, visit_key):
"""Send an visit ID cookie back to the browser."""
cookies = cherrypy.response.cookie
cookies[self.cookie_name] = visit_key
cookies[self.cookie_name]['path'] = self.cookie_path
if self.cookie_secure:
cookies[self.cookie_name]['secure'] = True
if self.cookie_domain:
cookies[self.cookie_name]['domain'] = self.cookie_domain
max_age = self.cookie_max_age
if max_age:
# use 'expires' because MSIE ignores 'max-age'
cookies[self.cookie_name]['expires'] = '"%s"' % time.strftime(
"%a, %d-%b-%Y %H:%M:%S GMT",
time.gmtime(time.time() + max_age))
# 'max-age' takes precedence on standard conformant browsers
# (this is better because there of no time sync issues here)
cookies[self.cookie_name]['max-age'] = max_age
log.debug("Sending visit ID cookie: %s",
cookies[self.cookie_name].output())
class BaseVisitManager(threading.Thread):
def __init__(self, timeout):
super(BaseVisitManager, self).__init__(name="VisitManager")
self.timeout = timeout
self.queue = dict()
self.lock = threading.Lock()
self._shutdown = threading.Event()
self.interval = 30
self.setDaemon(True)
# We need to create the visit model before the manager thread is
# started.
self.create_model()
self.start()
def create_model(self):
pass
def new_visit_with_key(self, visit_key):
"""Return a new Visit object with the given key."""
raise NotImplementedError
def visit_for_key(self, visit_key):
"""Return the visit for this key.
Return None if the visit doesn't exist or has expired.
"""
raise NotImplementedError
def update_queued_visits(self, queue):
"""Extend the expiration of the queued visits."""
raise NotImplementedError
def update_visit(self, visit_key, expiry):
try:
self.lock.acquire()
self.queue[visit_key] = expiry
finally:
self.lock.release()
def shutdown(self, timeout=None):
self._shutdown.set()
self.join(timeout)
if self.isAlive():
log.error("Visit Manager thread failed to shutdown.")
def run(self):
while not self._shutdown.isSet():
self.lock.acquire()
queue = None
try:
# make a copy of the queue and empty the original
if self.queue:
queue = self.queue.copy()
self.queue.clear()
finally:
self.lock.release()
if queue is not None:
self.update_queued_visits(queue)
self._shutdown.wait(self.interval)
| mit | -8,914,088,577,078,914,000 | 36.27758 | 82 | 0.599427 | false |
elebihan/python-ptraceplus | ptraceplus/tracer.py | 1 | 5688 | # -*- coding: utf-8 -*-
#
# python-ptraceplus - Ptrace bindings + extra stuff
#
# Copyright (c) 2013 Eric Le Bihan <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Process tracing helper
"""
import os
import signal
import ptraceminus as ptrace
from collections import OrderedDict
from gettext import gettext as _
from .process import TracedProcess, create_process_event, SignalEvent
from .utils import spawn_child
from .common import debug
class TracerError(Exception):
"""Error raised when a tracing operation failed"""
class Tracer(object):
"""Trace a process"""
def __init__(self):
self._procs = OrderedDict()
self._fork_enabled = False
self._exec_enabled = False
self._sysgood_enabled = False
self._options = 0
def __getitem__(self, key):
return self._procs[key]
def __iter__(self):
return self._procs.itervalues()
def __contains__(self, key):
return key in self._procs
@property
def has_processes(self):
return (len(self._procs) != 0)
def _set_fork_enabled(self, value):
mask = ptrace.O_TRACEFORK | ptrace.O_TRACEVFORK
if value:
self._options |= mask
else:
self._options &= ~mask
self._fork_enabled = value
def _get_fork_enabled(self):
return self._fork_enabled
fork_enabled = property(_get_fork_enabled, _set_fork_enabled,
None,
"Enable fork tracing")
def _set_exec_enabled(self, value):
mask = ptrace.O_TRACEEXEC | ptrace.O_TRACEEXIT
if value:
self._options |= mask
else:
self._options &= ~mask
self._exec_enabled = value
def _get_exec_enabled(self):
return self._exec_enabled
exec_enabled = property(_get_exec_enabled, _set_exec_enabled,
None,
"Enable exec tracing")
def _set_sysgood_enabled(self, value):
mask = ptrace.O_TRACESYSGOOD
if value:
self._options |= mask
else:
self._options &= ~mask
self._sysgood_enabled = value
def _get_sysgood_enabled(self):
return self._sysgood_enabled
sysgood_enabled = property(_get_sysgood_enabled, _set_sysgood_enabled,
None,
"""Enable sysgood: ask the kernel to set bit
#7 of the signal number if the signal comes
from kernel space. It is unset if it comes
from user space""")
def spawn_process(self, args, env=None, quiet=True):
flags = 0
pid = spawn_child(args, env, quiet)
pid, status = os.waitpid(pid, flags)
proc = self.add_process(pid)
proc.syscall()
return proc
def add_process(self, pid, is_attached=True, parent=None):
if pid in self._procs:
raise TracerError(_('Process {} already registered').format(pid))
debug(_("Adding process {}").format(pid))
proc = self.keep_process(pid, parent)
if not is_attached:
proc.attach()
proc.options = self._options
return proc
def keep_process(self, pid, parent=None):
if pid in self._procs:
debug(_("Remembering process {}").format(pid))
return self._procs[pid]
if parent:
details = "({})".format(parent.pid)
else:
details = ''
debug(_("Keeping process {} {}").format(pid, details))
proc = TracedProcess(pid, parent)
self._procs[pid] = proc
return proc
def remove_process(self, pid):
debug(_("Removing process {}").format(pid))
try:
proc = self._procs.pop(pid)
except KeyError:
raise TracerError(_('Process not found'))
proc.terminate()
proc.detach()
debug(_("{} processes still traced").format(len(self._procs)))
def wait_for_event(self, wanted_pid=None, blocking=True):
flags = 0
if not blocking:
flags |= os.WNOHANG
if wanted_pid and wanted_pid not in self._procs:
raise TracerError(_("Unknown PID ({})").format(wanted_pid))
pid = wanted_pid or -1
pid, status = os.waitpid(pid, flags)
return create_process_event(pid, status)
def wait_for_signal(self, *signals, **kwargs):
pid = kwargs.get('pid', None)
while True:
event = self.wait_for_event(pid)
if isinstance(event, SignalEvent):
if event.signum in signals or not signals:
return event
def wait_for_syscall(self, pid=None):
        return self.wait_for_signal(signal.SIGTRAP, pid=pid)
def quit(self):
while self._procs:
pid, proc = self._procs.popitem()
debug(_("Removing process {}").format(pid))
proc.terminate()
proc.detach()
# vim: ts=4 sts=4 sw=4 sta et ai
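
# Illustrative usage sketch: spawn a traced child and step through syscall
# stops. The command line and the single-step handling are arbitrary.
#
# tracer = Tracer()
# tracer.sysgood_enabled = True
# proc = tracer.spawn_process(['/bin/true'])
# event = tracer.wait_for_syscall()  # blocks until the next syscall stop
# proc.syscall()                     # resume to the following stop
# tracer.quit()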

# File: apps/twitbot/views.py
# Repo: videntity/tweatwell (license: gpl-2.0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
from django.conf import settings
from django.http import HttpResponse, Http404,HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.models import User
from ..accounts.models import UserProfile
from models import TwitBot
from utils import twitbotsearch, convert_twitter_date
from ..checkin.models import Freggie
from ..checkin.freggies import fruit_tuple, veg_tuple
from operator import itemgetter, attrgetter
import json, sys, StringIO, pycurl
from django.forms.models import model_to_dict
def executetwitsearchbot(request, cron_key):
if cron_key != settings.CRON_KEY:
return HttpResponse("Forbidden", status=401)
freggielist=[]
#get the most recent since_id from db
tb=TwitBot.objects.get(pk=1)
d=twitbotsearch(settings.TWITTERHASH, tb.since_id)
latest_since_id=tb.since_id
if d.has_key('results'):
for i in reversed(d['results']):
jsonstr=json.dumps(i, indent = 4,)
x=dict(json.loads(jsonstr))
#if the from_user is in our DB, then create a Freggie
if int(tb.since_id) <= int(x['id']):
latest_since_id=x['id']
try:
freggie=None
up=UserProfile.objects.get(twitter=x['from_user'])
print "process", x['text'], x['id']
for i in fruit_tuple:
if str(x['text']).lower().__contains__(i):
freggie = i
for i in veg_tuple:
if str(x['text']).lower().__contains__(i):
freggie = i
if freggie:
mydate = convert_twitter_date(str(x['created_at']))
f=Freggie.objects.create(user=up.user, freggie=freggie,
text=x['text'], sinceid=x['id'],
evdt=mydate)
freggiedict=model_to_dict(f, exclude=['evdt','photo',
'since_id'])
freggiedict['created_at']=x['created_at']
freggiedict['twitter_id']=x['id']
freggielist.append(freggiedict)
except(UserProfile.DoesNotExist):
print "A tweat was found but no matching user profile"
except:
print str(sys.exc_info())
#return HttpResponse(str(sys.exc_info()), status=500)
tb.since_id=int(latest_since_id)+1
tb.save()
jsonstr=json.dumps(freggielist, indent = 4,)
return HttpResponse(jsonstr, mimetype="text/plain")
| gpl-2.0 | 316,786,970,059,591,800 | 41 | 79 | 0.532494 | false |
gwax/nikola | tests/test_rst_compiler.py | 1 | 8674 | # coding: utf8
# Author: Rodrigo Bistolfi
# Date: 03/2013
""" Test cases for Nikola ReST extensions.
A base class ReSTExtensionTestCase provides the tests basic behaviour.
Subclasses must override the "sample" class attribute with the ReST markup.
The sample will be rendered as HTML using publish_parts() by setUp().
One method is provided for checking the resulting HTML:
* assertHTMLContains(element, attributes=None, text=None)
The HTML is parsed with lxml for checking against the data you provide. The
method takes an element argument, a string representing the *name* of an HTML
tag, like "script" or "iframe". We will try to find this tag in the document
and perform the tests on it. You can pass a dictionary to the attributes kwarg
representing the name and the value of the tag attributes. The text kwarg takes
a string argument, which will be tested against the contents of the HTML
element.
One last caveat: you need to url unquote your urls if you are going to test
attributes like "src" or "link", since the HTML rendered by docutils will be
always unquoted.
"""
import os
import io
try:
from io import StringIO
except ImportError:
from StringIO import StringIO # NOQA
import tempfile
import docutils
from lxml import html
import unittest
import nikola.plugins.compile.rest
from nikola.plugins.compile.rest import vimeo
import nikola.plugins.compile.rest.listing
from nikola.plugins.compile.rest.doc import Plugin as DocPlugin
from nikola.utils import _reload
from .base import BaseTestCase, FakeSite, FakePost
class ReSTExtensionTestCase(BaseTestCase):
""" Base class for testing ReST extensions """
sample = 'foo'
deps = None
def setUp(self):
self.compiler = nikola.plugins.compile.rest.CompileRest()
self.compiler.set_site(FakeSite())
return super(ReSTExtensionTestCase, self).setUp()
def basic_test(self):
""" Parse cls.sample into a HTML document tree """
self.setHtmlFromRst(self.sample)
def setHtmlFromRst(self, rst):
""" Create html output from rst string """
tmpdir = tempfile.mkdtemp()
inf = os.path.join(tmpdir, 'inf')
outf = os.path.join(tmpdir, 'outf')
with io.open(inf, 'w+', encoding='utf8') as f:
f.write(rst)
p = FakePost('', '')
p._depfile[outf] = []
self.compiler.site.post_per_input_file[inf] = p
self.html = self.compiler.compile(inf, outf)
with io.open(outf, 'r', encoding='utf8') as f:
self.html = f.read()
os.unlink(inf)
os.unlink(outf)
depfile = [p for p in p._depfile[outf] if p != outf]
depfile = '\n'.join(depfile)
if depfile:
self.assertEqual(self.deps.strip(), depfile)
os.rmdir(tmpdir)
self.html_doc = html.parse(StringIO(self.html))
def assertHTMLContains(self, element, attributes=None, text=None):
""" Test if HTML document includes an element with the given
attributes and text content
"""
try:
tag = next(self.html_doc.iter(element))
except StopIteration:
raise Exception("<{0}> not in {1}".format(element, self.html))
else:
if attributes:
arg_attrs = set(attributes.items())
tag_attrs = set(tag.items())
self.assertTrue(arg_attrs.issubset(tag_attrs))
if text:
self.assertIn(text, tag.text)
class ReSTExtensionTestCaseTestCase(ReSTExtensionTestCase):
""" Simple test for our base class :) """
sample = '.. raw:: html\n\n <iframe src="foo" height="bar">spam</iframe>'
def test_test(self):
self.basic_test()
self.assertHTMLContains("iframe", attributes={"src": "foo"},
text="spam")
self.assertRaises(Exception, self.assertHTMLContains, "eggs", {})
class MathTestCase(ReSTExtensionTestCase):
sample = ':math:`e^{ix} = \cos x + i\sin x`'
def test_math(self):
""" Test that math is outputting TeX code."""
self.basic_test()
self.assertHTMLContains("span", attributes={"class": "math"},
text="\(e^{ix} = \cos x + i\sin x\)")
class SlidesTestCase(ReSTExtensionTestCase):
""" Slides test case """
sample = '.. slides:: IMG.jpg\n'
def test_slides(self):
""" Test the slides js generation and img tag creation """
self.basic_test()
self.assertHTMLContains("img", attributes={"src": "IMG.jpg"})
class SoundCloudTestCase(ReSTExtensionTestCase):
""" SoundCloud test case """
sample = '.. soundcloud:: SID\n :height: 400\n :width: 600'
def test_soundcloud(self):
""" Test SoundCloud iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("https://w.soundcloud.com"
"/player/?url=http://"
"api.soundcloud.com/"
"tracks/SID"),
"height": "400", "width": "600"})
class VimeoTestCase(ReSTExtensionTestCase):
"""Vimeo test.
Set Vimeo.request_size to False for avoiding querying the Vimeo api
over the network
"""
sample = '.. vimeo:: VID\n :height: 400\n :width: 600'
def setUp(self):
""" Disable query of the vimeo api over the wire """
vimeo.Vimeo.request_size = False
super(VimeoTestCase, self).setUp()
_reload(nikola.plugins.compile.rest)
def test_vimeo(self):
""" Test Vimeo iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("https://player.vimeo.com/"
"video/VID"),
"height": "400", "width": "600"})
class YoutubeTestCase(ReSTExtensionTestCase):
""" Youtube test case """
sample = '.. youtube:: YID\n :height: 400\n :width: 600'
def test_youtube(self):
""" Test Youtube iframe tag generation """
self.basic_test()
self.assertHTMLContains("iframe",
attributes={"src": ("https://www.youtube.com/"
"embed/YID?rel=0&hd=1&"
"wmode=transparent"),
"height": "400", "width": "600"})
class ListingTestCase(ReSTExtensionTestCase):
""" Listing test case and CodeBlock alias tests """
deps = None
sample1 = '.. listing:: nikola.py python\n\n'
sample2 = '.. code-block:: python\n\n import antigravity'
sample3 = '.. sourcecode:: python\n\n import antigravity'
# def test_listing(self):
# """ Test that we can render a file object contents without errors """
# with cd(os.path.dirname(__file__)):
# self.deps = 'listings/nikola.py'
# self.setHtmlFromRst(self.sample1)
def test_codeblock_alias(self):
""" Test CodeBlock aliases """
self.deps = None
self.setHtmlFromRst(self.sample2)
self.setHtmlFromRst(self.sample3)
class DocTestCase(ReSTExtensionTestCase):
""" Ref role test case """
sample = 'Sample for testing my :doc:`doesnt-exist-post`'
sample1 = 'Sample for testing my :doc:`fake-post`'
sample2 = 'Sample for testing my :doc:`titled post <fake-post>`'
def setUp(self):
# Initialize plugin, register role
self.plugin = DocPlugin()
self.plugin.set_site(FakeSite())
# Hack to fix leaked state from integration tests
try:
f = docutils.parsers.rst.roles.role('doc', None, None, None)[0]
f.site = FakeSite()
except AttributeError:
pass
return super(DocTestCase, self).setUp()
def test_doc_doesnt_exist(self):
self.assertRaises(Exception, self.assertHTMLContains, 'anything', {})
def test_doc(self):
self.setHtmlFromRst(self.sample1)
self.assertHTMLContains('a',
text='Fake post',
attributes={'href': '/posts/fake-post'})
def test_doc_titled(self):
self.setHtmlFromRst(self.sample2)
self.assertHTMLContains('a',
text='titled post',
attributes={'href': '/posts/fake-post'})
if __name__ == "__main__":
unittest.main()
| mit | 2,583,950,506,363,282,000 | 34.117409 | 79 | 0.588771 | false |
czcorpus/kontext | lib/plugins/abstract/user_items.py | 1 | 4225 | # Copyright (c) 2015 Charles University in Prague, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2015 Tomas Machalek <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A plug-in template for managing items (corpora, subcorpora, aligned corpora)
user can access via fast access widget. This is a generalization of
user corpus list.
Expected factory method signature: create_instance(config, db)
"""
import abc
import hashlib
import l10n
from controller.errors import UserActionException
class UserItemException(UserActionException):
"""
General error related to
the plug-in
"""
pass
class FavoriteItem(object):
"""
A reference to a corpus in user's list
"""
def __init__(self, data=None):
if data is None:
data = {}
self.name = data.get('name', 'New item')
self.corpora = data.get('corpora', [])
self.size = data.get('size', None)
self.size_info = l10n.simplify_num(self.size) if self.size else None
self.subcorpus_id = data.get('subcorpus_id', None)
self.subcorpus_orig_id = data.get('subcorpus_orig_id', self.subcorpus_id)
self.ident = data.get('id', hashlib.md5(self.sort_key.encode()).hexdigest())
@property
def is_single_corpus(self):
return not self.subcorpus_id and len(self.corpora) == 1
@property
def main_corpus_id(self):
return self.corpora[0]['id']
@property
def sort_key(self):
return '{0} {1}'.format(' '.join(x['name'] for x in self.corpora), self.subcorpus_id)
def to_dict(self):
return dict(
id=self.ident,
name=self.name,
size=self.size,
size_info=self.size_info,
corpora=self.corpora,
subcorpus_id=self.subcorpus_id,
subcorpus_orig_id=self.subcorpus_orig_id
)
class AbstractUserItems(abc.ABC):
"""
A 'user_items' (= favorite corpora, subcorpora, aligned corpora)
plug-in interface.
Please note that to initiate the plug-in with request-specific
data the 'setup(controller)' method must be implemented. The controller
detects it automatically and calls it for all active plug-ins implementing
it.
"""
def from_dict(self, data):
"""
According to provided data it returns a proper
implementation of GeneralItem. OPTIONAL implementation
arguments:
data -- a dict
"""
raise NotImplementedError()
@abc.abstractmethod
def serialize(self, obj):
"""
Exports a GeneralItem instance or a list of GeneralItem instances (both variants
must be supported) to JSON used for internal storage (i.e. no client-side stuff)
"""
@abc.abstractmethod
def get_user_items(self, plugin_ctx):
"""
Returns a list of user items (GeneralItem implementations)
arguments:
plugin_ctx --
return:
a list or a compatible structure containing GeneralItem objects
"""
@abc.abstractmethod
def add_user_item(self, plugin_ctx, item):
"""
Adds (persistently) an item to user's list.
arguments:
plugin_ctx --
item -- an instance of GeneralItem implementation
"""
@abc.abstractmethod
def delete_user_item(self, plugin_ctx, item_id):
"""
Removes (in a persistent way) an item from user's list.
arguments:
plugin_ctx --
item_id -- an ID of GeneralItem instance
"""
| gpl-2.0 | 8,109,087,419,113,496,000 | 29.178571 | 93 | 0.645917 | false |
goodwillcoding/trac-subtickettypes | subtickettypes/web_ui.py | 1 | 9663 | #
# Copyright 2009, Niels Sascha Reedijk <[email protected]>
# All rights reserved. Distributed under the terms of the MIT License.
#
# debug
from pprint import pprint
from pkg_resources import resource_filename
from genshi import HTML
from genshi.builder import tag
from genshi.filters.transform import Transformer
from trac.core import *
from trac.ticket import model
from trac.util.text import unicode_quote_plus
from trac.web.api import IRequestFilter
from trac.web.chrome import ITemplateProvider
from trac.web.chrome import ITemplateStreamFilter
from trac.web.chrome import add_notice
from trac.web.chrome import add_script
from trac.ticket.roadmap import TicketGroupStats
from trac.util.translation import _
# --------------------------------------------------------------------------- #
class SubTicketTypesModule(Component):
"""Implements subtickettypes in Trac's interface."""
implements(IRequestFilter, ITemplateProvider, ITemplateStreamFilter)
# ....................................................................... #
# IRequestFilter method
def pre_process_request(self, req, handler):
# handle the admin panel
if req.path_info.startswith("/admin/ticket/type/"):
# handle cancel submit by redirecting back to the list page
# TODO: patch subcomponents with "cancel" check
if req.method == "POST" and req.args.get('cancel'):
req.redirect(req.href.admin('ticket', 'type'))
if req.method == "POST" and 'rename_children' in req.args:
# if we are not renaming the children for a ticket type that
# has childer, its a regular update, so let trac handle it.
if req.args.get('rename_children') != 'on':
return handler
# other wise first lets rename the parent type first
# get the original name (before renaming)
# 19 is the length of "/admin/ticket/type/" string
parent_ticket_type_name = req.path_info[19:]
parent_ticket_type = model.Type(self.env, parent_ticket_type_name)
parent_ticket_type.name = req.args.get('name')
try:
parent_ticket_type.update()
except self.env.db_exc.IntegrityError:
raise TracError(_('The ticket type "%(name)s" already '
'exists.', name=parent_ticket_type_name))
# Now update names in the the child ticket types
child_ticket_types = self._get_tickettype_children(parent_ticket_type_name)
for ticket_type in child_ticket_types:
ticket_type.name = ticket_type.name.replace(parent_ticket_type_name, req.args.get('name'), 1)
ticket_type.update()
add_notice(req, _('Your changes have been saved.'))
req.redirect(req.href.admin('ticket', 'type'))
return handler
# ....................................................................... #
# IRequestFilter method
def post_process_request(self, req, template, data, content_type):
# The /query paths are handled in filter_stream()
if req.path_info.startswith('/ticket/') or \
req.path_info.startswith('/newticket'):
add_script(req, 'subtickettypes/tickettypeselect.js')
if template == "query.html":
# Allow users to query for parent ticket types and include all
# sub ticket types as well
# check if the entry already exists (might be added by another
# plugin)
begins_with_select_item = {'name': _("begins with"), 'value': ""}
if begins_with_select_item not in data['modes']['select']:
data['modes']['select'].insert(0, begins_with_select_item)
if template == "milestone_view.html":
# Group tickets in the milestone view by base component.
if data['grouped_by'] == "type":
ticket_type_name = ''
new_groups = []
new_ticket_types = []
for ticket_type in data['groups']:
ticket_type_name = ticket_type['name'].split("/")[0]
if ticket_type_name not in new_ticket_types:
# This ticket type is not yet in the new list of ticket
# types, add it.
new_ticket_types.append(ticket_type_name)
# Fix URLs to the querys (we use unicode_quote_plus to
# replace the '/' with something URL safe (like the
# hrefs are)
new_hrefs = []
for interval_href in ticket_type['interval_hrefs']:
new_hrefs.append(interval_href.replace(unicode_quote_plus(ticket_type['name']), '^' + ticket_type_name))
ticket_type['stats_href'] = ticket_type['stats_href'].replace(unicode_quote_plus(ticket_type['name']), '^' + ticket_type_name)
ticket_type['interval_hrefs'] = new_hrefs
# Set the name to the base name (in case this originally
# is a sub ticket type.
ticket_type['name'] = ticket_type_name
new_groups.append(ticket_type)
else:
# This is a sub ticket type. Add the stats to the main ticket type.
# Note that above two lists are created. Whenever an
# item is added to one, an analogous one is added to
# the other. This code uses that logic.
core_ticket_type = new_groups[new_ticket_types.index(ticket_type_name)]
merged_stats = core_ticket_type['stats'] #TicketGroupStats from trac.ticket.roadmap
new_stats = ticket_type['stats']
# Bear with me as we go to this mess that is the group stats
# (or of course this hack, depending on who's viewpoint).
# First merge the totals
merged_stats.count += new_stats.count
# The stats are divided in intervals, merge these.
i = 0
for interval in merged_stats.intervals:
new_interval = new_stats.intervals[i]
interval['count'] += new_interval['count']
i += 1
merged_stats.refresh_calcs()
# Now store the new milestone tickey type groups
data['groups'] = new_groups
return template, data, content_type
# ....................................................................... #
# ITemplateProvider methods
def get_htdocs_dirs(self):
"""Return the absolute path of a directory containing additional
static resources (such as images, style sheets, etc).
"""
return [('subtickettypes', resource_filename(__name__, 'htdocs'))]
# ....................................................................... #
# ITemplateProvider methods
def get_templates_dirs(self):
"""Return the absolute path of the directory containing the provided
ClearSilver templates.
"""
return ""
# ....................................................................... #
# ITemplateStreamFilter method
def filter_stream(self, req, method, filename, stream, data):
# alternate matching possibilities
# if req.path_info.startswith('/admin/ticket/type'):
# Match to the admin ticket type detail editing panel of ticket type
if filename == "admin_enums.html" \
and data['active_cat'] == u'ticket' \
and data['active_panel'] == u'type' \
and data['view'] == 'detail':
# If ticket type has children, then add a checkbox to rename those
if len(self._get_tickettype_children(data['enum'].name)) > 0:
stream |= Transformer("//div[@class='field'][1]").after(self._build_renamechildren_field())
elif req.path_info.startswith('/query'):
# We need to load our script after the initializeFilters() call done by Trac
html = HTML('<script type="text/javascript" charset="utf-8" src="' +
req.href.base +
'/chrome/subtickettypes/tickettypeselect.js"></script>')
stream |= Transformer('//head').append(html)
return stream
# ....................................................................... #
# Helper function
def _get_tickettype_children(self, name):
tickettypes = model.Type.select(self.env)
result = []
for tickettype in tickettypes:
if tickettype.name.startswith(name + "/") and tickettype.name != name:
result.append(tickettype)
return result
# ....................................................................... #
# Helper function
def _build_renamechildren_field(self):
return tag.div(tag.label(
tag.input(_("Also rename children"), \
type='checkbox',
id='rename_children', \
name='rename_children',
checked='checked') \
), \
class_='field')
| bsd-3-clause | -1,324,141,922,772,074,800 | 45.456731 | 150 | 0.526338 | false |
corpnewt/CorpBot.py | Cogs/Telephone.py | 1 | 22909 | import asyncio, discord, re, os, random
from discord.ext import commands
from collections import OrderedDict
from Cogs import Utils, DisplayName, Nullify, FuzzySearch, PickList, Message
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Telephone(bot, settings))
class Telephone(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.switchboard = []
# Regex for extracting urls from strings
self.regex = re.compile(r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?")
self.dregex = re.compile(r"(?i)(discord(\.gg|app\.com)\/)([^\s]+)")
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
# Proof-of-concept placeholders
'''@commands.Cog.listener()
async def on_message_context(self, ctx, message):
return'''
async def killcheck(self, message):
ignore = False
for cog in self.bot.cogs:
real_cog = self.bot.get_cog(cog)
if real_cog == self:
# Don't check ourself
continue
try:
check = await real_cog.message(message)
except AttributeError:
continue
try:
if check['Ignore']:
ignore = True
except KeyError:
pass
return ignore
async def ontyping(self, channel, user, when):
# Check if the channel is typing, and send typing to receiving
# channels if in call
# Don't listen to bots
if user.bot:
return
call = self._incall(channel.guild)
if not call:
return
if not call["Connected"]:
# Don't forward typing until they pick up
return
for caller in call['Members']:
if caller is channel.guild:
continue
# Get the tele channel
call_channel = self._gettelechannel(caller)
if not call_channel:
continue
await call_channel.trigger_typing()
def _gettelechannel(self, server):
teleChannel = self.settings.getServerStat(server, "TeleChannel")
if teleChannel:
teleChannel = DisplayName.channelForName(str(teleChannel), server, "text")
if teleChannel == "":
return None
return teleChannel
def _getsafenumber(self, number, server):
numeric = "0123456789"
found = False
for guild in self.bot.guilds:
if guild.id == server.id:
continue
teleNum = self.settings.getServerStat(guild, "TeleNumber")
if teleNum == number:
found = True
break
if not found:
return number
while True:
found = False
newNum = "".join(random.choice(numeric) for i in range(7))
for guild in self.bot.guilds:
teleNum = self.settings.getServerStat(guild, "TeleNumber")
if teleNum == newNum:
found = True
break
if not found:
return newNum
def _incall(self, server):
for call in self.switchboard:
if server in call["Members"]:
return call
return None
def _getothernumber(self, call, server):
# Returns the other caller's number
if not server in call["Members"]:
# We're uh.. not in this call
return None
for member in call["Members"]:
if not member is server:
# HA! GOTEM
return self.settings.getServerStat(member, "TeleNumber")
def _hangup(self, caller):
# Hangs up all calls the caller is in
for call in self.switchboard:
if caller in call["Members"]:
self.switchboard.remove(call)
@commands.command(pass_context=True)
async def teleblocklinks(self, ctx, *, yes_no = None):
"""Enables/Disables removing links sent over telephone calls (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Block telephone links","TeleBlockLinks",yes_no,default=True))
@commands.command(pass_context=True)
async def phonebook(self, ctx, *, look_up = None):
"""Lets you page through the phonebook - or optionally lets you search for a server name or number."""
# Build our phone list
entries = []
for guild in self.bot.guilds:
teleNum = self.settings.getServerStat(guild, "TeleNumber")
if teleNum:
entries.append({ "name": guild.name, "value": teleNum[:3] + "-" + teleNum[3:] })
if not len(entries):
await ctx.send(":telephone: The phonebook is *empty!*")
return
# Sort alphabetically
entries = sorted(entries, key = lambda x: x["name"])
if look_up == None:
await PickList.PagePicker(title=":telephone: Phonebook",list=entries,ctx=ctx).pick()
return
# Search time!
look_up_num = re.sub(r'\W+', '', look_up)
id_ratio = 0
if len(look_up_num):
look_up_num = look_up_num if len(look_up_num) < 7 else look_up_num[:3]+"-"+look_up_num[3:]
idMatch = FuzzySearch.search(look_up_num, entries, 'value', 3)
id_ratio = idMatch[0]['Ratio']
if id_ratio == 1:
# Found it!
return await Message.Embed(title=":telephone: Phonebook",fields=[idMatch[0]["Item"]],color=ctx.author).send(ctx)
# Look up by name now
nameMatch = FuzzySearch.search(look_up, entries, 'name', 3)
if nameMatch[0]['Ratio'] == 1:
# Exact name
# Found it!
return await Message.Embed(title=":telephone: Phonebook",fields=[nameMatch[0]["Item"]],color=ctx.author).send(ctx)
# now we need to find which is better
matchCheck = []
if nameMatch[0]['Ratio'] > id_ratio:
matchCheck = nameMatch
else:
matchCheck = idMatch
fields = [m["Item"] for m in matchCheck]
return await Message.Embed(title=":telephone: Phonebook - Closest Matches",fields=fields,color=ctx.author).send(ctx)
@commands.command(pass_context=True)
async def telenumber(self, ctx):
"""Prints your telephone number."""
teleNum = self.settings.getServerStat(ctx.guild, "TeleNumber")
if not teleNum:
await ctx.send(":telephone: is currently *disabled*.")
return
teleNumFormat = teleNum[:3] + "-" + teleNum[3:]
await ctx.send("Your :telephone: number is: *{}*".format(teleNumFormat))
@commands.command(pass_context=True)
async def callerid(self, ctx):
"""Reveals the last number to call regardless of *67 settings (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
target = self.settings.getServerStat(ctx.guild, "LastCall")
if target == None:
await ctx.send(":telephone: No prior calls recorded.")
else:
if self.settings.getServerStat(ctx.guild, "LastCallHidden") and not isAdmin:
target = "UNKNOWN CALLER (bot-admins and admins can reveal this)"
await ctx.send(":telephone: Last number recorded: {}".format(target[:3] + "-" + target[3:]))
@commands.command(pass_context=True)
async def settelechannel(self, ctx, *, channel = None):
"""Sets the channel for telephone commands - or disables that if nothing is passed (admin only)."""
isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if channel == None:
self.settings.setServerStat(ctx.message.guild, "TeleChannel", "")
self.settings.setServerStat(ctx.guild, "TeleNumber", None)
msg = ':telephone: *disabled*.'
await ctx.channel.send(msg)
return
channel = DisplayName.channelForName(channel, ctx.guild, "text")
if channel == None:
await ctx.send("I couldn't find that channel :(")
return
self.settings.setServerStat(ctx.message.guild, "TeleChannel", channel.id)
teleNumber = self._getsafenumber(str(channel.id)[len(str(channel.id))-7:], ctx.guild)
self.settings.setServerStat(ctx.guild, "TeleNumber", teleNumber)
msg = ':telephone: channel set to {}'.format(channel.mention)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def telechannel(self, ctx):
"""Prints the current channel for telephone commands."""
teleChan = self.settings.getServerStat(ctx.guild, "TeleChannel")
if not teleChan:
await ctx.send(":telephone: is currently *disabled*.")
return
channel = DisplayName.channelForName(str(teleChan), ctx.guild, "text")
if channel:
await ctx.send("The current :telephone: channel is {}".format(channel.mention))
return
await ctx.send("Channel id: *{}* no longer exists on this server. Consider updating this setting!".format(teleChan))
@commands.command(pass_context=True)
async def teleblock(self, ctx, *, guild_name = None):
"""Blocks all tele-numbers associated with the passed guild (bot-admin only)."""
isAdmin = ctx.author.permissions_in(ctx.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.guild, "AdminArray")
for role in ctx.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.send('You do not have sufficient privileges to access this command.')
return
if guild_name == None:
await ctx.send("Usage: `{}teleblock [guild_name]`".format(ctx.prefix))
return
# Verify our guild
found = False
target = None
for guild in self.bot.guilds:
teleNum = self.settings.getServerStat(guild, "TeleNumber")
if not teleNum:
continue
if guild.name.lower() == guild_name.lower():
if guild.id == ctx.guild.id:
# We're uh... blocking ourselves.
await ctx.send("You can't block your own number...")
return
found = True
target = guild
break
if not found:
await ctx.send("I couldn't find that guild to block. Maybe they're not setup for :telephone: yet?")
return
# Here, we should have a guild to block
block_list = self.settings.getServerStat(ctx.guild, "TeleBlock")
if block_list == None:
block_list = []
block_list.append(target.id)
self.settings.setServerStat(ctx.guild, "TeleBlock", block_list)
msg = "You are now blocking *{}!*".format(Nullify.escape_all(target.name))
await ctx.send(msg)
@commands.command(pass_context=True)
async def teleunblock(self, ctx, *, guild_name = None):
"""Unblocks all tele-numbers associated with the passed guild (bot-admin only)."""
isAdmin = ctx.author.permissions_in(ctx.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.guild, "AdminArray")
for role in ctx.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.send('You do not have sufficient privileges to access this command.')
return
if guild_name == None:
await ctx.send("Usage: `{}teleunblock [guild_name]`".format(ctx.prefix))
return
block_list = self.settings.getServerStat(ctx.guild, "TeleBlock")
if block_list == None:
block_list = []
if not len(block_list):
await ctx.send("No blocked numbers - nothing to unblock!")
return
# Verify our guild
found = False
target = None
for guild in self.bot.guilds:
teleNum = self.settings.getServerStat(guild, "TeleNumber")
if guild.name.lower() == guild_name.lower():
found = True
target = guild
break
if not found:
await ctx.send("I couldn't find that guild...")
return
if not target.id in block_list:
msg = "*{}* is not currently blocked.".format(Nullify.escape_all(target.name))
await ctx.send(msg)
return
# Here, we should have a guild to unblock
block_list.remove(target.id)
self.settings.setServerStat(ctx.guild, "TeleBlock", block_list)
msg = "You have unblocked *{}!*".format(Nullify.escape_all(target.name))
await ctx.send(msg)
@commands.command(pass_context=True)
async def teleblocks(self, ctx):
"""Lists guilds with blocked tele-numbers."""
block_list = self.settings.getServerStat(ctx.guild, "TeleBlock")
if block_list == None:
block_list = []
if not len(block_list):
await ctx.send("No blocked numbers!")
return
block_names = []
for block in block_list:
server = self.bot.get_guild(block)
if not server:
block_list.remove(block)
continue
block_names.append("*" + server.name + "*")
self.settings.setServerStat(ctx.guild, "TeleBlock", block_list)
msg = "__Tele-Blocked Servers:__\n\n"
#msg += ", ".join(str(x) for x in block_list)
msg += ", ".join(Nullify.escape_all(block_names))
await ctx.send(msg)
@commands.command(pass_context=True)
async def call(self, ctx, *, number = None):
"""Calls the passed number. Can use *67 to hide your identity - or *69 to connect to the last incoming call (ignored if another number is present)."""
teleChan = self._gettelechannel(ctx.guild)
if not teleChan:
await ctx.send(":telephone: is currently *disabled*. You can set it up with `{}settelechannel [channel]`".format(ctx.prefix))
return
if not teleChan.id == ctx.channel.id:
await ctx.send(":telephone: calls must be made in {}".format(teleChan.mention))
return
# Check if we're already in a call
incall = self._incall(ctx.guild)
if incall:
if incall["Hidden"]:
call_with = "UNKOWN CALLER"
else:
teleNum = self._getothernumber(incall, ctx.guild)
call_with = teleNum[:3] + "-" + teleNum[3:]
# Busy :(
caller = self._gettelechannel(ctx.guild)
if caller:
await caller.send(":telephone: You're already in a call with: *{}*".format(call_with))
return
hidden = False
target = None
dial_hide = False
if not number == None:
if "*67" in number:
hidden = True
if "*69" in number:
target = self.settings.getServerStat(ctx.guild, "LastCall")
if self.settings.getServerStat(ctx.guild, "LastCallHidden"):
dial_hide = True
number = number.replace("*67", "").replace("*69", "")
number = re.sub(r'\W+', '', number)
if len(number):
dial_hide = False
target = number
await self._dial(ctx.guild, target, hidden, dial_hide)
async def _dial(self, caller, target, hidden, dial_hide):
if target == None:
# Need a random number
numbers = []
for guild in self.bot.guilds:
if guild.id == caller.id:
continue
teleNum = self.settings.getServerStat(guild, "TeleNumber")
if teleNum:
numbers.append(guild)
if len(numbers):
target = random.choice(numbers)
else:
found = False
for guild in self.bot.guilds:
teleNum = self.settings.getServerStat(guild, "TeleNumber")
if teleNum == target:
if guild.id == caller.id:
# We're uh... calling ourselves.
caller = self._gettelechannel(caller)
if caller:
await caller.send(":telephone: ***Beep beep beep beep!*** *Busy signal...*")
return
found = True
target = guild
break
if not found:
target = None
if target == None:
# We didn't find a server to connect to
caller = self._gettelechannel(caller)
if caller:
await caller.send(":telephone: ***Beep beep beep!*** *We're sorry, the number you've dialed is not in service at this time.*")
return
# Check for a blocked server
block_list = self.settings.getServerStat(caller, "TeleBlock")
if block_list == None:
block_list = []
tblock_list = self.settings.getServerStat(target, "TeleBlock")
if tblock_list == None:
block_list = []
if target.id in block_list or caller.id in tblock_list:
# Blocked! - checks if both parties are blocked by each other
caller = self._gettelechannel(caller)
if caller:
await caller.send(":telephone: ***Beep beep beep!*** *We're sorry, your call cannot be completed as dialed.*")
return
target_channel = self._gettelechannel(target)
if target_channel == None:
# We found a server - but they have no telechannel
caller = self._gettelechannel(caller)
if caller:
await caller.send(":telephone: ***Beep beep beep!*** *We're sorry, the number you've dialed is not in service at this time.*")
return
# Check if the caller is in a call currently
if self._incall(target):
# Busy :(
caller = self._gettelechannel(caller)
if caller:
await caller.send(":telephone: ***Beep beep beep beep!*** *Busy signal...*")
return
# Ring!
try:
await self._ring(caller, target, hidden, dial_hide)
except:
# Something went wrong - hang up and inform both parties that the call was disconnected
self._hangup(caller)
caller = self._gettelechannel(caller)
target = self._gettelechannel(target)
try:
await caller.send(":telephone: The line went dead!")
except:
pass
try:
await target.send(":telephone: The line went dead!")
except:
pass
async def _ring(self, caller, receiver, hidden, dial_hide):
# This should be called when he have a valid caller, receiver, and no one is busy
receiver_chan = self._gettelechannel(receiver)
caller_chan = self._gettelechannel(caller)
if receiver_chan == None or caller_chan == None:
# No dice
return
# Add both to the call list
self.switchboard.append({ "Members": [caller, receiver], "Hidden": hidden, "Connected": False })
our_call = self.switchboard[len(self.switchboard)-1]
# Let the caller know we're dialing
msg = ":telephone: Dialing... "
teleNum = self.settings.getServerStat(receiver, "TeleNumber")
msg_add = []
if hidden:
msg_add.append("*67 ")
if dial_hide:
msg_add.append("###-")
msg_add.append("####")
else:
msg_add.append(teleNum[:3]+"-")
msg_add.append(teleNum[3:])
# Send dialing
message = await caller_chan.send(msg)
# Dialing edits
for i in msg_add:
msg += i
await message.edit(content=msg)
await asyncio.sleep(0.5)
# Here - we should have "dialed"
# Send a message to the other channel that there's a call incoming
# Save last call
self.settings.setServerStat(receiver, "LastCall", self.settings.getServerStat(caller, "TeleNumber"))
if hidden:
caller_number = "UNKNOWN CALLER"
self.settings.setServerStat(receiver, "LastCallHidden", True)
else:
self.settings.setServerStat(receiver, "LastCallHidden", False)
caller_number = self.settings.getServerStat(caller, "TeleNumber")
caller_number = caller_number[:3] + "-" + caller_number[3:]
await receiver_chan.send(":telephone: Incoming call from: *{}*\nType *pickup* to answer.".format(caller_number))
# Ring for 30 seconds - then report no answer
# Setup the check
def check(ctx, msg):
# This now catches the message and the context
# print(ctx)
if msg.author.bot:
return False
m_cont = msg.content.lower()
if msg.channel == receiver_chan and m_cont == "pickup":
return True
if msg.channel == caller_chan and m_cont == "hangup":
return True
return False
# Wait for a response
try:
talk = await self.bot.wait_for('message_context', check=check, timeout=30)
except Exception:
talk = None
if talk:
talk = talk[1]
if talk == None:
# No answer - hangup
self._hangup(caller)
await caller_chan.send(":telephone: No answer...")
await receiver_chan.send(":telephone: Ringing stops.")
return
elif talk.content.lower() == "hangup":
# You hung up the call
self._hangup(caller)
await caller_chan.send(":telephone: You have hung up.")
await receiver_chan.send(":telephone: Ringing stops.")
return
# Connect the call:
our_call["Connected"] = True
# They answered!
await caller_chan.send(":telephone_receiver: Connected.\nType *hangup* to end the call.")
await receiver_chan.send(":telephone_receiver: Connected.\nType *hangup* to end the call.")
# Wait on the call
while True:
# Setup the check
def check_in_call(msg):
if msg.author.bot:
return False
if msg.channel == receiver_chan or msg.channel == caller_chan:
return True
return False
try:
# 1 minute timeout
talk = await self.bot.wait_for('message', check=check_in_call, timeout=60)
except Exception:
talk = None
if talk == None:
# Timed out
self._hangup(caller)
self._hangup(receiver)
await caller_chan.send(":telephone: Disconnected.")
await receiver_chan.send(":telephone: Disconnected.")
return
elif talk.content.lower() == "hangup":
# One side hung up
self._hangup(caller)
self._hangup(receiver)
if talk.channel == caller_chan:
# The caller disconnected
await receiver_chan.send(":telephone: The other phone was hung up.")
await caller_chan.send(":telephone: You have hung up.")
else:
# The receiver disconnected
await caller_chan.send(":telephone: The other phone was hung up.")
await receiver_chan.send(":telephone: You have hung up.")
return
else:
talk_msg = talk.content
# Let's make sure we strip links out - and nullify discord.gg links to patch a spam loophole
# Create a set of all matches (to avoid duplicates in case of spam)
if self.settings.getServerStat(receiver if talk.channel==caller_chan else caller,"TeleBlockLinks",True):
# Remove links only if the target channel chooses to
matches = [x.group(0) for x in re.finditer(self.regex, talk_msg)]
dmatches = [x.group(0) for x in re.finditer(self.dregex, talk_msg)]
matches.extend(dmatches)
matches = OrderedDict.fromkeys(matches) # Use an OrderedDict to avoid duplicates
# Now we iterate that list and replace all links with `link removed`
for x in matches:
talk_msg = talk_msg.replace(x,"`link removed`")
# Clean out mentions from the message
talk_msg = Utils.suppressed(talk.guild,talk_msg,force=True)
# Must be conversation
if talk.channel == caller_chan:
# Coming from the talking channel
if hidden:
await receiver_chan.send(":telephone_receiver: " + talk_msg)
else:
user = DisplayName.name(talk.author).replace("`","").replace("\\","") # Remove @here and @everyone mentions in username
await receiver_chan.send(":telephone_receiver: `{}`: {}".format(user, talk_msg))
else:
user = DisplayName.name(talk.author).replace("`","").replace("\\","") # Remove @here and @everyone mentions in username
await caller_chan.send(":telephone_receiver: `{}`: {}".format(user, talk_msg))
| mit | 4,624,505,264,969,987,000 | 33.462848 | 153 | 0.659173 | false |
TissueMAPS/TmLibrary | tmlib/workflow/metaextract/args.py | 1 | 1432 | # TmLibrary - TissueMAPS library for distibuted image analysis routines.
# Copyright (C) 2016 Markus D. Herrmann, University of Zurich and Robin Hafen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from tmlib.workflow.args import Argument
from tmlib.workflow.args import BatchArguments
from tmlib.workflow.args import SubmissionArguments
from tmlib.workflow import register_step_batch_args
from tmlib.workflow import register_step_submission_args
@register_step_batch_args('metaextract')
class MetaextractBatchArguments(BatchArguments):
batch_size = Argument(
type=int, help='number of images that should be processed per job',
default=100, flag='batch-size', short_flag='b'
)
@register_step_submission_args('metaextract')
class MetaextractSubmissionArguments(SubmissionArguments):
pass
| agpl-3.0 | -114,338,109,586,010,220 | 41.117647 | 78 | 0.776536 | false |
adrianp/cartz | server/api.py | 1 | 1275 | from data.game import Game
from data.player import Player
from utils import random_string
from config import uid_length
games = {}
def game_new(id):
# if games does not exist, create id
if id not in games:
games[id] = Game(id)
if games[id].getPlayerCount() < 2:
player = Player(random_string(uid_length))
games[id].addPlayer(player)
return ({
"joined": True,
"id": games[id].id,
"player": player.id,
"started": games[id].started
}, 200)
else:
return ({
"joined": False,
"id": id
}, 403)
def game_stop(id):
if id in games:
del games[id]
return ("", 200)
else:
return ("", 404)
def game_state(id, player):
if id in games:
return (games[id].get_state(player), 200)
else:
return ("", 404)
def game_pass(id, user):
if id in games:
games[id].next()
return ("", 200)
else:
return ("", 404)
def game_play(id, user, card):
if id in games:
if games[id].play(user, card):
return ("", 200)
else:
return ("", 403)
return ("", 404)
def game_attack(id, user, card, target):
return ("", 501)
| mit | -8,935,933,518,150,531,000 | 18.921875 | 50 | 0.51451 | false |
hammerhorn/hammerhorn-jive | hr/hr.py | 1 | 2017 | #!/usr/bin/env python
"""
Produces horizontal lines for use in shell scripts.
usage: hr.py [-h] [-w WIDTH] [-p PATTERN] [-c]
optional arguments:
-h, --help show this help message and exit
-w WIDTH, --width WIDTH
-p PATTERN, --pattern PATTERN
-c, --center
* floats should give screenwidths, ints shoudl give charwidths
"""
__author__ = 'Chris Horn <[email protected]>'
__license__ = 'GPL'
import argparse
from cjh.cli import Cli
Cli()
#arg_dic = {}
def _parse_args():
"""
Parse all args and return 'args' namespace.
"""
parser = argparse.ArgumentParser(
description='Produces horizontal lines for use in shell scripts.')
parser.add_argument(
'-w', '--width', type=float, help='width in columns or width in ' +
'screenwidths')
parser.add_argument(
'-p', '--pattern', type=str, help='symbol or sequence of symbols')
parser.add_argument('-c', '--center', action='store_true', help='centered')
#parser.add_argument("-s", "--string", action='store_true')
return parser.parse_args()
if __name__ == '__main__':
ARGS = _parse_args()
else: ARGS = None
# If ARGS.width is an integer, convert it to be of type int.
# An int for this value means width in columns.
# A decimal < 1 means a percentage of the width of the terminal.
if ARGS is not None and ARGS.width is not None and\
(ARGS.width == int(ARGS.width)):
ARGS.width = int(ARGS.width)
# possible to this automatically?
def populate_args():
"""
Convert args namespace to a dictionary, for use in the Cli.hrule()
method.
"""
kw_dict = {}
if ARGS is not None:
if ARGS.width is not None:
kw_dict.update({'width': ARGS.width})
if ARGS.pattern is not None:
kw_dict.update({'symbols': ARGS.pattern})
if ARGS.center is True:
kw_dict.update({'centered': ARGS.center})
return kw_dict
# print arg_dic
ARG_DICT = populate_args()
if __name__ == '__main__':
Cli.hrule(**ARG_DICT)
| gpl-2.0 | 8,735,727,038,346,566,000 | 27.408451 | 79 | 0.625682 | false |
rwl/puddle | puddle/python_editor/python_editor_extension.py | 1 | 3510 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Python editor extensions """
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from os.path import dirname
from enthought.pyface.api import ImageResource
from puddle.resource.editor import Editor
#------------------------------------------------------------------------------
# "PythonEditorExtension" class:
#------------------------------------------------------------------------------
class PythonEditorExtension(Editor):
""" Associates a Python editor with *.py files.
"""
# The object contribution's globally unique identifier.
id = "puddle.python_editor"
# A name that will be used in the UI for this editor
name = "Python Editor"
# An icon that will be used for all resources that match the
# specified extensions
image = ImageResource("python")
# The contributed editor class
editor_class = "puddle.python_editor.python_workbench_editor:" \
"PythonWorkbenchEditor"
# The list of file types understood by the editor
extensions = [".py"]
# If true, this editor will be used as the default editor for the type
default = False
#------------------------------------------------------------------------------
# "TextEditorExtension" class:
#------------------------------------------------------------------------------
class TextEditorExtension(Editor):
""" Associates a text editor with *.py files.
"""
# The object contribution's globally unique identifier.
id = "puddle.python_editor.text_editor_extension"
# A name that will be used in the UI for this editor
name = "Text Editor"
# An icon that will be used for all resources that match the
# specified extensions
image = ImageResource("python")
# The contributed editor class
editor_class = "enthought.plugins.text_editor.editor.text_editor:" \
"TextEditor"
# The list of file types understood by the editor
extensions = [".py"]
# If true, this editor will be used as the default editor for the type
default = True
# EOF -------------------------------------------------------------------------
| mit | -6,106,553,099,906,862,000 | 38.438202 | 79 | 0.575783 | false |
marcoprado17/flask-bone | src/r.py | 1 | 4665 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# ======================================================================================================================
# The MIT License (MIT)
# ======================================================================================================================
# Copyright (c) 2016 [Marco Aurélio Prado - [email protected]]
# ======================================================================================================================
class Resources:
def __init__(self):
self.string = self.__Strings()
self.id = self.__Ids()
self.dimen = self.__Dimens()
class __Strings:
def __init__(self):
self.navbar = self.__Navbar()
self.register = self.__Register()
self.email = self.__Email()
self.micro_blog = "Micro Blog"
self.temp_error_html = "Ocorreu um erro inesperado em nossos servidores, nossa equipe técnica resolverá o problema assim que possível. Clique <a href=%(href)s>aqui</a> para voltar à pagina inicial."
self.db_access_error = "Ocorreu um erro ao acessar o banco de dados. Tente novamente."
self.send_email_error = "Ocorreu um erro ao enviar o email. Tente novamente."
self.static = "static"
self.toast = "toast"
self.success = "success"
self.info = "info"
self.warning = "warning"
self.error = "error"
self.category = "%(type)s-%(level)s"
class __Navbar:
def __init__(self):
self.home = "Home"
self.posts = "Posts"
self.add_post = "Adicionar Post"
self.view_posts = "Visualizar Posts"
self.categories = "Categorias"
self.add_category = "Adicionar Categoria"
self.subcategories = "Subcategorias"
self.add_subcategory = "Adicionar Subcategoria"
self.login = "Entrar"
self.register = "Cadastrar"
self.leave = "Sair"
class __Register:
def __init__(self):
self.email_already_registered = \
"Email já cadastrado. Para entrar com este email, clique <a href='%(href)s'>aqui</a>."
self.password_mismatch = "As senhas digitadas não são iguais."
self.password_length = "A senha deve possuir entre %(min_length)d e %(max_length)d caracteres."
self.email_label = "Email"
self.password_label = "Senha"
self.password_confirmation_label = "Confirmação da senha"
self.register = "Cadastrar"
self.already_has_account = "Já possui conta?"
self.title = "Cadastro"
class __Email:
def __init__(self):
self.register = self.__Register()
class __Register:
def __init__(self):
self.welcome = "Olá!"
self.confirm_email = "Deseja utilizar <u>%(email)s</u> como seu email principal na %(title)s?"
self.confirm_button_text = "Sim, desejo utilizar este email"
self.subject = "Confirme seu endereço de e-mail | %(title)s"
class __Ids:
def __init__(self):
self.navbar = self.__Navbar()
self.register = self.__Register()
class __Navbar:
def __init__(self):
self.home = "home"
self.posts = "posts"
self.categories = "categories"
self.subcategories = "subcategories"
self.register = "register"
self.login = "login"
class __Register:
def __init__(self):
self.example = "example"
class __Dimens:
def __init__(self):
self.navbar = self.__Navbar()
self.register = self.__Register()
self.models = self.__Models()
self.default_password_field_max_length = 256
self.default_string_field_max_length = 1024
class __Navbar:
def __init__(self):
self.example = 42
class __Register:
def __init__(self):
self.example = 42
class __Models:
def __init__(self):
self.user = self.__User()
class __User:
def __init__(self):
self.email_max_length = 256
self.password_min_length = 6
self.password_max_length = 32
R = Resources()
| mit | 4,281,181,409,038,982,000 | 38.760684 | 210 | 0.468186 | false |
EricSchles/veyepar | dj/scripts/post_yt.py | 1 | 13878 | #!/usr/bin/python
# posts to youtube
import youtube_v3_uploader
import archive_uploader
import rax_uploader
import os
import pprint
import pw
from process import process
from django.db import DatabaseError
from django.template.defaultfilters import slugify
from add_to_richard import get_video_id
from main.models import Show, Location, Episode, Raw_File, Cut_List
def save_me(ep):
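    """Save an episode, retrying once after resetting the db connection
    if the first save hits a DatabaseError (e.g. a timed-out connection)."""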
    # trying to fix the db timeout problem
    try:
        ep.save()
    except DatabaseError:
        # the connection has probably gone stale; drop it so django
        # opens a fresh one, then retry the save once.
        from django.db import connection
        connection.connection.close()
        connection.connection = None
        ep.save()
class FileNotFound(Exception):
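    """Raised by get_files() when an expected encoded file is missing."""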
def __init__(self, value):
        self.value = value
def __str__(self):
return repr(self.value)
class post(process):
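    """Upload finished episodes to youtube, archive.org and rackspace,
    recording the resulting urls on each episode."""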
ready_state = 4
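    # the process base class only hands this step episodes whose state
    # matches ready_state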
def construct_description(self, ep):
        # collect strings from various sources and
        # build a wad of text to use as the public-facing description
show = ep.show
client = show.client
descriptions = [ep.authors,
ep.public_url, ep.conf_url,
ep.description,
]
# show.description, client.description]
# remove blanks
descriptions = [d for d in descriptions if d]
        # combine with newlines between each item
        description = "\n".join(descriptions)
# description = "<br/>\n".join(description.split('\n'))
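        # the result looks something like (illustrative values):
        #   Jane Speaker
        #   http://youtu.be/xyz123
        #   http://conf.example/schedule/talk/42
        #   <the talk abstract>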
return description
def get_tags(self,ep):
tags = [ ep.show.client.slug, ep.show.slug, ]
for more_tags in [ ep.show.client.tags, ep.tags, ep.authors ]:
if more_tags is not None:
tags += more_tags.split(',')
# remove spaces
tags = [ tag.replace(' ','') for tag in tags ]
# remove any empty tags
tags = filter(None, tags)
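        # e.g. client slug 'pycon', show slug 'pycon2014', ep.tags 'video,talk'
        # -> ['pycon', 'pycon2014', 'video', 'talk']  (illustrative values)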
return tags
def get_files(self, ep):
# get a list of video files to upload
# blip and archive support multiple formats, youtube does not.
# youtube and such will only upload the first file.
files = []
for ext in self.options.upload_formats:
src_pathname = os.path.join( self.show_dir, ext, "%s.%s"%(ep.slug,ext))
if os.path.exists(src_pathname):
files.append({'ext':ext,'pathname':src_pathname})
else:
                # crappy place to abort, but meh, works for now.
# maybe this is the place to use raise?
print "not found:", src_pathname
raise FileNotFound(src_pathname)
if self.options.debug_log:
# put the mlt and .sh stuff into the log
            # blip and firefox want it to be xml, so jump through some hoops
log = "<log>\n"
mlt_pathname = os.path.join( self.show_dir, 'tmp', "%s.mlt"%(ep.slug,))
log += open(mlt_pathname).read()
sh_pathname = os.path.join( self.show_dir, 'tmp', "%s.sh"%(ep.slug,))
shs = open(sh_pathname).read().split('\n')
shs = [ "<line>\n%s\n</line>\n" % l for l in shs if l]
log += "<shell_script>\n%s</shell_script>\n" % ''.join(shs)
log += "</log>"
# blip says: try something like a tt or srt file
log_pathname = os.path.join( self.show_dir, 'tmp', "%s.tt"%(ep.slug,))
log_file=open(log_pathname,'w').write(log)
# add the log to the list of files to be posted
files.append({'ext':'tt', 'pathname':log_pathname})
return files
def collect_metadata(self, ep):
meta = {}
meta['title'] = ep.name
meta['description'] = self.construct_description(ep)
meta['tags'] = self.get_tags(ep)
# if ep.license:
# meta['license'] = str(ep.license)
# meta['rating'] = self.options.rating
# http://gdata.youtube.com/schemas/2007/categories.cat
        meta['category'] = 22  # note: 22 is "People & Blogs"; "Education" is 27
if ep.location.lat and ep.location.lon:
meta['latlon'] = (ep.location.lat, ep.location.lon)
meta['privacyStatus'] = 'unlisted'
return meta
def mk_key(self, ep, f):
# make a key for rackspace cdn object key value store
# <category-slug>/<video-id>_<title-of-video>.mp4
# if we have that data handy.
# otherwise client/show/slug
key = ''
if ep.show.client.category_key:
# warning: this does not take into account pvo collisions
# https://github.com/willkg/richard/blob/master/richard/videos/utils.py#L20 def generate_unique_slug(obj, slug_from, slug_field='slug'):
key += slugify( ep.show.client.category_key ) + '/'
else:
            key += ep.show.client.slug + '/' + ep.show.slug + '/'
if ep.public_url:
key += get_video_id( ep.public_url) + "_"
key += ep.slug[:50] + "." + f['ext']
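        # e.g. 'pycon-2014/aBcD123_my-talk-title.mp4' with a category key and
        # public_url, else 'pycon/pycon2014/my-talk-title.mp4' (illustrative)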
return key
def do_yt(self,ep,files,private,meta):
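        """Upload the first file in `files` to the client's youtube account.

        Stores the new url on ep.host_url and returns True on success (or
        when the episode is already up and --replace was not given).
        """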
youtube_success = False
uploader = youtube_v3_uploader.Uploader()
uploader.user = ep.show.client.youtube_id
uploader.pathname = files[0]['pathname']
uploader.meta = meta
uploader.private = private
# for replacing.
        # (currently not implemented in youtube_v3_uploader)
uploader.old_url = ep.host_url
if self.options.test:
print 'test mode:'
print "user key:", uploader.user
print 'files = %s' % files
print 'meta = %s' % pprint.pformat(meta)
            print 'skipping youtube_v3_uploader.py uploader.upload()'
print len(meta['description'])
elif ep.host_url and not self.options.replace:
print "skipping youtube, already there."
youtube_success = True
else:
# down to next layer of code that will do the uploading
# uploader.debug_mode=True
youtube_success = uploader.upload()
if youtube_success:
# if self.options.verbose: print uploader.new_url
            print uploader.new_url
# save new youtube url
ep.host_url = uploader.new_url
# for test framework
self.last_url = uploader.new_url
else:
print "youtube error! zomg"
ep.comment += "\n%s\n" % (uploader.ret_text.decode('utf-8').encode('ascii', 'xmlcharrefreplace'))
save_me(ep)
return youtube_success
def do_arc(self, ep, files, meta):
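        """Upload every file in `files` to archive.org, recording the
        per-format urls on the episode."""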
        # upload to archive.org too.. yuck.
        # this should be in post_arc.py,
        # but I don't want 2 processes uploading at the same time.
        # because bandwidth?
        archive_success = False
        uploader = archive_uploader.Uploader()
uploader.user = ep.show.client.archive_id
uploader.bucket_id = ep.show.client.bucket_id
for f in files:
uploader.pathname = f['pathname']
uploader.key_id = "%s.%s" % ( ep.slug[:30], f['ext'] )
if self.options.test:
print 'test mode...'
print 'skipping archive_uploader .upload()'
elif ep.archive_ogv_url and not self.options.replace:
# um.. what about other formats?
# kinda buggy here.
# but only relevant when things are messed up
                # and looking for problems.
print "skipping archive, ogv already there."
archive_success = True
else:
# actually upload
# uploader.debug_mode=True
archive_success = uploader.upload()
if archive_success:
if self.options.verbose: print uploader.new_url
# this is pretty gross.
# store the archive url
if f['ext'] == "mp4":
ep.archive_mp4_url = uploader.new_url
elif f['ext'] == "ogv":
ep.archive_ogv_url = uploader.new_url
elif f['ext'] == "webm": # omg super gross.
ep.archive_ogv_url = uploader.new_url
# hook for tests so that it can be browsed
self.archive_url = uploader.new_url
# for test framework
self.last_url = uploader.new_url
else:
print "internet archive error!"
save_me(ep)
return archive_success
def do_rax(self, ep, files, meta):
# upload to rackspace cdn too.. yuck.
# this should be in post_rax.py, but
# but I don't want 2 processes uploading at the same time.
# bcause bandwidth? or something.
# Not sure what the problem is really.
if self.options.verbose: print "do_rax..."
success = False
uploader = rax_uploader.Uploader()
uploader.user = ep.show.client.rax_id
uploader.bucket_id = ep.show.client.bucket_id
for f in files:
uploader.pathname = f['pathname']
uploader.key_id = self.mk_key(ep, f)
if self.options.test:
print 'test mode...'
print 'skipping rax_uploader .upload()'
print 'key_id:', uploader.key_id
elif ep.rax_mp4_url and not self.options.replace:
# above assumes rax_mp4_url is what gets filled in below
# this is so gross.
print "skipping rax, already there."
success = True
else:
# actually upload
# uploader.debug_mode=True
success = uploader.upload()
# possible errors:
# invalid container - halt, it will likely be invalid for all
# transmission - retry
# bad name, mark as error and continue to next
if success:
if self.options.verbose: print uploader.new_url
# this is pretty gross.
# store the url
if f['ext'] == "mp4":
ep.rax_mp4_url = uploader.new_url
elif f['ext'] == "webm":
ep.rax_mp4_url = uploader.new_url
elif f['ext'] == "ogv":
# there is no ep.rax_ogv_url
ep.rax_ogv_url = uploader.new_url
# hook for tests so that it can be browsed
# self.rax_url = uploader.new_url
# for test framework
self.last_url = uploader.new_url
else:
print "rax error!"
save_me(ep)
return success
def do_vimeo(self,ep,files,private,meta):
vimeo_success = False
uploader = vimeo_uploader.Uploader()
uploader.user = ep.show.client.vimeo_id
uploader.pathname = files[0]['pathname']
uploader.meta = meta
if self.options.test:
print 'test mode:'
print "user key:", uploader.user
print 'files = %s' % files
print 'meta = %s' % pprint.pformat(meta)
print 'skipping vimeo_upoad.py uploader.upload()'
print len(meta['description'])
elif ep.host_url and not self.options.replace:
print "skipping vimeo, already there."
youtube_success = True
else:
# down to next layer of code that will do the uploading
# uploader.debug_mode=True
youtube_success = uploader.upload()
if youtube_success:
if self.options.verbose: print uploader.new_url
# save new youtube url
ep.host_url = uploader.new_url
# for test framework
self.last_url = uploader.new_url
else:
print "youtube error! zomg"
ep.comment += "\n%s\n" % (uploader.ret_text.decode('utf-8').encode('ascii', 'xmlcharrefreplace'))
save_me(ep)
return youtube_success
def process_ep(self, ep):
if not ep.released: # and not self.options.release_all:
# --release will force the upload, overrides ep.released
if self.options.verbose: print "not released:", ep.released
return False
# collect data needed for uploading
files = self.get_files(ep)
if self.options.verbose:
print "[files]:",
pprint.pprint(files)
meta = self.collect_metadata(ep)
if self.options.verbose: pprint.pprint(meta)
# upload youtube
if not ep.show.client.youtube_id: youtube_success = True
else: youtube_success = self.do_yt(ep,files,True,meta)
# upload archive.org
if not ep.show.client.archive_id: archive_success = True
else: archive_success = self.do_arc(ep,files,meta)
# upload rackspace cdn
if not ep.show.client.rax_id: rax_success = True
else: rax_success = self.do_rax(ep,files,meta)
# upload vimeo (needs upgrading to new api)
# if not ep.show.client.vimeo_id: vimeo_success = True
# else: vimeo_success = self.do_vimeo(ep,files,meta)
return True
# youtube_success
# and archive_success \
# and rax_success
def add_more_options(self, parser):
parser.add_option('--replace', action="store_true",
help="Upload again, step on existing URL.")
parser.add_option('--release-all', action="store_true",
help="ignore the released setting.")
if __name__ == '__main__':
p=post()
p.main()
| mit | -6,825,925,396,388,716,000 | 31.425234 | 149 | 0.540496 | false |
amdouglas/OpenPNM | test/unit/Utilities/VertexOpsTest.py | 1 | 2051 | import OpenPNM
import scipy as sp
import matplotlib.pyplot as plt
import OpenPNM.Utilities.vertexops as vo
class VertexOpsTest:
def setup_class(self):
bp = sp.array([[0.2, 0.2, 0.2], [0.2, 0.8, 0.2], [0.8, 0.2, 0.2],
[0.8, 0.8, 0.2], [0.2, 0.2, 0.8], [0.2, 0.8, 0.8],
[0.8, 0.2, 0.8], [0.8, 0.8, 0.8]])
scale = 1e-4
p = (sp.random.random([len(bp), 3])-0.5)/10000
bp += p
self.ctrl = OpenPNM.Base.Controller()
self.net = OpenPNM.Network.Delaunay(domain_size=[scale, scale, scale],
base_points=bp*scale)
self.net.add_boundaries()
Ps = self.net.pores()
Ts = self.net.throats()
self.fibre_rad = 5e-6
self.geo = OpenPNM.Geometry.Voronoi(network=self.net,
pores=Ps,
throats=Ts,
fibre_rad=self.fibre_rad,
voxel_vol=False,
name='vor')
def test_scale(self):
factor = [1, 1, 0.5]
vo.scale(network=self.net,
scale_factor=factor,
linear_scaling=[True, False, False],
preserve_vol=False)
def test_porosity(self):
por = vo.porosity(self.net)
assert por < 1.0
def test_tortuosity(self):
tor = vo.tortuosity(self.net)
assert sp.all(tor > 1.0)
def test_pore2centroid(self):
temp_coords = self.net['pore.coords']
self.geo['pore.centroid'] = sp.ones([self.geo.num_pores(), 3])
vo.pore2centroid(self.net)
assert sp.sum(self.net['pore.coords'] -
sp.ones([self.geo.num_pores(), 3])) == 0.0
self.net['pore.coords'] = temp_coords
def test_plot_pore(self):
vo.plot_pore(self.geo, self.geo.pores())
def test_plot_throat(self):
vo.plot_throat(self.geo, [1, 2, 3, 4])
| mit | -870,963,963,620,059,500 | 35.625 | 78 | 0.480741 | false |
coala-analyzer/coala-gui | source/workspace/WorkspaceWindow.py | 1 | 5608 | from gi.repository import Gtk
import os
from collections import OrderedDict
from coalib.settings.ConfigurationGathering import load_configuration
from coalib.output.ConfWriter import ConfWriter
from coalib.output.printers.LogPrinter import LogPrinter
from pyprint.NullPrinter import NullPrinter
from coalib.settings.Section import Section
from coalib.misc.DictUtilities import update_ordered_dict_key
from source.workspace.SectionView import SectionView
class WorkspaceWindow(Gtk.ApplicationWindow):
def __init__(self, application, src):
Gtk.ApplicationWindow.__init__(self, application=application)
self.connect("delete_event", self.on_close)
self._ui = Gtk.Builder()
self._ui.add_from_resource("/org/coala/WorkspaceWindow.ui")
self.section_stack_map = {}
self.sections_dict = OrderedDict()
self.src = src
self._setup_view()
self.read_coafile()
def _setup_view(self):
self.headerbar = self._ui.get_object("headerbar")
self.set_titlebar(self.headerbar)
self.add(self._ui.get_object("container"))
self.stack = self._ui.get_object("main_stack")
self.sections = self._ui.get_object("sections")
self.section_switcher = self._ui.get_object("section_switcher")
self.section_switcher.connect("row-selected",
self.on_row_selection_changed)
self.add_section_button = self._ui.get_object("add_section_button")
self.add_section_button.connect("clicked", self.add_section)
def add_section(self, button=None, name=None):
section_row_template = Gtk.Builder()
section_row_template.add_from_resource('/org/coala/SectionRow.ui')
section_row = Gtk.ListBoxRow()
box = section_row_template.get_object("section_row")
editable_label = section_row_template.get_object("name-edit")
delete_button = section_row_template.get_object("delete_button")
if name is not None:
editable_label.entry.set_text(name)
self.create_section_view(widget=editable_label,
row_obejct=section_row)
editable_label.connect("edited",
self.update_section_name,
name,
self.section_stack_map[section_row])
else:
editable_label.connect("edited",
self.create_section_view,
section_row)
section_row.add(box)
section_row.set_visible(True)
delete_button.connect("clicked", self.delete_row, section_row)
self.section_switcher.add(section_row)
self.section_switcher.queue_draw()
return section_row
def delete_row(self, button, listboxrow):
del self.sections_dict[self.section_stack_map[listboxrow].get_name()]
self.section_stack_map[listboxrow].destroy()
del self.section_stack_map[listboxrow]
listboxrow.destroy()
conf_writer = ConfWriter(self.src+'/.coafile')
conf_writer.write_sections(self.sections_dict)
conf_writer.close()
def on_close(self, event, widget):
self.get_application().greeter.show()
self.destroy()
def create_section_view(self, widget=None, arg=None, row_obejct=None):
section_view = SectionView(self.sections_dict, self.src)
section_view.set_visible(True)
section_view.set_name(widget.get_name())
self.sections.add_named(section_view, widget.get_name())
self.sections.set_visible_child_name(widget.get_name())
if arg is not None:
widget.connect("edited",
self.update_section_name,
widget.get_name(),
section_view)
self.sections_dict[widget.get_name()] = Section(widget.get_name())
section_view.add_setting()
conf_writer = ConfWriter(self.src+'/.coafile')
conf_writer.write_sections(self.sections_dict)
conf_writer.close()
self.section_stack_map[row_obejct] = section_view
def on_row_selection_changed(self, listbox, row):
self.sections.set_visible_child(self.section_stack_map[row])
def read_coafile(self):
if os.path.isfile(self.src+'/.coafile'):
self.sections_dict = load_configuration(
["-c", self.src+'/.coafile'], LogPrinter(NullPrinter()))[0]
for section in self.sections_dict:
section_row = self.add_section(name=section)
for setting in self.sections_dict[section].contents:
if "comment" in setting:
continue
self.section_stack_map[section_row].add_setting(
self.sections_dict[section].contents[setting])
self.section_stack_map[section_row].add_setting()
def update_section_name(self, widget, arg, old_name, section_view):
section_view.set_name(widget.get_name())
self.sections_dict[old_name].name = widget.get_name()
self.sections_dict = update_ordered_dict_key(self.sections_dict,
old_name,
widget.get_name())
widget.connect("edited", self.update_section_name, widget.get_name())
conf_writer = ConfWriter(self.src+'/.coafile')
conf_writer.write_sections(self.sections_dict)
conf_writer.close()
| agpl-3.0 | -2,998,170,187,900,760,000 | 42.8125 | 78 | 0.605742 | false |
jamesaud/se1-group4 | jmatcher/posts/migrations/0001_initial.py | 1 | 1897 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 16:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=1000)),
('image', models.ImageField(null=True, upload_to='')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('likes', models.ManyToManyField(related_name='likes', to=settings.AUTH_USER_MODEL)),
('shares', models.ManyToManyField(related_name='shares', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PostComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=250)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('commenting_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post')),
],
),
]
| mit | -4,786,760,844,580,201,000 | 43.116279 | 129 | 0.605166 | false |
hyunjunbrucelee/2017sejongAI | hash/test.py | 1 | 1327 | from PIL import Image
import numpy as np
# 이미지 데이터를 Averages Hash로 변환 함수 선언
def average_hash(fname, size = 16): #average_hash(파일이름, 사이즈)
img = Image.open(fname) # 이미지 데이터 열기
img = img.convert('L') # 그레이스케일로 변환
#'1'지정하게 되면 이진화 그밖에 "RGB", "RGBA", "CMYAK" 모드 지정가능
img = img.resize((size, size), Image.ANTIALIAS) # 리사이즈
pixel_data = img.getdata() # 픽셀 데이테 가져오기
pixels = np.array(pixel_data) # Numpy 배열로 변환하기
pixels = pixels.reshape((size, size)) # 2차원 배열로 변환
avg = pixels.mean() # 평균 구하기
diff = 1*(pixels>avg) #평균보다 크면 1, 작으면 0으로 변환하기
return diff
# 이전 해시로 변환하는 함수 선언
def np2hash(n):
bhash = []
for n1 in ahash.tolist():
s1 = [str(i) for i in n1]
s2 = "".join(s1)
i = int(s2,2) #이진수를 정수로 변환하기
bhash.append("%04x"%i)
return "".join(bhash)
# Average Hash 출력하기
ahash = average_hash('dog.jpg')
print(ahash)
print(np2hash(ahash))
#학습을 시킬 준비가 됨
| gpl-3.0 | -888,860,325,065,370,600 | 27.083333 | 80 | 0.539637 | false |
stellaf/odoofleet | fleet_task_issue/fleet_task_issue.py | 1 | 7626 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addon by CLEARCORP S.A. <http://clearcorp.co.cr> and AURIUM TECHNOLOGIES <http://auriumtechnologies.com>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
class FleetVehicle(models.Model):
_inherit = 'fleet.vehicle'
@api.model
def create(self, vals):
acount_obj=self.env['account.analytic.account']
fleet_id = super(FleetVehicle, self).create(vals)
account_id=acount_obj.create({'name':self._vehicle_name_get(fleet_id)})
fleet_id.write({'analytic_account_id':account_id.id,'use_tasks':True,'use_issues':True})
return fleet_id
@api.multi
def _count_vehicle_task(self):
project_obj = self.env['project.project']
self.task_count=len(project_obj.search([('analytic_account_id', '=', self.analytic_account_id.id)]).task_ids)
@api.multi
def _count_vehicle_issue(self):
issue_obj = self.env['project.project']
self.issue_count=len(issue_obj.search([('analytic_account_id', '=', self.analytic_account_id.id)]).issue_ids)
@api.multi
def _count_swhwlogbook_task(self):
domain=[('project_id.analytic_account_id', '=', self.analytic_account_id.id), ('task_type_id.name','ilike','SWHW')]
self.swhw_task_count=self.env['project.task'].search_count(domain)
@api.multi
def _count_wkshop_task(self):
domain=[('project_id.analytic_account_id', '=', self.analytic_account_id.id), ('task_type_id.name','ilike','Workshop')]
self.wkshop_task_count=self.env['project.task'].search_count(domain)
@api.multi
def write(self, vals):
acount_obj=self.env['account.analytic.account']
res = super(FleetVehicle, self).write(vals)
if not self.analytic_account_id:
account_id=acount_obj.create({'name':self._vehicle_name_get(self)})
self.write({'analytic_account_id':account_id.id,'use_tasks':True,'use_issues':True})
self.analytic_account_id.write({'name':self.name,'use_tasks':True,'use_issues':True})
return res
@api.multi
def _vehicle_name_get(self,record):
res = record.model_id.brand_id.name + '/' + record.model_id.name + '/' + record.license_plate
return res
@api.multi
def action_view_alltasks(self):
action = self.env.ref('project.act_project_project_2_project_task_all')
active_id = self.env['project.project'].search([('analytic_account_id', '=', self.analytic_account_id.id)]).id
context = {'group_by': 'stage_id', 'search_default_project_id': [active_id], 'default_project_id': active_id, }
return {
'key2':'tree_but_open',
'name': action.name,
'res_model': 'project.task',
'help': action.help,
'type': action.type,
'view_type': action.view_type,
'view_mode': action.view_mode,
'res_id': active_id,
'views': action.views,
'target': action.target,
'context':context,
'nodestroy': True,
'flags': {'form': {'action_buttons': True}}
}
@api.multi
def action_view_allissues(self):
action = self.env.ref('project_issue.act_project_project_2_project_issue_all')
active_id = self.env['project.project'].search([('analytic_account_id', '=', self.analytic_account_id.id)]).id
context = {'group_by': 'stage_id', 'search_default_project_id': [active_id], 'default_project_id': active_id,}
return {
'name': action.name,
'res_model': 'project.issue',
'help': action.help,
'type': action.type,
'view_type': action.view_type,
'view_mode': action.view_mode,
'views': action.views,
'target': action.target,
'res_id': active_id,
'context':context,
'nodestroy': True,
'flags': {'form': {'action_buttons': True}}
}
# this part of code, you shall define the project task type to "SWHW" and "Workshop", using the apps in the odoo store, named "task type color"
# @api.multi
# def action_view_SWHWlogbooktasks(self):
# self.ensure_one()
# action = self.env.ref('project.act_project_project_2_project_task_all')
# active_id = self.env['project.project'].search([('analytic_account_id', '=', self.analytic_account_id.id)]).id
# context = {'group_by': 'stage_id', 'search_default_project_id': [active_id], 'default_project_id': active_id, 'task_type_id.name':'SWHW',}
# return {
# 'key2':'tree_but_open',
# 'name': action.name,
# 'res_model': 'project.task',
# 'help': action.help,
# 'type': action.type,
# 'view_type': action.view_type,
# 'view_mode': action.view_mode,
# 'res_id': active_id,
# 'views': action.views,
# 'target': action.target,
# 'context':context,
# 'nodestroy': True,
# 'flags': {'form': {'action_buttons': True}}
# }
#
# @api.multi
# def action_view_Workshoptasks(self):
# self.ensure_one()
# action = self.env.ref('project.act_project_project_2_project_task_all')
# active_id = self.env['project.project'].search([('analytic_account_id', '=', self.analytic_account_id.id)]).id
# context = {'group_by': 'stage_id', 'search_default_project_id': [active_id], 'default_project_id': active_id, 'task_type_id.name':'Workshop',}
# return {
# 'key2':'tree_but_open',
# 'name': action.name,
# 'res_model': 'project.task',
# 'help': action.help,
# 'type': action.type,
# 'view_type': action.view_type,
# 'view_mode': action.view_mode,
# 'res_id': active_id,
# 'views': action.views,
# 'target': action.target,
# 'context':context,
# 'nodestroy': True,
# 'flags': {'form': {'action_buttons': True}}
# }
analytic_account_id = fields.Many2one('account.analytic.account',string='Analytic Account')
task_count = fields.Integer(compute=_count_vehicle_task, string="Vehicle Tasks" , multi=True)
issue_count = fields.Integer(compute=_count_vehicle_issue, string="Vehicle Issues" , multi=True)
# swhw_task_count = fields.Integer(compute=_count_swhwlogbook_task, string="SWHWlogbook Tasks" , multi=True)
# wkshop_task_count = fields.Integer(compute=_count_wkshop_task, string="workshop Tasks" , multi=True)
class fleet_vehicle_log_services(models.Model):
_inherit = 'fleet.vehicle.log.services'
invoice_id = fields.Many2one('account.invoice',string='Facture')
| gpl-3.0 | -4,983,628,240,611,340,000 | 44.664671 | 152 | 0.590873 | false |
ByrdOfAFeather/AlphaTrion | Community/migrations/0034_auto_20171121_1316.py | 1 | 1619 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-21 18:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Community', '0033_auto_20171112_1742'),
]
operations = [
migrations.CreateModel(
name='SongSuggestions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('suggestions', models.TextField(help_text="Please list links to songs, we can't play it with just a name")),
('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Community.CommunityInst')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='communityextraratings',
name='overall_rating',
field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, 'e'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], default=5),
),
migrations.AlterField(
model_name='communitygameratings',
name='game_rating',
field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, 'e'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], default=5),
),
]
| mit | 4,240,877,986,806,022,000 | 42.756757 | 169 | 0.575664 | false |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert_gui/tools/load_results/load_results_tool.py | 1 | 1806 | # Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'load_results_tool.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert_gui.models.connectors.load_results import LoadResultsModel
from ert_gui.tools import Tool
from ert_gui.tools.load_results import LoadResultsPanel
from ert_gui.widgets import util
from ert_gui.widgets.closable_dialog import ClosableDialog
class LoadResultsTool(Tool):
def __init__(self):
super(LoadResultsTool, self).__init__("Load results manually", "tools/load_manually", util.resourceIcon("ide/table_import"))
self.__import_widget = None
self.__dialog = None
self.setVisible(False)
def trigger(self):
if self.__import_widget is None:
self.__import_widget = LoadResultsPanel()
self.__dialog = ClosableDialog("Load results manually", self.__import_widget, self.parent())
self.__import_widget.setCurrectCase()
self.__dialog.addButton("Load", self.load)
self.__dialog.exec_()
def load(self):
self.__import_widget.load()
self.__dialog.accept()
def toggleAdvancedMode(self, advanced_mode):
self.setVisible(advanced_mode)
if not LoadResultsModel().isValidRunPath():
self.setEnabled(False)
| gpl-3.0 | 7,848,559,897,122,455,000 | 35.12 | 132 | 0.696567 | false |
sergpolly/FluUtils | FluDB_coding_aln/subsample_msa_random.py | 1 | 2581 | import re
import os
import sys
from Bio import Seq
from Bio import SeqIO
import pandas as pd
import itertools
import numpy as np
import random
import subprocess as sub
def get_least_gaps_seq(seq_dict,length,side='left'):
middle = length/2
min_gaps = length
min_key = ''
# for all sequences check number of gaps in either half and return its id ...
for seq_id in seq_dict:
seq_half = seq_dict[seq_id].seq[:middle] if side=='left' else seq_dict[seq_id].seq[middle:]
num_gaps = seq_half.count('-')
# reassign the min gaps counts and id in a procedural fashion ...
if num_gaps < min_gaps:
min_gaps = num_gaps
min_key = seq_id
# return ...
return (min_key, min_gaps)
# command to clust sequences and get a draft alignment ...
# usearch -cluster_fast seg1.fasta -id 0.993 -centroids nr.fasta -uc clust.uc
path = "/home/venevs/fludb_pH1N1"
if len(sys.argv) < 3:
print "Call signature is: \"%s msa_fname subs_size\""
msa_fname = sys.argv[1]
subs_size = int(sys.argv[2])
msa = SeqIO.parse(msa_fname,"fasta")
msa = SeqIO.to_dict(msa)
# chosen msa keys ...
chosen_keys = random.sample(msa,subs_size)
# add sequences with the longest UTRs as well ...
alignment_len = len(msa[chosen_keys[0]].seq)
# find sequence with the least gaps in the left half of the sequence ...
# supposedly - longest left-UTR
left_utr_key,_ = get_least_gaps_seq(msa,alignment_len,side='left')
# find sequence with the least gaps in the right half of the sequence ...
# supposedly - longest right-UTR
right_utr_key,_ = get_least_gaps_seq(msa,alignment_len,side='right')
# include those 2 if they are yet in the subsampled alignement ..
if left_utr_key not in chosen_keys:
chosen_keys += [left_utr_key, ]
if right_utr_key not in chosen_keys:
chosen_keys += [right_utr_key, ]
# now extract aligned sequences ...
alignment_out = [msa[sid] for sid in chosen_keys]
# output the alignment now ...
tmp_afa_fname = "tmp.afa"
SeqIO.write(alignment_out,tmp_afa_fname,"fasta")
# htm out fname :
out_htm = os.path.basename(msa_fname)+'.htm'
cmd = "mview -in fasta -ruler on -moltype dna -coloring consensus -threshold 60 -consensus on -con_threshold 60 -html head %s > %s"%(tmp_afa_fname,out_htm)
print
print cmd
print
retcode = sub.call(cmd,shell=True)
if retcode == 0:
print "Complete ..."
else:
print "mview retcode was %s"%str(retcode)
# #
# # remove temporary file here ...
# os.remove(tmp_afa_fname)
# print "tmp file removed ..."
# # now make an html alignment using mview ...
| mit | -1,590,030,344,426,314,000 | 12.728723 | 155 | 0.673382 | false |
leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py | 2 | 39416 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for interacting with Bugzilla
import mimetypes
import re
import StringIO
import socket
import urllib
from datetime import datetime # used in timestamp()
from .attachment import Attachment
from .bug import Bug
from webkitpy.common.system.deprecated_logging import log
from webkitpy.common.config import committers
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.user import User
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer
class EditUsersParser(object):
def __init__(self):
self._group_name_to_group_string_cache = {}
def _login_and_uid_from_row(self, row):
first_cell = row.find("td")
# The first row is just headers, we skip it.
if not first_cell:
return None
# When there were no results, we have a fake "<none>" entry in the table.
if first_cell.find(text="<none>"):
return None
# Otherwise the <td> contains a single <a> which contains the login name or a single <i> with the string "<none>".
anchor_tag = first_cell.find("a")
login = unicode(anchor_tag.string).strip()
user_id = int(re.search(r"userid=(\d+)", str(anchor_tag['href'])).group(1))
return (login, user_id)
def login_userid_pairs_from_edit_user_results(self, results_page):
soup = BeautifulSoup(results_page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
results_table = soup.find(id="admin_table")
login_userid_pairs = [self._login_and_uid_from_row(row) for row in results_table('tr')]
# Filter out None from the logins.
return filter(lambda pair: bool(pair), login_userid_pairs)
def _group_name_and_string_from_row(self, row):
label_element = row.find('label')
group_string = unicode(label_element['for'])
group_name = unicode(label_element.find('strong').string).rstrip(':')
return (group_name, group_string)
def user_dict_from_edit_user_page(self, page):
soup = BeautifulSoup(page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
user_table = soup.find("table", {'class': 'main'})
user_dict = {}
for row in user_table('tr'):
label_element = row.find('label')
if not label_element:
continue # This must not be a row we know how to parse.
if row.find('table'):
continue # Skip the <tr> holding the groups table.
key = label_element['for']
if "group" in key:
key = "groups"
value = user_dict.get('groups', set())
# We must be parsing a "tr" inside the inner group table.
(group_name, _) = self._group_name_and_string_from_row(row)
if row.find('input', {'type': 'checkbox', 'checked': 'checked'}):
value.add(group_name)
else:
value = unicode(row.find('td').string).strip()
user_dict[key] = value
return user_dict
def _group_rows_from_edit_user_page(self, edit_user_page):
soup = BeautifulSoup(edit_user_page, convertEntities=BeautifulSoup.HTML_ENTITIES)
return soup('td', {'class': 'groupname'})
def group_string_from_name(self, edit_user_page, group_name):
# Bugzilla uses "group_NUMBER" strings, which may be different per install
# so we just look them up once and cache them.
if not self._group_name_to_group_string_cache:
rows = self._group_rows_from_edit_user_page(edit_user_page)
name_string_pairs = map(self._group_name_and_string_from_row, rows)
self._group_name_to_group_string_cache = dict(name_string_pairs)
return self._group_name_to_group_string_cache[group_name]
def timestamp():
return datetime.now().strftime("%Y%m%d%H%M%S")
# A container for all of the logic for making and parsing bugzilla queries.
class BugzillaQueries(object):
def __init__(self, bugzilla):
self._bugzilla = bugzilla
def _is_xml_bugs_form(self, form):
# ClientForm.HTMLForm.find_control throws if the control is not found,
# so we do a manual search instead:
return "xml" in [control.id for control in form.controls]
# This is kinda a hack. There is probably a better way to get this information from bugzilla.
def _parse_result_count(self, results_page):
result_count_text = BeautifulSoup(results_page).find(attrs={'class': 'bz_result_count'}).string
result_count_parts = result_count_text.strip().split(" ")
if result_count_parts[0] == "Zarro":
return 0
if result_count_parts[0] == "One":
return 1
return int(result_count_parts[0])
# Note: _load_query, _fetch_bug and _fetch_bugs_from_advanced_query
# are the only methods which access self._bugzilla.
def _load_query(self, query):
self._bugzilla.authenticate()
full_url = "%s%s" % (config_urls.bug_server_url, query)
return self._bugzilla.browser.open(full_url)
def _fetch_bugs_from_advanced_query(self, query):
results_page = self._load_query(query)
# Some simple searches can return a single result.
results_url = results_page.geturl()
if results_url.find("/show_bug.cgi?id=") != -1:
bug_id = int(results_url.split("=")[-1])
return [self._fetch_bug(bug_id)]
if not self._parse_result_count(results_page):
return []
# Bugzilla results pages have an "XML" submit button at the bottom
# which can be used to get an XML page containing all of the <bug> elements.
# This is slighty lame that this assumes that _load_query used
# self._bugzilla.browser and that it's in an acceptable state.
self._bugzilla.browser.select_form(predicate=self._is_xml_bugs_form)
bugs_xml = self._bugzilla.browser.submit()
return self._bugzilla._parse_bugs_from_xml(bugs_xml)
def _fetch_bug(self, bug_id):
return self._bugzilla.fetch_bug(bug_id)
def _fetch_bug_ids_advanced_query(self, query):
soup = BeautifulSoup(self._load_query(query))
# The contents of the <a> inside the cells in the first column happen
# to be the bug id.
return [int(bug_link_cell.find("a").string)
for bug_link_cell in soup('td', "first-child")]
def _parse_attachment_ids_request_query(self, page):
digits = re.compile("\d+")
attachment_href = re.compile("attachment.cgi\?id=\d+&action=review")
attachment_links = SoupStrainer("a", href=attachment_href)
return [int(digits.search(tag["href"]).group(0))
for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)]
def _fetch_attachment_ids_request_query(self, query):
return self._parse_attachment_ids_request_query(self._load_query(query))
def _parse_quips(self, page):
soup = BeautifulSoup(page, convertEntities=BeautifulSoup.HTML_ENTITIES)
quips = soup.find(text=re.compile(r"Existing quips:")).findNext("ul").findAll("li")
return [unicode(quip_entry.string) for quip_entry in quips]
def fetch_quips(self):
return self._parse_quips(self._load_query("/quips.cgi?action=show"))
# List of all r+'d bugs.
def fetch_bug_ids_from_pending_commit_list(self):
needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B"
return self._fetch_bug_ids_advanced_query(needs_commit_query_url)
def fetch_bugs_matching_quicksearch(self, search_string):
# We may want to use a more explicit query than "quicksearch".
# If quicksearch changes we should probably change to use
# a normal buglist.cgi?query_format=advanced query.
quicksearch_url = "buglist.cgi?quicksearch=%s" % urllib.quote(search_string)
return self._fetch_bugs_from_advanced_query(quicksearch_url)
# Currently this returns all bugs across all components.
# In the future we may wish to extend this API to construct more restricted searches.
def fetch_bugs_matching_search(self, search_string):
query = "buglist.cgi?query_format=advanced"
if search_string:
query += "&short_desc_type=allwordssubstr&short_desc=%s" % urllib.quote(search_string)
return self._fetch_bugs_from_advanced_query(query)
def fetch_patches_from_pending_commit_list(self):
return sum([self._fetch_bug(bug_id).reviewed_patches()
for bug_id in self.fetch_bug_ids_from_pending_commit_list()], [])
def fetch_bugs_from_review_queue(self, cc_email=None):
query = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
if cc_email:
query += "&emailcc1=1&emailtype1=substring&email1=%s" % urllib.quote(cc_email)
return self._fetch_bugs_from_advanced_query(query)
def fetch_bug_ids_from_commit_queue(self):
commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed"
return self._fetch_bug_ids_advanced_query(commit_queue_url)
def fetch_patches_from_commit_queue(self):
# This function will only return patches which have valid committers
# set. It won't reject patches with invalid committers/reviewers.
return sum([self._fetch_bug(bug_id).commit_queued_patches()
for bug_id in self.fetch_bug_ids_from_commit_queue()], [])
def fetch_bug_ids_from_review_queue(self):
review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
return self._fetch_bug_ids_advanced_query(review_queue_url)
# This method will make several requests to bugzilla.
def fetch_patches_from_review_queue(self, limit=None):
# [:None] returns the whole array.
return sum([self._fetch_bug(bug_id).unreviewed_patches()
for bug_id in self.fetch_bug_ids_from_review_queue()[:limit]], [])
# NOTE: This is the only client of _fetch_attachment_ids_request_query
# This method only makes one request to bugzilla.
def fetch_attachment_ids_from_review_queue(self):
review_queue_url = "request.cgi?action=queue&type=review&group=type"
return self._fetch_attachment_ids_request_query(review_queue_url)
# This only works if your account has edituser privileges.
# We could easily parse https://bugs.webkit.org/userprefs.cgi?tab=permissions to
# check permissions, but bugzilla will just return an error if we don't have them.
def fetch_login_userid_pairs_matching_substring(self, search_string):
review_queue_url = "editusers.cgi?action=list&matchvalue=login_name&matchstr=%s&matchtype=substr" % urllib.quote(search_string)
results_page = self._load_query(review_queue_url)
# We could pull the EditUsersParser off Bugzilla if needed.
return EditUsersParser().login_userid_pairs_from_edit_user_results(results_page)
# FIXME: We should consider adding a BugzillaUser class.
def fetch_logins_matching_substring(self, search_string):
pairs = self.fetch_login_userid_pairs_matching_substring(search_string)
return map(lambda pair: pair[0], pairs)
class Bugzilla(object):
def __init__(self, committers=committers.CommitterList()):
self.authenticated = False
self.queries = BugzillaQueries(self)
self.committers = committers
self.cached_quips = []
self.edit_user_parser = EditUsersParser()
self._browser = None
def _get_browser(self):
if not self._browser:
self.setdefaulttimeout(600)
from webkitpy.thirdparty.autoinstalled.mechanize import Browser
self._browser = Browser()
# Ignore bugs.webkit.org/robots.txt until we fix it to allow this script.
self._browser.set_handle_robots(False)
return self._browser
def _set_browser(self, value):
self._browser = value
browser = property(_get_browser, _set_browser)
def setdefaulttimeout(self, value):
socket.setdefaulttimeout(value)
def fetch_user(self, user_id):
self.authenticate()
edit_user_page = self.browser.open(self.edit_user_url_for_id(user_id))
return self.edit_user_parser.user_dict_from_edit_user_page(edit_user_page)
def add_user_to_groups(self, user_id, group_names):
self.authenticate()
user_edit_page = self.browser.open(self.edit_user_url_for_id(user_id))
self.browser.select_form(nr=1)
for group_name in group_names:
group_string = self.edit_user_parser.group_string_from_name(user_edit_page, group_name)
self.browser.find_control(group_string).items[0].selected = True
self.browser.submit()
def quips(self):
# We only fetch and parse the list of quips once per instantiation
# so that we do not burden bugs.webkit.org.
if not self.cached_quips:
self.cached_quips = self.queries.fetch_quips()
return self.cached_quips
def bug_url_for_bug_id(self, bug_id, xml=False):
if not bug_id:
return None
content_type = "&ctype=xml&excludefield=attachmentdata" if xml else ""
return "%sshow_bug.cgi?id=%s%s" % (config_urls.bug_server_url, bug_id, content_type)
def short_bug_url_for_bug_id(self, bug_id):
if not bug_id:
return None
return "http://webkit.org/b/%s" % bug_id
def add_attachment_url(self, bug_id):
return "%sattachment.cgi?action=enter&bugid=%s" % (config_urls.bug_server_url, bug_id)
def attachment_url_for_id(self, attachment_id, action="view"):
if not attachment_id:
return None
action_param = ""
if action and action != "view":
action_param = "&action=%s" % action
return "%sattachment.cgi?id=%s%s" % (config_urls.bug_server_url,
attachment_id,
action_param)
def edit_user_url_for_id(self, user_id):
return "%seditusers.cgi?action=edit&userid=%s" % (config_urls.bug_server_url, user_id)
def _parse_attachment_flag(self,
element,
flag_name,
attachment,
result_key):
flag = element.find('flag', attrs={'name': flag_name})
if flag:
attachment[flag_name] = flag['status']
if flag['status'] == '+':
attachment[result_key] = flag['setter']
# Sadly show_bug.cgi?ctype=xml does not expose the flag modification date.
def _string_contents(self, soup):
# WebKit's bugzilla instance uses UTF-8.
# BeautifulStoneSoup always returns Unicode strings, however
# the .string method returns a (unicode) NavigableString.
# NavigableString can confuse other parts of the code, so we
# convert from NavigableString to a real unicode() object using unicode().
return unicode(soup.string)
# Example: 2010-01-20 14:31 PST
# FIXME: Some bugzilla dates seem to have seconds in them?
# Python does not support timezones out of the box.
# Assume that bugzilla always uses PST (which is true for bugs.webkit.org)
_bugzilla_date_format = "%Y-%m-%d %H:%M:%S"
@classmethod
def _parse_date(cls, date_string):
(date, time, time_zone) = date_string.split(" ")
if time.count(':') == 1:
# Add seconds into the time.
time += ':0'
# Ignore the timezone because python doesn't understand timezones out of the box.
date_string = "%s %s" % (date, time)
return datetime.strptime(date_string, cls._bugzilla_date_format)
def _date_contents(self, soup):
return self._parse_date(self._string_contents(soup))
def _parse_attachment_element(self, element, bug_id):
attachment = {}
attachment['bug_id'] = bug_id
attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1")
attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1")
attachment['id'] = int(element.find('attachid').string)
# FIXME: No need to parse out the url here.
attachment['url'] = self.attachment_url_for_id(attachment['id'])
attachment["attach_date"] = self._date_contents(element.find("date"))
attachment['name'] = self._string_contents(element.find('desc'))
attachment['attacher_email'] = self._string_contents(element.find('attacher'))
attachment['type'] = self._string_contents(element.find('type'))
self._parse_attachment_flag(
element, 'review', attachment, 'reviewer_email')
self._parse_attachment_flag(
element, 'commit-queue', attachment, 'committer_email')
return attachment
def _parse_log_descr_element(self, element):
comment = {}
comment['comment_email'] = self._string_contents(element.find('who'))
comment['comment_date'] = self._date_contents(element.find('bug_when'))
comment['text'] = self._string_contents(element.find('thetext'))
return comment
def _parse_bugs_from_xml(self, page):
soup = BeautifulSoup(page)
# Without the unicode() call, BeautifulSoup occasionally complains of being
# passed None for no apparent reason.
return [Bug(self._parse_bug_dictionary_from_xml(unicode(bug_xml)), self) for bug_xml in soup('bug')]
def _parse_bug_dictionary_from_xml(self, page):
soup = BeautifulStoneSoup(page, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
bug = {}
bug["id"] = int(soup.find("bug_id").string)
bug["title"] = self._string_contents(soup.find("short_desc"))
bug["bug_status"] = self._string_contents(soup.find("bug_status"))
dup_id = soup.find("dup_id")
if dup_id:
bug["dup_id"] = self._string_contents(dup_id)
bug["reporter_email"] = self._string_contents(soup.find("reporter"))
bug["assigned_to_email"] = self._string_contents(soup.find("assigned_to"))
bug["cc_emails"] = [self._string_contents(element) for element in soup.findAll('cc')]
bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')]
bug["comments"] = [self._parse_log_descr_element(element) for element in soup.findAll('long_desc')]
return bug
# Makes testing fetch_*_from_bug() possible until we have a better
# BugzillaNetwork abstration.
def _fetch_bug_page(self, bug_id):
bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
log("Fetching: %s" % bug_url)
return self.browser.open(bug_url)
def fetch_bug_dictionary(self, bug_id):
try:
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
except KeyboardInterrupt:
raise
except:
self.authenticate()
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
# FIXME: A BugzillaCache object should provide all these fetch_ methods.
def fetch_bug(self, bug_id):
return Bug(self.fetch_bug_dictionary(bug_id), self)
def fetch_attachment_contents(self, attachment_id):
attachment_url = self.attachment_url_for_id(attachment_id)
# We need to authenticate to download patches from security bugs.
self.authenticate()
return self.browser.open(attachment_url).read()
def _parse_bug_id_from_attachment_page(self, page):
# The "Up" relation happens to point to the bug.
up_link = BeautifulSoup(page).find('link', rel='Up')
if not up_link:
# This attachment does not exist (or you don't have permissions to
# view it).
return None
match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href'])
return int(match.group('bug_id'))
def bug_id_for_attachment_id(self, attachment_id):
self.authenticate()
attachment_url = self.attachment_url_for_id(attachment_id, 'edit')
log("Fetching: %s" % attachment_url)
page = self.browser.open(attachment_url)
return self._parse_bug_id_from_attachment_page(page)
# FIXME: This should just return Attachment(id), which should be able to
# lazily fetch needed data.
def fetch_attachment(self, attachment_id):
# We could grab all the attachment details off of the attachment edit
# page but we already have working code to do so off of the bugs page,
# so re-use that.
bug_id = self.bug_id_for_attachment_id(attachment_id)
if not bug_id:
return None
attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True)
for attachment in attachments:
if attachment.id() == int(attachment_id):
return attachment
return None # This should never be hit.
def authenticate(self):
if self.authenticated:
return
credentials = Credentials(config_urls.bug_server_host, git_prefix="bugzilla")
attempts = 0
while not self.authenticated:
attempts += 1
username, password = credentials.read_credentials()
log("Logging in as %s..." % username)
self.browser.open(config_urls.bug_server_url +
"index.cgi?GoAheadAndLogIn=1")
self.browser.select_form(name="login")
self.browser['Bugzilla_login'] = username
self.browser['Bugzilla_password'] = password
self.browser.find_control("Bugzilla_restrictlogin").items[0].selected = False
response = self.browser.submit()
match = re.search("<title>(.+?)</title>", response.read())
# If the resulting page has a title, and it contains the word
# "invalid" assume it's the login failure page.
if match and re.search("Invalid", match.group(1), re.IGNORECASE):
errorMessage = "Bugzilla login failed: %s" % match.group(1)
# raise an exception only if this was the last attempt
if attempts < 5:
log(errorMessage)
else:
raise Exception(errorMessage)
else:
self.authenticated = True
self.username = username
# FIXME: Use enum instead of two booleans
def _commit_queue_flag(self, mark_for_landing, mark_for_commit_queue):
if mark_for_landing:
user = self.committers.account_by_email(self.username)
mark_for_commit_queue = True
if not user:
log("Your Bugzilla login is not listed in committers.py. Uploading with cq? instead of cq+")
mark_for_landing = False
elif not user.can_commit:
log("You're not a committer yet or haven't updated committers.py yet. Uploading with cq? instead of cq+")
mark_for_landing = False
if mark_for_landing:
return '+'
if mark_for_commit_queue:
return '?'
return 'X'
# FIXME: mark_for_commit_queue and mark_for_landing should be joined into a single commit_flag argument.
def _fill_attachment_form(self,
description,
file_object,
mark_for_review=False,
mark_for_commit_queue=False,
mark_for_landing=False,
is_patch=False,
filename=None,
mimetype=None):
self.browser['description'] = description
if is_patch:
self.browser['ispatch'] = ("1",)
# FIXME: Should this use self._find_select_element_for_flag?
self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',)
self.browser['flag_type-3'] = (self._commit_queue_flag(mark_for_landing, mark_for_commit_queue),)
filename = filename or "%s.patch" % timestamp()
if not mimetype:
mimetypes.add_type('text/plain', '.patch') # Make sure mimetypes knows about .patch
mimetype, _ = mimetypes.guess_type(filename)
if not mimetype:
mimetype = "text/plain" # Bugzilla might auto-guess for us and we might not need this?
self.browser.add_file(file_object, mimetype, filename, 'data')
def _file_object_for_upload(self, file_or_string):
if hasattr(file_or_string, 'read'):
return file_or_string
# Only if file_or_string is not already encoded do we want to encode it.
if isinstance(file_or_string, unicode):
file_or_string = file_or_string.encode('utf-8')
return StringIO.StringIO(file_or_string)
# timestamp argument is just for unittests.
def _filename_for_upload(self, file_object, bug_id, extension="txt", timestamp=timestamp):
if hasattr(file_object, "name"):
return file_object.name
return "bug-%s-%s.%s" % (bug_id, timestamp(), extension)
def add_attachment_to_bug(self, bug_id, file_or_string, description, filename=None, comment_text=None, mimetype=None):
self.authenticate()
log('Adding attachment "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
self.browser.open(self.add_attachment_url(bug_id))
self.browser.select_form(name="entryform")
file_object = self._file_object_for_upload(file_or_string)
filename = filename or self._filename_for_upload(file_object, bug_id)
self._fill_attachment_form(description, file_object, filename=filename, mimetype=mimetype)
if comment_text:
log(comment_text)
self.browser['comment'] = comment_text
self.browser.submit()
# FIXME: The arguments to this function should be simplified and then
# this should be merged into add_attachment_to_bug
def add_patch_to_bug(self,
bug_id,
file_or_string,
description,
comment_text=None,
mark_for_review=False,
mark_for_commit_queue=False,
mark_for_landing=False):
self.authenticate()
log('Adding patch "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
self.browser.open(self.add_attachment_url(bug_id))
self.browser.select_form(name="entryform")
file_object = self._file_object_for_upload(file_or_string)
filename = self._filename_for_upload(file_object, bug_id, extension="patch")
self._fill_attachment_form(description,
file_object,
mark_for_review=mark_for_review,
mark_for_commit_queue=mark_for_commit_queue,
mark_for_landing=mark_for_landing,
is_patch=True,
filename=filename)
if comment_text:
log(comment_text)
self.browser['comment'] = comment_text
self.browser.submit()
# FIXME: There has to be a more concise way to write this method.
def _check_create_bug_response(self, response_html):
match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>",
response_html)
if match:
return match.group('bug_id')
match = re.search(
'<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
response_html,
re.DOTALL)
error_message = "FAIL"
if match:
text_lines = BeautifulSoup(
match.group('error_message')).findAll(text=True)
error_message = "\n" + '\n'.join(
[" " + line.strip()
for line in text_lines if line.strip()])
raise Exception("Bug not created: %s" % error_message)
def create_bug(self,
bug_title,
bug_description,
component=None,
diff=None,
patch_description=None,
cc=None,
blocked=None,
assignee=None,
mark_for_review=False,
mark_for_commit_queue=False):
self.authenticate()
log('Creating bug with title "%s"' % bug_title)
self.browser.open(config_urls.bug_server_url + "enter_bug.cgi?product=WebKit")
self.browser.select_form(name="Create")
component_items = self.browser.find_control('component').items
component_names = map(lambda item: item.name, component_items)
if not component:
component = "New Bugs"
if component not in component_names:
component = User.prompt_with_list("Please pick a component:", component_names)
self.browser["component"] = [component]
if cc:
self.browser["cc"] = cc
if blocked:
self.browser["blocked"] = unicode(blocked)
if not assignee:
assignee = self.username
if assignee and not self.browser.find_control("assigned_to").disabled:
self.browser["assigned_to"] = assignee
self.browser["short_desc"] = bug_title
self.browser["comment"] = bug_description
if diff:
# _fill_attachment_form expects a file-like object
# Patch files are already binary, so no encoding needed.
assert(isinstance(diff, str))
patch_file_object = StringIO.StringIO(diff)
self._fill_attachment_form(
patch_description,
patch_file_object,
mark_for_review=mark_for_review,
mark_for_commit_queue=mark_for_commit_queue,
is_patch=True)
response = self.browser.submit()
bug_id = self._check_create_bug_response(response.read())
log("Bug %s created." % bug_id)
log("%sshow_bug.cgi?id=%s" % (config_urls.bug_server_url, bug_id))
return bug_id
def _find_select_element_for_flag(self, flag_name):
# FIXME: This will break if we ever re-order attachment flags
if flag_name == "review":
return self.browser.find_control(type='select', nr=0)
elif flag_name == "commit-queue":
return self.browser.find_control(type='select', nr=1)
raise Exception("Don't know how to find flag named \"%s\"" % flag_name)
def clear_attachment_flags(self,
attachment_id,
additional_comment_text=None):
self.authenticate()
comment_text = "Clearing flags on attachment: %s" % attachment_id
if additional_comment_text:
comment_text += "\n\n%s" % additional_comment_text
log(comment_text)
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
self.browser.set_value(comment_text, name='comment', nr=0)
self._find_select_element_for_flag('review').value = ("X",)
self._find_select_element_for_flag('commit-queue').value = ("X",)
self.browser.submit()
def set_flag_on_attachment(self,
attachment_id,
flag_name,
flag_value,
comment_text=None,
additional_comment_text=None):
# FIXME: We need a way to test this function on a live bugzilla
# instance.
self.authenticate()
# FIXME: additional_comment_text seems useless and should be merged into comment-text.
if additional_comment_text:
comment_text += "\n\n%s" % additional_comment_text
log(comment_text)
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
if comment_text:
self.browser.set_value(comment_text, name='comment', nr=0)
self._find_select_element_for_flag(flag_name).value = (flag_value,)
self.browser.submit()
# FIXME: All of these bug editing methods have a ridiculous amount of
# copy/paste code.
def obsolete_attachment(self, attachment_id, comment_text=None):
self.authenticate()
log("Obsoleting attachment: %s" % attachment_id)
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
self.browser.find_control('isobsolete').items[0].selected = True
# Also clear any review flag (to remove it from review/commit queues)
self._find_select_element_for_flag('review').value = ("X",)
self._find_select_element_for_flag('commit-queue').value = ("X",)
if comment_text:
log(comment_text)
# Bugzilla has two textareas named 'comment', one is somehow
# hidden. We want the first.
self.browser.set_value(comment_text, name='comment', nr=0)
self.browser.submit()
def add_cc_to_bug(self, bug_id, email_address_list):
self.authenticate()
log("Adding %s to the CC list for bug %s" % (email_address_list, bug_id))
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
self.browser["newcc"] = ", ".join(email_address_list)
self.browser.submit()
def post_comment_to_bug(self, bug_id, comment_text, cc=None):
self.authenticate()
log("Adding comment to bug %s" % bug_id)
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
self.browser["comment"] = comment_text
if cc:
self.browser["newcc"] = ", ".join(cc)
self.browser.submit()
def close_bug_as_fixed(self, bug_id, comment_text=None):
self.authenticate()
log("Closing bug %s as fixed" % bug_id)
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
if comment_text:
self.browser['comment'] = comment_text
self.browser['bug_status'] = ['RESOLVED']
self.browser['resolution'] = ['FIXED']
self.browser.submit()
def _has_control(self, form, id):
return id in [control.id for control in form.controls]
def reassign_bug(self, bug_id, assignee=None, comment_text=None):
self.authenticate()
if not assignee:
assignee = self.username
log("Assigning bug %s to %s" % (bug_id, assignee))
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
if not self._has_control(self.browser, "assigned_to"):
log("""Failed to assign bug to you (can't find assigned_to) control.
Do you have EditBugs privileges at bugs.webkit.org?
https://bugs.webkit.org/userprefs.cgi?tab=permissions
If not, you should email [email protected] or ask in #webkit
for someone to add EditBugs to your bugs.webkit.org account.""")
return
if comment_text:
log(comment_text)
self.browser["comment"] = comment_text
self.browser["assigned_to"] = assignee
self.browser.submit()
def reopen_bug(self, bug_id, comment_text):
self.authenticate()
log("Re-opening bug %s" % bug_id)
# Bugzilla requires a comment when re-opening a bug, so we know it will
# never be None.
log(comment_text)
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
bug_status = self.browser.find_control("bug_status", type="select")
# This is a hack around the fact that ClientForm.ListControl seems to
# have no simpler way to ask if a control has an item named "REOPENED"
# without using exceptions for control flow.
possible_bug_statuses = map(lambda item: item.name, bug_status.items)
if "REOPENED" in possible_bug_statuses:
bug_status.value = ["REOPENED"]
# If the bug was never confirmed it will not have a "REOPENED"
# state, but only an "UNCONFIRMED" state.
elif "UNCONFIRMED" in possible_bug_statuses:
bug_status.value = ["UNCONFIRMED"]
else:
# FIXME: This logic is slightly backwards. We won't print this
# message if the bug is already open with state "UNCONFIRMED".
log("Did not reopen bug %s, it appears to already be open with status %s." % (bug_id, bug_status.value))
self.browser['comment'] = comment_text
self.browser.submit()
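    # Note: in Bugzilla's flag <select> controls the value "X" is the unset
    # state, which is why clearing the review and commit-queue flags above
    # assigns ("X",).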
| bsd-3-clause | -8,365,164,566,894,153,000 | 45.046729 | 229 | 0.622006 | false |
dtroyer/python-openstacksdk | openstack/identity/v3/role_domain_group_assignment.py | 1 | 1276 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource
class RoleDomainGroupAssignment(resource.Resource):
resource_key = 'role'
resources_key = 'roles'
base_path = '/domains/%(domain_id)s/groups/%(group_id)s/roles'
service = identity_service.IdentityService()
# capabilities
allow_list = True
# Properties
#: name of the role *Type: string*
name = resource.Body('name')
#: The links for the service resource.
links = resource.Body('links')
#: The ID of the domain to list assignment from. *Type: string*
domain_id = resource.URI('domain_id')
#: The ID of the group to list assignment from. *Type: string*
group_id = resource.URI('group_id')
| apache-2.0 | -8,509,374,455,824,138,000 | 36.529412 | 75 | 0.717085 | false |
YouAreTheHat/pngr | pngr.py | 1 | 17939 |
### ### ### ### ### ### ### IMPORTANT ### ### ### ### ### ### ###
# #
# This module is a work in progress. Comments, particularly the #
# ones preceding a class or function definition, are intended #
# to represent the desired end result of this code. Until such #
# time as a version 1.0 of this work is published, with this #
# warning removed, all the contents and functions herein are to #
# be considered experimental, incomplete, and mutable. All #
# comments outside of this box are to be considered lies and #
# wishful thinking, not accurate documentation. #
# #
### ### ### ### ### ### ### #### #### ### ### ### ### ### ### ###
import math, zlib
# A custom error raised for issues with this module only.
class PngError(Exception):
def __init__(self, message=None):
if message is None:
message = "an unspecified error has occurred"
self.message = message
super(PngError, self).__init__(self.message)
# Reads PNG files.
# Largely acts as a wrapper for open(), automatically
# reading in positions and increments appropriate for the PNG format.
# Is also capable of spawning PngChunks.
class PngReader:
""" !!! WIP !!!
Reads PNG files and returns chunks of information.
"""
def __init__(self, pngfile):
self.png_path = pngfile #path to PNG file
self.png = None
# This will hold the file's first 8 bytes; in a PNG, these should
# always be the static PNG signature
self.png_sig = b''
# Check if the passed file really is a PNG; if not, raise error
if not self.is_valid():
raise PngError("file {} is corrupt or not a PNG".format(\
self.png_path))
# For using the 'with' statement to initialize
def __enter__(self):
self.open_png()
return self
# For using the 'with' statement to initialize
def __exit__(self, type, value, traceback):
self.close_png()
# Checks if the file location passed at init refers to a valid PNG.
# Never call this if the file is already open
def is_valid(self):
# This is the signature of all properly constructed PNGs; if the first
# 8 bytes of the file are not this, it isn't a PNG
sig = b'\x89PNG\r\n\x1a\n'
with open(self.png_path, 'rb') as f:
self.png_sig = f.read(8)
f.seek(0)
if self.png_sig == sig:
return True
else:
return False
# Acts as a wrapper for open(); also records the cursor position
def open_png(self):
if (self.png is None) or (self.png and self.png.closed):
self.png = open(self.png_path, 'rb')
self.last_pos = self.png.tell()
# Closes the PNG
def close_png(self):
if self.png and not self.png.closed:
self.png.close()
# Allows an instance to resume reading a file from the position in which
# it was after its last successful open_png() or next_chunk() call.
def resume(self):
if self.png and not self.png.closed:
self.png.seek(self.last_pos)
# Reads the next chunk in the file and returns a PngChunk object.
# If at the beginning of a file, it will skip the PNG signature.
# It will fail if its associated PNG is not opened for reading.
def next_chunk(self):
# Skip the PNG signature because it is not a chunk
if self.png.tell() == 0:
self.png.seek(8)
# Make a list to hold the chunk
self.cur_chunk = []
# Read the length, type, data, and crc
self.cur_chunk.append(self.png.read(4))
self.cur_chunk.append(self.png.read(4))
self.cur_chunk.append(self.png.read(\
int.from_bytes(self.cur_chunk[0], 'big')))
self.cur_chunk.append(self.png.read(4))
# Record the cursor position
self.last_pos = self.png.tell()
try:
# Return a PngChunk for the read bytes
return PngChunk(self.cur_chunk)
finally:
# We've finished reading, so forget about the current chunk
# (since it's no longer "current")
del self.cur_chunk
# Check if there is at least one more chunk.
# It will fail if its associated PNG is not opened for reading.
def has_more(self):
if len(self.png.read(12)) < 12:
self.png.seek(self.last_pos)
return False
else:
self.png.seek(self.last_pos)
return True
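# A minimal usage sketch of the reader (hypothetical file name; see PngData
# below for collecting and analyzing the chunks):
#
#   with PngReader('example.png') as reader:
#       while reader.has_more():
#           chunk = reader.next_chunk()
#           print(chunk.get_meta('Type'), chunk.get_meta('Length'))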
# Stores organized data for a single chunk of a PNG.
# Superclass for specific chunk types.
# The 'meta' dict is used to store the attributes of the chunk
# which the chunk itself stores (length, type, CRC).
# Subclasses should extend the 'info' dict with the parsed information the
# chunk actually carries (e.g., IHDR adds 'Width', 'Height', etc).
class PngChunk:
""" !!! WIP !!!
Stores organized data on a PNG chunk.
"""
# Must be passed the entire binary chunk as a list
def __init__(self, c_bytes):
self.meta = {}
self.meta['Length'] = int.from_bytes(c_bytes[0], 'big')
self.meta['Type'] = c_bytes[1].decode()
self.meta['CRC'] = c_bytes[3]
self.data = bytearray(c_bytes[2])
self.info = {}
# Getter for chunk meta-data
def get_meta(self, property_name=None):
"""\tReturns dict of chunk length, type, and CRC.
Specify a key to return only that value."""
if property_name is None:
return self.meta
return self.meta[property_name]
# Getter for raw data
def get_data(self, buffer=None):
"""\tReturns generator over unparsed data, <buffer> bytes at a time.
Defaults to entire data field at once.
This does not include the length, type, or CRC fields.
Use get_raw() for a binary version of the entire chunk.
WARNING: may be up to 2^31 bytes long w/o buffer, use with caution"""
return self._raw_generator(buffer, 8, -4)
# Getter for parsed contents; most useful for subtypes
def get_info(self, info_name=None):
"""\tReturns parsed chunk data as dict (may be empty).
For known chunk types, this should return their stored information
in human-readable form."""
if info_name is None:
return self.info
return self.info[info_name]
# Getter for the binary data of the entire chunk
def get_raw(self, buffer: '4 to 2147483659'=None):
"""\tReturns generator over binary chunk, <buffer> bytes at a time.
Defaults to entire chunk at once.
WARNING: may be over 2^31 bytes long w/o buffer, use with caution"""
if buffer is not None:
if buffer < 4:
raise PngError("buffer length out of range")
return self._raw_generator(buffer)
    # Makes generator over binary form of chunk (or part of chunk)
    def _raw_generator(self, buffer, start=0, end=0):
        full = 12 + len(self.data)
        # 'end' may trim trailing bytes (e.g. -4 to stop before the CRC)
        l = full + end if end < 0 else full
        if start >= 0:
            num = start
        elif abs(start) <= full:
            num = full + start
        if buffer is None:
            buffer = l
        while num < l:
            result, toread = b'', min(buffer, l - num)
            while toread > 0:
                b_l = len(result)
                if num < 4:
                    result += self.meta['Length'].to_bytes(4, 'big')\
                            [num:num + toread]
                elif num >= 4 and num < 8:
                    result += bytes(self.meta['Type'], 'utf8')\
                            [num - 4:num - 4 + toread]
                elif num >= 8 and num < (full - 4):
                    result += self.data[num - 8:num - 8 + toread]
                else:
                    result += self.meta['CRC'][num - (full - 4):
                                               num - (full - 4) + toread]
                num += len(result) - b_l
                toread -= len(result) - b_l
            yield result
    # Sets the 'Length' to the actual length of the data field
    def set_length(self):
        """\tSet 'Length' to length of the data field.
        Returns difference between new and old lengths."""
        if self.meta['Length'] != len(self.data):
            oldlen = self.meta['Length']
            self.meta['Length'] = len(self.data)
            return (self.meta['Length'] - oldlen)
        return 0
# Stores parsed data from the IHDR chunk.
# PngData objects can use IHDR info dict to extract image properties
class IHDR(PngChunk):
# IHDR can extract all of its info at init
def __init__(self, genchunk):
if not isinstance(genchunk, PngChunk):
raise PngError("expected PngChunk, but {} found"\
.format(type(genchunk).__name__))
self.meta = genchunk.meta
self.data = genchunk.data
self.info = genchunk.info
self.info['Width'] = int.from_bytes(self.data[:4], 'big')
self.info['Height'] = int.from_bytes(self.data[4:8], 'big')
self.info['Bit depth'] = self.data[8]
self.info['Color type'] = self.data[9]
self.info['Interlace'] = self.data[-1]
# Stores parsed data from an IDAT chunk.
class IDAT(PngChunk):
# Init does not parse info because info from other chunks (IHDR and
# possibly others) is needed to understand the formatting.
# Plus, it's kind of a large and memory-intensive process.
def __init__(self, genchunk):
if not isinstance(genchunk, PngChunk):
raise PngError("expected PngChunk, but {} found"\
.format(type(genchunk).__name__))
self.meta = genchunk.meta
self.data = genchunk.data
self.info = genchunk.info
class PLTE(PngChunk):
pass
class IEND(PngChunk):
pass
# Stores PngChunks and analyzes their attributes.
# Acts as an object representation of the PNG file, since it holds all of the
# file's data in chunk form.
# Generic PngChunks should be passed to it through the 'addchunk' method;
# it will convert them to an appropriate subtype if one is defined.
class PngData:
""" !!! WIP !!!
Stores and analyzes PngChunks and prints their data.
"""
# Static mapping of chunk types to chunk subclasses.
# Used to replace generic chunks with their specific classes for
# analyzation.
# Critical chunks are unconditionally supported; ancillary chunks will
# be supported selectively as they are developed and added to the module.
chunktypes = {'IHDR': IHDR,
'IDAT': IDAT,
'PLTE': PLTE,
'IEND': IEND}
# Static mapping of color types to their sample information.
# The first value in the tuple is the number of samples/channels in the
# decompressed IDAT stream. This should be used for parsing the filter
# and, consequently, the scanlines.
# The second value reflects the presence of a PLTE. True means that a PLTE
# must appear; False means it must not appear; None means it may appear,
# but may also be safely ignored.
# Note that type 3 implies that the pixels in PLTE are 3-tuples of 1-byte
# samples (a bit depth less than 8 just adds leading zeroes).
colortypes = {0: (1, False),
2: (3, None),
3: (1, True),
4: (2, False),
6: (4, None)}
# Static PNG signature; it will be needed when writing
signature = b'\x89PNG\r\n\x1a\n'
def __init__(self):
self.chunks = []
self.ihdr_pos = None
self.plte_pos = None
def add_chunk(self, chunk):
if not isinstance(chunk, PngChunk):
raise PngError("expected PngChunk, but {} found"\
.format(type(chunk).__name__))
ctype = chunk.get_meta('Type')
if ctype in self.chunktypes.keys():
if ctype == 'IHDR':
self.ihdr_pos = len(self.chunks)
elif ctype == 'PLTE':
self.plte_pos = len(self.chunks)
self.chunks.append(self.chunktypes[ctype](chunk))
else:
self.chunks.append(chunk)
# Rough unfiltering method.
# Currently works naively on an array of scanlines.
# No support for interlacing. Requires precalculated pixel depth. May
# work improperly on color type 0 for bit depths less than 8.
def _unfilter(self, lines, px_depth):
for i in range(len(lines)):
l = bytearray(lines[i])
if l[0] == 0: #filter 'none'
pass
elif l[0] == 1: #filter 'sub'
for j in range((1 + px_depth), len(l)):
l[j] = (l[j] + l[j - px_depth])%256
elif l[0] == 2: #filter 'up'
for j in range(1, len(l)):
if i == 0:
prior = 0
else:
prior = lines[i - 1][j - 1]
l[j] = (l[j] + prior)%256
elif l[0] == 3: #filter 'average'
for j in range(1, len(l)):
if j in range(1, (1 + px_depth)):
prev = 0
else:
prev = l[j - px_depth]
if i == 0:
prior = 0
else:
prior = lines[i - 1][j - 1]
l[j] = (l[j] + math.floor((prev + prior)/2))%256
elif l[0] == 4: #filter 'Paeth'
for j in range(1, len(l)):
flg = False
if j in range(1, (1 + px_depth)):
prev = 0
flg = True
else:
prev = l[j - px_depth]
if i == 0:
prior = 0
flg = True
else:
prior = lines[i - 1][j - 1]
if flg:
prevpri = 0
else:
prevpri = lines[i - 1][(j - 1) - px_depth]
                    p_p = prev + prior - prevpri
                    p_d = []
                    for p_v in [prev, prior, prevpri]:
                        p_d.append(abs(p_p - p_v))
if p_d[0] <= p_d[1] and p_d[0] <= p_d[2]:
paeth = prev
elif p_d[1] <= p_d[2]:
paeth = prior
else:
paeth = prevpri
l[j] = (l[j] + paeth)%256
l = l[1:]
lines[i] = l
return lines
# Rough method for extracting pixel data from IDATs
# Currently works naively on all data at once, returns array. No support
# for interlacing. May work improperly on color type 0 for bit depths less
# than 8.
def get_scanlines(self):
info = self.chunks[self.ihdr_pos].get_info()
if info['Interlace']:
raise PngError("interlacing not supported")
c_count = self.colortypes[info['Color type']][0]
c_depth = max([info['Bit depth']//8, 1])
p_depth = c_depth * c_count
p_w, p_h = info['Width'], info['Height']
cmp = b''
for chunk in [c for c in self.chunks if isinstance(c, IDAT)]:
for d in chunk.get_data():
cmp += d
dcmp = zlib.decompress(cmp)
scanlines = []
for i in range(0, len(dcmp), ((p_depth * p_w) + 1)):
scanlines.append(dcmp[i:i + ((p_depth * p_w) + 1)])
scanlines = self._unfilter(scanlines, p_depth)
return scanlines
## Notes
# pngr_test.py has some testing, basic implementations, etc
# add PngChunk subclasses for each critical type (and hopefully important
# ancillary types as well). use them for analyzing chunks more effectively.
# project purpose has been changed: the goal is now to make a PNG decoder,
# including parsing, modification, and re-writing
# for the above goal:
# - data class would hold info attributes (probably)
# - only chunks which affect the reading/writing of IDAT/pixel data would need
# to be parsed (others are optional)
# - only critical info/data would need to be stored
# - maybe a gateway to stegosaurus?
# make chunk subtypes able to init with bin arrays from reader
# ...because reasons?
# OR
# eliminate subtypes and meta array, trust 'Type' for chunk typing, have data
# class parse and store information to avoid redundant storage. this may
# be necessary for cat'ing IDATs and using IHDR and PLTE info anyway
# for the above, only certain data has to be stored; chunks can still be
# mostly responsible for themselves.
# keep mem usage in mind. at minimum, entire file is in mem. decompressing
# IDAT(s) all at once nearly doubles that. copying decomp'd data to array
# doubles decomp'd data length, which is already longer than IDAT. working
# with data in place as much as possible would be wise.
# the above may be complicated in the case of Adam7 interlacing
# (de)compression, making scanlines, and (un)filtering may also benefit from
# generators/buffered IO (or be impossible - look into that)
# scanline and unfiltering functions are very rough; revise to ensure they are
# compatible withh color types and bit depths. also include a buffered read
# by way of a generator.
# for above, carefully consider how decompression and unfiltering will work;
# the compressed data must be at least 2 scanlines long to be useful for
# unfiltering.
# if this will work as a proper PNG decoder, ensure that all requirements from
# the PNG standard are followed.
##
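# A sketch of the buffered decompression idea from the notes above (an
# assumption about the eventual design; zlib.decompressobj is the standard
# incremental API):
#
# def decompressed_stream(idat_chunks, buffer=4096):
#     d = zlib.decompressobj()
#     for chunk in idat_chunks:
#         for piece in chunk.get_data(buffer):
#             out = d.decompress(piece)
#             if out:
#                 yield out
#     tail = d.flush()
#     if tail:
#         yield tail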
| lgpl-3.0 | -588,470,887,037,475,200 | 38.864444 | 78 | 0.564468 | false |
be-cloud-be/horizon-addons | partner-contact/partner_external_map/tests/test_partner_external_map.py | 1 | 3467 | # -*- coding: utf-8 -*-
# © 2016 Pedro M. Baeza <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp.tests import common
from openerp.exceptions import Warning as UserError
from ..hooks import set_default_map_settings
class TestPartnerExternalMap(common.TransactionCase):
def setUp(self):
super(TestPartnerExternalMap, self).setUp()
self.user = self.env['res.users'].create({
'name': 'Test user',
'login': 'test_login',
'context_map_website_id': self.ref(
'partner_external_map.google_maps'),
'context_route_map_website_id': self.ref(
'partner_external_map.google_maps'),
})
self.user.partner_id.city = 'Tomelloso'
self.partner = self.env['res.partner'].create({
'name': 'Test partner',
'city': 'Madrid',
})
def test_post_init_hook(self):
# Call this again for coverage purposes, but it has been already run
set_default_map_settings(self.cr, self.registry)
self.assertTrue(self.env.user.context_map_website_id)
self.assertTrue(self.env.user.context_route_map_website_id)
self.assertEqual(self.env.user.partner_id,
self.env.user.context_route_start_partner_id)
def test_create_user(self):
self.assertEqual(
self.user.partner_id, self.user.context_route_start_partner_id)
def test_open_map(self):
action = self.partner.sudo(self.user.id).open_map()
self.assertEqual(
action['url'], "https://www.google.com/maps?ie=UTF8&q=Madrid")
def test_open_route_map(self):
action = self.partner.sudo(self.user.id).open_route_map()
self.assertEqual(
action['url'], "https://www.google.com/maps?saddr=Tomelloso&daddr="
"Madrid&directionsmode=driving")
def test_open_map_with_coordinates(self):
        # Simulate that we have the base_geolocalize module installed by
        # creating the variables by hand - this can't be done with routes
partner = self.partner.sudo(self.user.id)
partner.partner_latitude = 39.15837
partner.partner_longitude = -3.02145
action = partner.open_map()
self.assertEqual(
action['url'],
"https://www.google.com/maps?z=15&q=39.15837,-3.02145")
def test_exception_no_map_website(self):
self.user.context_map_website_id = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_map()
def test_exception_no_map_route_website(self):
self.user.context_route_start_partner_id = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_route_map()
def test_exception_no_starting_partner(self):
self.user.context_route_map_website_id = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_route_map()
def test_exception_no_address_url(self):
self.user.context_map_website_id.address_url = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_map()
def test_exception_no_route_address_url(self):
self.user.context_map_website_id.route_address_url = False
with self.assertRaises(UserError):
self.partner.sudo(self.user.id).open_route_map()
| agpl-3.0 | -35,478,124,546,341,290 | 40.261905 | 79 | 0.632141 | false |
Linaro/lava-dispatcher | lava_dispatcher/test/test_uboot_ums.py | 1 | 2817 | # Copyright (C) 2018 Linaro Limited
#
# Author: Matthew Hart <[email protected]>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
import os
import unittest
from lava_dispatcher.device import NewDevice
from lava_dispatcher.parser import JobParser
from lava_dispatcher.test.test_basic import Factory, StdoutTestCase
from lava_dispatcher.test.utils import DummyLogger, infrastructure_error
class UBootUMSFactory(Factory): # pylint: disable=too-few-public-methods
"""
Not Model based, this is not a Django factory.
Factory objects are dispatcher based classes, independent
of any database objects.
"""
def create_warp7_job(self, filename): # pylint: disable=no-self-use
device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/imx7s-warp-01.yaml'))
bbb_yaml = os.path.join(os.path.dirname(__file__), filename)
with open(bbb_yaml) as sample_job_data:
parser = JobParser()
job = parser.parse(sample_job_data, device, 4212, None, "")
job.logger = DummyLogger()
return job
class TestUbootUMSAction(StdoutTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
super(TestUbootUMSAction, self).setUp()
self.factory = UBootUMSFactory()
@unittest.skipIf(infrastructure_error('dd'), "dd not installed")
def test_ums_action(self):
job = self.factory.create_warp7_job('sample_jobs/warp7-ums.yaml')
self.assertIsNotNone(job)
description_ref = self.pipeline_reference('uboot-ums.yaml', job=job)
self.assertEqual(description_ref, job.pipeline.describe(False))
self.assertIsNone(job.validate())
self.assertEqual(job.device['device_type'], 'imx7s-warp')
uboot = [action for action in job.pipeline.actions if action.name == 'uboot-action'][0]
retry = [action for action in uboot.internal_pipeline.actions if action.name == 'uboot-retry'][0]
flash = [action for action in retry.internal_pipeline.actions if action.name == 'flash-uboot-ums'][0]
self.assertEqual("ums", flash.parameters['commands'])
self.assertEqual("/dev/vde", flash.usb_mass_device)
| gpl-2.0 | 2,561,922,625,065,863,000 | 42.338462 | 109 | 0.707845 | false |
php1ic/inch | scripts/randomChart.py | 1 | 7115 | #!/usr/bin/env python3
"""
Create chart(s) with random parameters
Using either the executable provided, or
searching for it in standard locations,
and farm the creation on multiple threads
"""
import argparse
import multiprocessing
import os
import random
import shutil
import subprocess
import colorama
from joblib import Parallel, delayed
def getExecutableName():
"""
Get the name of the executable that is going to be used.
@param: None
@return[success] The basename of the executable that will be used
@return[failure] The default value
"""
# Set a default program name incase this function fails
programName = "inch"
# Store where we are so we can comeback
currentdir = os.getcwd()
# Get the path of this script
scriptdir = os.path.realpath(__file__)
# Move into the script directory as it's guaranteed to part of the git repo
os.chdir(os.path.dirname(scriptdir))
# Use git to get the repo directory name, assume this is also the exe name
gitExe = shutil.which("git")
if gitExe is None:
print("Looks like git is not installed on this system")
print(f"Using the default {programName} as the executable name")
return programName
output = subprocess.run([gitExe, "rev-parse", "--show-toplevel"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
programName = os.path.basename(output.stdout.strip().decode())
os.chdir(currentdir)
return programName
# -------------------------------------------------
def validateExecutable(exe):
"""
    Check that the provided executable exists; otherwise look for it in the
'standard' locations
@param: File path
@return[success]: Path to valid executable
@return[failure]: None
"""
colorama.init()
if exe is not None:
if os.path.isfile(exe):
return exe
print(colorama.Fore.YELLOW + "WARNING: " + colorama.Style.RESET_ALL
+ f"{exe} does not exist."
"Looking for executable in standard build locations")
exeName = getExecutableName()
scriptdir = os.path.realpath(os.path.dirname(__file__))
commonPath = os.path.join("bin", exeName)
gnumakePath = os.path.abspath(os.path.join(scriptdir, "..", commonPath))
cmakePath = os.path.abspath(os.path.join(scriptdir, "..", "..", "build", commonPath))
fullExe = None
if os.path.isfile(gnumakePath):
fullExe = gnumakePath
elif os.path.isfile(cmakePath):
fullExe = cmakePath
else:
print(colorama.Fore.RED + "ERROR: " + colorama.Style.RESET_ALL
+ f" Couldn't find an executable to use")
colorama.deinit()
return fullExe
# -------------------------------------------------
def createSingleChart(MAX_LOW_Z, MAX_Z):
"""
Generate a single chart with random parameters.
Limit the Z range to [MAX_LOW_Z,MAX_Z]
@param: Highest value of Z to use as Zmin
@param: Largest value of Z allowed
@return: Nothing
"""
    # This script removes the ability to interact with the program, so we need to make sure
# that the file we are try to create does not already exist. Otherwise the script will
# get stuck waiting for a user input that will never come
while True:
# Randomly pick 0,1,2
experimental = random.choice(range(0, 3))
# If the experimental option is '1' i.e theoretical, there is one less property
# to colour by so randomly pick form a,b,c,d and possibly e
choice = random.choice(range(0, 4 if experimental == 1 else 5))
minZ = random.randrange(MAX_LOW_Z)
maxZ = minZ + random.randrange(MAX_Z - minZ)
name = f"Zmin-{minZ:03d}_Zmax-{maxZ:03d}_Exp-{experimental}_Type-{choice}"
if not os.path.isfile(name+".eps"):
break
print(f"Creating - {name}")
with open(name+".in", 'w') as ofile:
ofile.write(f"section=1\n"
f"Zmin={minZ}\n"
f"Zmax={maxZ}\n"
f"required=0\n"
f"type={experimental}\n"
f"choice={choice}\n")
ofile.close()
subprocess.run([exe, "-o", name, "-i", name+".in"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# -------------------------------------------------
def runExecutable(exe, number, threads):
"""
Run <exe> <number> times, randomising the input parameters.
Each execution is independent so allow <exe> to be run over <threads> threads
@param: Executable to run
@param: Number of times to run <exe>
@param: Number of threads to concurrently use
@return: Nothing
"""
# We could read this from ../includes/inputs.h
MAX_Z = 118
# Set the minimum possible Z range
MAX_LOW_Z = MAX_Z - 1
colorama.init()
print(f"\nUsing: "
+ colorama.Fore.GREEN + exe + colorama.Style.RESET_ALL
+ " to create "
+ colorama.Fore.GREEN + str(number) + colorama.Style.RESET_ALL
+ " chart(s)\n")
colorama.deinit()
Parallel(threads)(delayed(createSingleChart)(MAX_LOW_Z, MAX_Z) for i in range(0, number))
print()
# -------------------------------------------------
def check_positive(value):
"""
Check that the value is positive while also converting to an int.
Use to ensure that the number of charts option make sense.
@param: Number
@return[success]: The integer version of the number
@return[failure]: ArgparseTypeError
"""
intValue = int(value)
if intValue <= 0:
raise argparse.ArgumentTypeError(f"{intValue} is an invalid positive int value")
return intValue
# -------------------------------------------------
def parse_arguments():
"""
Encapsulate the use of argparse
@param: None
@return: An instance of argparse
"""
parser = argparse.ArgumentParser(description="Create some random charts")
# Required
# Nothing
# Optional
parser.add_argument("-e", "--executable",
help="The executable to use [default: None]",
type=str,
default=None)
parser.add_argument("-n", "--number",
help="Number of charts to randomly create [default: %(default)s]",
type=check_positive,
default=1)
parser.add_argument("-t", "--threads",
help="Number of threads to use [default: %(default)s]",
type=int,
default=multiprocessing.cpu_count()-1,
choices=range(1, multiprocessing.cpu_count()))
return parser.parse_args()
# -------------------------------------------------
if __name__ == "__main__":
colorama.init()
args = parse_arguments()
exe = validateExecutable(args.executable)
if exe is not None:
runExecutable(exe, args.number, args.threads)
colorama.deinit()
# -------------------------------------------------
| gpl-3.0 | -8,901,250,184,690,402,000 | 28.279835 | 93 | 0.583275 | false |
neelchauhan/OnionLauncher | OnionLauncher/main.py | 1 | 2902 | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from PyQt5.uic import loadUi
from var import values, version
import torctl
from fn_handle import detect_filename
class MainWindow(QMainWindow):
def __init__(self, *args):
super(MainWindow, self).__init__(*args)
# Load .ui file
loadUi(detect_filename("ui_files/main.ui"), self)
# Define buttons
buttons = {
self.tbAdd: self.addRow,
self.tbRemove: self.removeRow,
self.btnSwitchTor: self.switchTor,
self.btnAbout: self.showAbout
}
self.evAddClick(buttons)
# Function to connect objects from dictionary
def evAddClick(self, obj_dict):
for obj in obj_dict:
obj.clicked.connect(obj_dict[obj])
# Function to set objects enabled or not
def evSetListEnabled(self, lst, state):
for item in lst:
item.setEnabled(state)
# Function to add a blank row
def addRow(self):
rowPos = self.twSettings.rowCount() # Get position
self.twSettings.insertRow(rowPos)
# Function to delete a selected row
def removeRow(self):
rows = sorted(set(index.row() for index in self.twSettings.selectedIndexes())) # Get selected rows
rows.reverse() # Reverse rows (we're deleting from last->first)
for row in rows:
self.twSettings.removeRow(row)
 def optToDict(self): # Function to convert options in a QTableWidget to a Python Dictionary
rows = self.twSettings.rowCount() # Row count (we're iterating the hard way)
output_dict = {}
for row in range(rows):
# Get values in two variables
setting = self.twSettings.item(row, 0)
parameter = self.twSettings.item(row, 1)
# Add them to dictionary
if setting is not None and parameter is not None:
output_dict[setting.text()] = parameter.text().split()
return output_dict
def switchTor(self): # Enable (or Disable) Tor
modList = [
self.twSettings,
self.tbAdd,
self.tbRemove
]
if values["torEnabled"]: # Turn off if Tor is on
values["torEnabled"] = False
self.btnSwitchTor.setText("Start Tor")
self.lblSwitchTor.setText("Tor Not Running")
self.evSetListEnabled(modList, True)
torctl.stopTor(values["process_desc"])
else: # Turn on Tor
values["process_desc"] = torctl.startTor(self, self.optToDict())
# If Tor started correctly, then mark as "on"
if values["process_desc"] != None:
values["torEnabled"] = True
self.btnSwitchTor.setText("Stop Tor")
self.lblSwitchTor.setText("Tor Running")
self.evSetListEnabled(modList, False)
# Refresh elements
QApplication.processEvents()
def showAbout(self): # Show about dialog
message = "About OnionLauncher " + version + "\n\n" \
"Copyright 2016 Neel Chauhan\n" \
"https://github.com/neelchauhan/OnionLauncher"
QMessageBox.information(self, "Information", message)
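# Note on optToDict above: a settings row such as ("SocksPort", "9050")
# (assumed table contents) becomes {"SocksPort": ["9050"]}, since the
# parameter text is whitespace-split into a list before being handed to
# torctl.startTor in switchTor.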
def main_loop():
app = QApplication(sys.argv)
mw = MainWindow()
mw.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main_loop()
| bsd-2-clause | 8,074,531,801,412,537,000 | 29.547368 | 100 | 0.709511 | false |
bsarsgard/blackrocktickets | texas/forms.py | 1 | 1312 | """
Texas - Ticket Sales System
Copyright (C) 2010 Ben Sarsgard
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django import forms
class LoginForm(forms.Form):
username = forms.CharField(label='Email Address', max_length=75)
password = forms.CharField(widget=forms.PasswordInput(render_value=False))
class SaleForm(forms.Form):
tickets = forms.CharField(label='Number of Tickets', max_length=2)
name = forms.CharField(label='Full Name', max_length=50)
email = forms.CharField(label='Email', max_length=75)
class ChanceForm(forms.Form):
name = forms.CharField(label='Full Name', max_length=255)
email = forms.CharField(label='Primary Email', max_length=255)
| apache-2.0 | -4,711,378,940,113,325,000 | 40 | 78 | 0.730945 | false |
cbouilla/3sum-pool | share.py | 1 | 5451 | import time
import struct
import random
from hashlib import sha256
from binascii import hexlify, unhexlify
JOB_TYPES = ['FOO', 'BAR', 'FOOBAR']
def sha256d(x):
return sha256(sha256(x).digest()).digest()
def swap_endian_words(hex_words):
    '''Swaps the endianness of a hexadecimal string of words and converts to binary string.'''
message = unhexlify(hex_words)
if len(message) % 4 != 0: raise ValueError('Must be 4-byte word aligned')
return b''.join([ message[4 * i: 4 * i + 4][::-1] for i in range(0, len(message) // 4) ])
def version_prev_block(kind):
"""Return the "block version" & the "hash of previous block" according to our categories (FOO, BAR, FOOBAR)"""
if kind == 0: # 'FOO'
block_version = hexlify(b'-OOF').decode()
prev_block_hash = hexlify(swap_endian_words(hexlify(b' Charles Bouillaguet'))).decode()
elif kind == 1: # 'BAR'
block_version = hexlify(b'-RAB').decode()
prev_block_hash = hexlify(swap_endian_words(hexlify(b' Pierre-Alain Fouque'))).decode()
elif kind == 2: # 'FOOBAR'
block_version = hexlify(b'BOOF').decode()
prev_block_hash = hexlify(swap_endian_words(hexlify(b'AR- Claire Delaplace'))).decode()
return (block_version, prev_block_hash)
class JobContext:
extranonce1 = None
kind = None
D = None
def __init__(self, extranonce1, D):
self.extranonce1 = extranonce1
self.kind = random.randrange(3)
self.D = D
def work_parameters(self):
block_version, prev_block_hash = version_prev_block(self.kind)
ntime = "{:08x}".format(int(time.time()))
return [prev_block_hash, Share.coinbase_1, Share.coinbase_2, [], block_version, Share.ndiff, ntime]
class Share:
"""representation of a full share (i.e. a block whose hash is correct)"""
# variable part. Strings, in hex.
extranonce1 = None
extranonce2 = None
nonce = None
ntime = None # network time
# metadata
D = None # actual difficulty of the share
kind = None # 0==FOO, 1==BAR, 2==FOOBAR
    # static values. These choices yield invalid bitcoin blocks.
# This means that we don't actually mine bitcoins.
ndiff = "efbeadde" # encoded network difficulty
extraNonce2_size = 4
coinbase_1 = "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff20020862062f503253482f04b8864e5008"
coinbase_2 = "072f736c7573682f000000000100f2052a010000001976a914d23fcdf86f7e756a64a7a9688ef9903327048ed988ac00000000"
def __init__(self, extranonce2, nonce, ntime, job_context=None, kind=None, D=None, extranonce1=None):
self.extranonce2 = extranonce2
self.nonce = nonce
self.ntime = ntime
self._hash = None
if job_context:
self.extranonce1 = job_context.extranonce1
self.kind = job_context.kind
self.D = job_context.D
else:
self.extranonce1 = extranonce1
self.kind = kind
self.D = D
def block(self):
"""build the (binary) block this shares represent"""
block_version, prev_block_hash = version_prev_block(self.kind)
coinbase = self.coinbase_1 + self.extranonce1 + self.extranonce2 + self.coinbase_2
coinbase_hash_bin = sha256d(unhexlify(coinbase))
merkle_root = hexlify(coinbase_hash_bin)
version_bin = struct.pack("<I", int(block_version, base=16))
prev_hash_bin = swap_endian_words(prev_block_hash) # must be LE
mrt_bin = unhexlify(merkle_root) # must be LE
time_bin = struct.pack("<I", int(self.ntime, base=16))
diff_bin = struct.pack("<I", int(self.ndiff, base=16))
nonce_bin = struct.pack("<I", int(self.nonce, base=16))
return version_bin + prev_hash_bin + mrt_bin + time_bin + diff_bin + nonce_bin
def __str__(self):
return "({} / D={} / {} / {} / {})".format(JOB_TYPES[self.kind], self.D, self.extranonce1, self.extranonce2, self.nonce)
def block_hash(self):
if not self._hash:
self._hash = sha256d(self.block())
return self._hash
def valid(self):
#print(hexlify(self.block()).decode())
#print(self.formated_hex_block())
block_hash = self.block_hash()
#print(hexlify(block_hash).decode())
return block_hash[28:] == bytes([0,0,0,0])
def formated_hex_block(self):
h = hexlify(self.block()).decode()
return "{} {} {} {} {} {}".format(h[0:8], h[8:72], h[72:136], h[136:144], h[144:152], h[152:160])
def serialize(self):
"""dump this share into 160 bits"""
return struct.pack('<HHIIII', self.kind, self.D, int(self.extranonce2, base=16),
int(self.extranonce1, base=16), int(self.nonce, base=16), int(self.ntime, base=16))
@staticmethod
def unserialize(buf):
"""Generate a Share object given a 128-bit serialized share"""
kind, D, extranonce2_bin, extranonce1_bin, nonce_bin, ntime_bin = struct.unpack('<HHIIII', buf)
extranonce1 = "{:08x}".format(extranonce1_bin)
extranonce2 = "{:08x}".format(extranonce2_bin)
nonce = "{:08x}".format(nonce_bin)
ntime = "{:08x}".format(ntime_bin)
return Share(extranonce2, nonce, ntime, D=D, kind=kind, extranonce1=extranonce1)
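# Hypothetical round-trip check (illustrative values only):
#
#   s1 = Share('0000002a', 'deadbeef', '5b7f3c00', kind=1, D=16,
#              extranonce1='00000001')
#   s2 = Share.unserialize(s1.serialize())
#   assert (s2.kind, s2.D, s2.nonce) == (s1.kind, s1.D, s1.nonce)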
| gpl-3.0 | -3,494,661,404,866,030,600 | 39.080882 | 135 | 0.616401 | false |
stackforge/python-monascaclient | monascaclient/tests/v2_0/shell/test_alarm_definitions.py | 1 | 5743 | # Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from monascaclient.osc import migration as migr
from monascaclient.v2_0 import alarm_definitions as ad
from monascaclient.v2_0 import shell
class FakeV2Client(object):
def __init__(self):
super(FakeV2Client, self).__init__()
self.alarm_definitions = mock.Mock(
spec=ad.AlarmDefinitionsManager)
class TestAlarmDefinitionShellV2(base.BaseTestCase):
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_update(self, mc):
mc.return_value = c = FakeV2Client()
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_name = 'alarm_name'
ad_desc = 'test_alarm_definition'
ad_expr = 'avg(Test_Metric_1)>=10'
ad_action_id = '16012650-0b62-4692-9103-2d04fe81cc93'
ad_action_enabled = 'True'
ad_match_by = 'hostname'
ad_severity = 'CRITICAL'
raw_args = [
ad_id, ad_name, ad_desc, ad_expr,
ad_action_id, ad_action_id, ad_action_id, ad_action_enabled,
ad_match_by, ad_severity
]
name, cmd_clazz = migr.create_command_class(
'do_alarm_definition_update',
shell
)
cmd = cmd_clazz(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
parsed_args = parser.parse_args(raw_args)
cmd.run(parsed_args)
c.alarm_definitions.update.assert_called_once_with(
actions_enabled=True,
alarm_actions=[ad_action_id],
alarm_id=ad_id,
description=ad_desc,
expression=ad_expr,
match_by=[ad_match_by],
name=ad_name,
ok_actions=[ad_action_id],
severity=ad_severity,
undetermined_actions=[ad_action_id]
)
@mock.patch('monascaclient.osc.migration.make_client')
def test_alarm_definitions_list(self, mc):
mc.return_value = c = FakeV2Client()
c.alarm_definitions.list.return_value = [{
"name": "ntp_sync_check",
"id": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
"expression": "(max(ntp.offset{}, deterministic)>=1)",
"match_by": ['hostname'],
"description": "NTP time sync check",
"actions_enabled": True,
"deterministic": True,
"alarm_actions": ['aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'],
"ok_actions": [],
"undetermined_actions": [],
"severity": "HIGH",
}]
name, cmd_class = migr.create_command_class(
'do_alarm_definition_list',
shell
)
cmd = cmd_class(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
raw_args = []
parsed_args = parser.parse_args(raw_args)
cmd.run(parsed_args)
c.alarm_definitions.list.assert_called_once()
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_name(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_name = 'patch_name'
raw_args = '{0} --name {1}'.format(ad_id, ad_name).split(' ')
self._patch_test(mc, raw_args, alarm_id=ad_id, name=ad_name)
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_actions(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_action_id = '16012650-0b62-4692-9103-2d04fe81cc93'
actions = ['alarm-actions', 'ok-actions',
'undetermined-actions']
for action in actions:
raw_args = ('{0} --{1} {2}'.format(ad_id, action, ad_action_id)
.split(' '))
self._patch_test(mc, raw_args, **{
'alarm_id': ad_id,
action.replace('-', '_'): [ad_action_id]
})
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_severity(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
severity_types = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']
for st in severity_types:
raw_args = ('{0} --severity {1}'.format(ad_id, st)
.split(' '))
self._patch_test(mc, raw_args, alarm_id=ad_id, severity=st)
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_not_patch_unknown_severity(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
st = 'foo'
raw_args = ('{0} --severity {1}'.format(ad_id, st)
.split(' '))
self._patch_test(mc, raw_args, called=False)
@staticmethod
def _patch_test(mc, args, called=True, **kwargs):
mc.return_value = c = FakeV2Client()
name, cmd_clazz = migr.create_command_class(
'do_alarm_definition_patch',
shell
)
cmd = cmd_clazz(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
parsed_args = parser.parse_args(args)
cmd.run(parsed_args)
if called:
c.alarm_definitions.patch.assert_called_once_with(**kwargs)
else:
c.alarm_definitions.patch.assert_not_called()
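# Note: _patch_test drives the migrated 'alarm definition patch' command the
# same way the shell would; called=False asserts that input the command
# rejects (e.g. the unknown severity above) never reaches
# alarm_definitions.patch.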
| apache-2.0 | -7,564,279,838,991,846,000 | 33.806061 | 75 | 0.587672 | false |
dahebolangkuan/ToughRADIUS | radiusd/plugins/acct_stop_process.py | 1 | 3010 | #!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from pyrad import packet
from store import store
from settings import *
import logging
import decimal
import datetime
import utils
decimal.getcontext().prec = 11
decimal.getcontext().rounding = decimal.ROUND_UP
def process(req=None,user=None,runstat=None):
if not req.get_acct_status_type() == STATUS_TYPE_STOP:
return
runstat.acct_stop += 1
ticket = req.get_ticket()
if not ticket.nas_addr:
ticket.nas_addr = req.source[0]
_datetime = datetime.datetime.now()
online = store.get_online(ticket.nas_addr,ticket.acct_session_id)
if not online:
session_time = ticket.acct_session_time
stop_time = _datetime.strftime( "%Y-%m-%d %H:%M:%S")
start_time = (_datetime - datetime.timedelta(seconds=int(session_time))).strftime( "%Y-%m-%d %H:%M:%S")
ticket.acct_start_time = start_time
ticket.acct_stop_time = stop_time
ticket.start_source= STATUS_TYPE_STOP
ticket.stop_source = STATUS_TYPE_STOP
store.add_ticket(ticket)
else:
store.del_online(ticket.nas_addr,ticket.acct_session_id)
ticket.acct_start_time = online['acct_start_time']
ticket.acct_stop_time= _datetime.strftime( "%Y-%m-%d %H:%M:%S")
ticket.start_source = online['start_source']
ticket.stop_source = STATUS_TYPE_STOP
store.add_ticket(ticket)
if not user:return
product = store.get_product(user['product_id'])
    if online and product and product['product_policy'] == FEE_TIMES:
        # PrePay fee times policy (billing needs the online session record)
user_balance = store.get_user_balance(user['account_number'])
sessiontime = decimal.Decimal(req.get_acct_sessiontime())
billing_times = decimal.Decimal(online['billing_times'])
acct_length = sessiontime-billing_times
fee_price = decimal.Decimal(product['fee_price'])
usedfee = acct_length/decimal.Decimal(3600) * fee_price
usedfee = actual_fee = int(usedfee.to_integral_value())
balance = user_balance - usedfee
if balance < 0 :
balance = 0
actual_fee = user_balance
store.update_billing(utils.Storage(
account_number = online['account_number'],
nas_addr = online['nas_addr'],
acct_session_id = online['acct_session_id'],
acct_start_time = online['acct_start_time'],
acct_session_time = req.get_acct_sessiontime(),
acct_length = int(acct_length.to_integral_value()),
acct_fee = usedfee,
actual_fee = actual_fee,
balance = balance,
is_deduct = 1,
create_time = datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S")
),False)
log.msg('%s Accounting stop request, remove online'%req.get_user_name(),level=logging.INFO)
| bsd-2-clause | -922,994,961,154,856,700 | 36.17284 | 111 | 0.599336 | false |
alvason/infectious-pulse | code/sir_array_cross_immunity.py | 1 | 10364 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Infectious Pulse
# https://github.com/alvason/infectious-pulse/
#
# ### Many-strain SIR evolution --- its equilibrium state and infectious pulse due to mutation and cross-immunity
# <codecell>
'''
author: Alvason Zhenhua Li
date: 03/23/2015
'''
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import alva_machinery as alva
AlvaFontSize = 23
AlvaFigSize = (9, 7)
numberingFig = 0
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 6))
plt.axis('off')
plt.title(r'$ Many-strain \ SIR \ equations \ (mutation \ and \ cross-immunity) $',fontsize = AlvaFontSize)
plt.text(0, 4.0/6,r'$ \frac{\partial S_n(t)}{\partial t} = \
-\beta S_n(t)\sum_{\eta = n_{min}}^{n_{max}} (1 - \frac{|n - \eta|}{r + |n - \eta|})I_{\eta}(t) + \mu N - \mu S_n(t)$'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 2.0/6, r'$ \frac{\partial I_n(t)}{\partial t} = \
+\beta S_n(t)I_n(t) - \gamma I_n(t) - \mu I_n(t) \
+ m \frac{I_{n - 1}(t) - 2I_n(t) + I_{n + 1}(t)}{(\Delta n)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 0.0/6,r'$ \frac{\partial R_n(t)}{\partial t} = \
+\gamma I_n(t) - \mu R_n(t) - \beta S_n(t)I_n(t)\
+ \beta S_n(t)\sum_{\eta = n_{min}}^{n_{max}} (1 - \frac{|n - \eta|}{r + |n - \eta|})I_{\eta}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.show()
# define many-strain S-I-R equation
def dSdt_array(SIRxt = [], *args):
# naming
S = SIRxt[0]
I = SIRxt[1]
R = SIRxt[2]
x_totalPoint = SIRxt.shape[1]
# there are n dSdt
dS_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
for xn in range(x_totalPoint):
dS_dt_array[xn] = -infecRate*S[xn]*crossInfect(cross_radius, x_totalPoint, I, xn) + inOutRate*totalSIR - inOutRate*S[xn]
return(dS_dt_array)
def dIdt_array(SIRxt = [], *args):
# naming
S = SIRxt[0]
I = SIRxt[1]
R = SIRxt[2]
x_totalPoint = SIRxt.shape[1]
# there are n dIdt
dI_dt_array = np.zeros(x_totalPoint)
# each dIdt with the same equation form
Icopy = np.copy(I)
centerX = Icopy[:]
leftX = np.roll(Icopy[:], 1)
rightX = np.roll(Icopy[:], -1)
leftX[0] =centerX[0]
rightX[-1] = centerX[-1]
for xn in range(x_totalPoint):
dI_dt_array[xn] = +infecRate*S[xn]*I[xn] - recovRate*I[xn] - inOutRate*I[xn] + mutatRate*(leftX[xn]
- 2*centerX[xn]
+ rightX[xn])/(dx**2)
return(dI_dt_array)
def dRdt_array(SIRxt = [], *args):
# naming
S = SIRxt[0]
I = SIRxt[1]
R = SIRxt[2]
x_totalPoint = SIRxt.shape[1]
# there are n dRdt
dR_dt_array = np.zeros(x_totalPoint)
# each dIdt with the same equation form
for xn in range(x_totalPoint):
dR_dt_array[xn] = +recovRate*I[xn] - inOutRate*R[xn] + \
(-infecRate*S[xn]*I[xn] + infecRate*S[xn]*crossInfect(cross_radius, x_totalPoint, I, xn))
return(dR_dt_array)
def monodA(r, i):
outM = np.absolute(i)/(r + np.absolute(i))
return (outM)
def crossInfect(cross_radius, cross_range, infect, current_i):
invertM = np.zeros(cross_range)
cross = 0.0
for neighbor in range(cross_range):
invertM[neighbor] = 1 - monodA(cross_radius, dx*(current_i - neighbor))
cross = cross + invertM[neighbor]*infect[neighbor]
# print (neighbor, invertM[neighbor], cross) # for checking purpose
# plt.plot(gridX, invertM, marker = 'o') # for checking purpose
if cross_radius < 0.1: cross = infect[current_i]
return (cross)
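# Illustrative check of the Monod weighting: at a strain distance equal to the
# radius, monodA(r, r) = r/(r + r) = 0.5, so the cross-immunity factor
# (1 - monodA) is 0.5 -- the "half-of-value" distance noted with the
# cross_radius parameter below.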
# <codecell>
# setting parameter
timeUnit = 'year'
if timeUnit == 'day':
day = 1
year = 365
elif timeUnit == 'year':
year = 1
day = float(1)/365
totalSIR = float(1) # total population
reprodNum = 1.8 # basic reproductive number R0: one infected person will transmit to 1.8 person
recovRate = float(1)/(4*day) # 4 days per period ==> rate/year = 365/4
inOutRate = float(1)/(30*year) # birth rate per year
infecRate = reprodNum*(recovRate + inOutRate)/totalSIR # per year, per person, per total-population
mutatRate = float(1)/(10**17) # mutation rate
cross_radius = float(5) # radius of cross-immunity (the distance of half-of-value in the Monod equation)
# time boundary and griding condition
minT = float(0)*year
maxT = float(40)*year
totalGPoint_T = int(1*10**3 + 1)
spacingT = np.linspace(minT, maxT, num = totalGPoint_T, retstep = True)
gridT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0)
maxX = float(40)
totalGPoint_X = int(maxX + 1)
gridingX = np.linspace(minX, maxX, num = totalGPoint_X, retstep = True)
gridX = gridingX[0]
dx = gridingX[1]
gridS_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridI_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridR_array = np.zeros([totalGPoint_X, totalGPoint_T])
# initial output condition (only one virus in equilibrium condition)
# for fast switching from one-virus equilibrium to many-virus equilibrium, invert-Monod distribution of S and R are applied
gridI_array[0, 0] = inOutRate*totalSIR*(reprodNum - 1)/infecRate # only one virus exists
gridR_array[:, 0] = recovRate*totalSIR*(reprodNum - 1)/infecRate * (1 - monodA(cross_radius, gridX))
gridS_array[:, 0] = totalSIR - gridI_array[:, 0] - gridR_array[:, 0]
# Runge Kutta numerical solution
pde_array = np.array([dSdt_array, dIdt_array, dRdt_array])
startingOut_Value = np.array([gridS_array, gridI_array, gridR_array])
gridOut_array = alva.AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX, maxX, totalGPoint_X, minT, maxT, totalGPoint_T)
# plotting
gridS = gridOut_array[0]
gridI = gridOut_array[1]
gridR = gridOut_array[2]
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.contourf(gridT, gridX, gridI, levels = np.arange(0, gridI_array[0, 0]*4, gridI_array[0, 0]/100))
plt.title(r'$ Infectious \ pulse \ by \ mutation \ and \ cross-immunity $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ discrete \ space \ (strain) $', fontsize = AlvaFontSize);
plt.colorbar()
plt.text(maxT*4.0/3, maxX*5.0/6, r'$ R_0 = %f $'%(reprodNum), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*4.0/6, r'$ \gamma = %f $'%(recovRate), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*3.0/6, r'$ \beta = %f $'%(infecRate), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*2.0/6, r'$ \mu = %f $'%(inOutRate), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*1.0/6, r'$ m = %f $'%(mutatRate*10**14), fontsize = AlvaFontSize)
plt.text(maxT*4.0/3, maxX*0.0/6, r'$ r = %f $'%(cross_radius), fontsize = AlvaFontSize)
plt.show()
# <codecell>
# plot by listing each strain
numberingFig = numberingFig + 1;
for i in range(0, totalGPoint_X, int(totalGPoint_X/10)):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridS[i], label = r'$ S_{%i}(t) $'%(i))
plt.plot(gridT, gridR[i], label = r'$ R_{%i}(t) $'%(i))
plt.plot(gridT, gridI[i], label = r'$ I_{%i}(t) $'%(i))
plt.plot(gridT, infecRate*gridS[i].T*gridI[i].T*day, label = r'$ \beta \ S_{%i}(t)I_{%i}(t) $'%(i, i)
, linestyle = 'dashed', color = 'red')
plt.plot(gridT, (gridS[i] + gridI[i] + gridR[i]).T, label = r'$ S_{%i}(t)+I_{%i}(t)+R_{%i}(t) $'%(i, i, i)
, color = 'black')
plt.grid(True)
plt.title(r'$ Prevalence \ and \ incidence \ of \ SIR $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ Proportion \ of \ population $', fontsize = AlvaFontSize);
plt.text(maxT, totalSIR*7.0/6, r'$ R_0 = %f $'%(reprodNum), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*6.0/6, r'$ \gamma = %f $'%(recovRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*5.0/6, r'$ \beta = %f $'%(infecRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*4.0/6, r'$ \mu = %f $'%(inOutRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*3.0/6, r'$ m = %f $'%(mutatRate), fontsize = AlvaFontSize)
plt.legend(loc = (1,0))
plt.show()
# <codecell>
# 3D plotting
# define GridXX function for making 2D-grid from 1D-grid
def AlvaGridXX(gridX, totalGPoint_Y):
gridXX = gridX;
for n in range(totalGPoint_Y - 1):
gridXX = np.vstack((gridXX, gridX))
return gridXX
# for 3D plotting
X = AlvaGridXX(gridT, totalGPoint_X)
Y = AlvaGridXX(gridX, totalGPoint_T).T
Z = gridI
numberingFig = numberingFig + 1
figure = plt.figure(numberingFig, figsize=(16, 7))
figure1 = figure.add_subplot(1,2,1, projection='3d')
figure1.view_init(30, -80)
figure1.plot_wireframe(X, Y, Z, cstride = totalGPoint_T, rstride = int(dx))
plt.xlabel(r'$t \ (time)$', fontsize = AlvaFontSize)
plt.ylabel(r'$x \ (virus \ space)$', fontsize = AlvaFontSize)
figure2 = figure.add_subplot(1,2,2, projection='3d')
figure2.view_init(30, 10)
figure2.plot_wireframe(X, Y, Z, cstride = totalGPoint_T/20, rstride = int(maxX))
plt.xlabel(r'$t \ (time)$', fontsize = AlvaFontSize)
plt.ylabel(r'$x \ (virus \ space)$', fontsize = AlvaFontSize)
figure.tight_layout()
plt.show()
# <codecell>
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridS.T)
plt.plot(gridT, gridR.T)
plt.plot(gridT, gridI.T)
plt.plot(gridT, (gridS + gridI + gridR).T, label = r'$ S(t)+I(t)+R(t) $', color = 'black')
plt.title(r'$ Many-strain \ SIR $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Proportion \ of \ population $', fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*6.0/6, r'$ R_0 = %f $'%(reprodNum), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*5.0/6, r'$ \gamma = %f $'%(recovRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*4.0/6, r'$ \beta = %f $'%(infecRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*3.0/6, r'$ \mu = %f $'%(inOutRate), fontsize = AlvaFontSize)
plt.text(maxT, totalSIR*2.0/6, r'$ m = %f $'%(mutatRate), fontsize = AlvaFontSize)
plt.show()
# <codecell>
| gpl-2.0 | 7,623,518,905,974,450,000 | 39.484375 | 231 | 0.623794 | false |
e7dal/hexy | hexy/commands/cmd_point.py | 1 | 1476 | # -*- coding: utf-8 -*-
# Part of hexy. See LICENSE file for full copyright and licensing details.
import click
from ..cli import pass_hexy
from .. import Hexy
@click.command('point',
short_help='Put a single point on a grid and show grid in hexy tool')
@click.option('--xsize',
'-x',
type=int,
default=10,
help='set the x size (horizontal) for the grid')
@click.option('--ysize',
'-y',
type=int,
default=10,
help='set the y size (vertical) for the grid')
@click.option('--xpos',
'-i',
type=int,
default=3,
help='set the x position for the point')
@click.option('--ypos',
'-j',
type=int,
default=3,
              help='set the y position for the point')
@click.option('--char',
'-c',
type=str,
default='x',
help='the character to put in the given point i,j')
@pass_hexy
def cli(ctx, xsize,ysize,xpos,ypos,char):
"""Show example for doing some task in hexy(experimental)"""
ctx.say('grid', stuff=(xsize,ysize),verbosity=100)
ctx.say('point',stuff=(xpos,ypos,char),verbosity=100)
if len(char)>1:
ctx.mumble('point, the character is longer than one, using first char',verbosity=100)
char=char[0]
g=Hexy(x=xsize,y=ysize)
g.point(xpos=xpos,ypos=ypos,char=char)
click.echo(g.show())
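# Example invocation (assuming the installed 'hexy' entry point):
#   hexy point --xsize 20 --ysize 10 --xpos 5 --ypos 5 --char o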
| gpl-3.0 | -5,836,573,175,946,536,000 | 29.75 | 87 | 0.553523 | false |
unstko/adventofcode2016 | 01/solution.py | 1 | 3446 | from lib import solution
from lib.point import Point2D
from lib.map import Map
import copy
class Solution(solution.Solution):
def __init__(self, nr):
super().__init__(nr)
self.instructions = []
self.directions = ['N', 'E', 'S', 'W']
self.face = 0
self.source = Point2D(0, 0)
self.destination = Point2D(0, 0)
self.distance = 0
self.map = Map('RGB', (350, 350), 0, 'center')
def calculate(self, test=False):
self.test = test
self.map_init()
self.read_instructions()
self.calc_destination()
self.calc_distance()
self.map_result()
def map_init(self):
self.map.set_point(self.source, (0, 255, 0))
def map_result(self):
if not self.test:
self.map.show()
self.map.print_min_and_max()
def read_instructions(self):
self.read_input()
self.instructions = self.input.split(', ')
def calc_destination(self):
for instruction in self.instructions:
self.calc_face(instruction)
self.move_destination(instruction)
self.set_and_check_path()
def calc_face(self, instruction):
turn = instruction[0]
move = 1
if turn == 'L':
move = -1
self.face = (self.face + move) % len(self.directions)
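        # e.g. facing 'N' (index 0), an 'L' turn gives (0 - 1) % 4 == 3 -> 'W'.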
def move_destination(self, instruction):
blocks = int(instruction[1:])
direction = self.get_direction()
self.source = copy.copy(self.destination)
if direction == 'N':
self.destination.move(0, blocks)
elif direction == 'S':
self.destination.move(0, -1 * blocks)
elif direction == 'E':
self.destination.move(blocks, 0)
elif direction == 'W':
self.destination.move(-1 * blocks, 0)
def get_direction(self):
return self.directions[self.face]
def calc_distance(self):
self.distance = self.destination.manhattan_distance(Point2D(0, 0))
self.set_solution(1, self.distance)
def set_and_check_path(self):
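        # Trace every grid point along the walked segment; the puzzle's
        # second part asks for the first location that is visited twice.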
if not self.is_calculated(2):
x_src = self.source.get_x()
y_src = self.source.get_y()
x_dst = self.destination.get_x()
y_dst = self.destination.get_y()
direction = self.get_direction()
step = 1
if direction == 'S' or direction == 'W':
step = -1
range_x = range(x_src, x_dst+step, step)
range_y = range(y_src, y_dst+step, step)
for x in range_x:
if x == x_src:
continue
point = Point2D(x, y_dst)
check = self.set_and_check_point(point)
if check:
return
for y in range_y:
if y == y_src:
continue
point = Point2D(x_dst, y)
check = self.set_and_check_point(point)
if check:
return
def set_and_check_point(self, point: Point2D):
check = False
if self.map.get_point(point) == (255, 255, 255):
self.map.set_point(point, (255, 0, 0))
distance = point.manhattan_distance(Point2D(0, 0))
self.set_solution(2, distance)
check = True
else:
self.map.set_point(point, (255, 255, 255))
return check
| mit | -315,879,149,843,139,140 | 31.205607 | 74 | 0.525537 | false |
wadobo/socializa | backend/socializa/settings.py | 1 | 9337 | """
Django settings for socializa project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import logging
# Silence oauthlib logging while running tests. To enable debug output,
# change logging.ERROR below to logging.DEBUG.
log = logging.getLogger('oauthlib')
log.setLevel(logging.ERROR)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gh)^9&mtcp($nlm-zvlnb(lpe+b8kgbk(l30@u%xdpk@w5@n%j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEV = False
ALLOWED_HOSTS = []
ADMINS = (
('wadobo', '[email protected]'),
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'rest_framework.authtoken',
'oauth2_provider',
'social_django',
'rest_framework_social_oauth2',
'rest_framework_swagger',
'django_nose',
'frontend',
'player',
'event',
'game',
'clue',
'store',
'editor',
'landing',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'socializa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'socializa.wsgi.application'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
#'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
#],
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
'rest_framework_social_oauth2.authentication.SocialAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'socializa',
'USER': 'socializa',
'PASSWORD': 'socializa',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'en-us'
LANGUAGES = [
('en', _('English')),
('es', _('Spanish')),
]
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"), )
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Dev active
if DEV:
MIDDLEWARE += ('silk.middleware.SilkyMiddleware',)
INSTALLED_APPS += ('silk', 'django_extensions')
SILKY_PYTHON_PROFILER = True
SILKY_META = True
SILKY_DYNAMIC_PROFILING = [
{'module': 'player.views', 'function': 'PlayersNear.get', 'name': 'near players'},
{'module': 'player.views', 'function': 'MeetingCreate.post', 'name': 'meeting players'}
]
GRAPH_MODELS = {
'all_applications': False,
'group_models': True,
}
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
BASE_URL = 'https://socializa.wadobo.com'
DEFAULT_FROM_EMAIL = '[email protected]'
# SOCIAL AUTHENTICATION
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'rest_framework_social_oauth2.backends.DjangoOAuth2',
'django.contrib.auth.backends.ModelBackend'
)
SOCIAL_AUTH_PIPELINE = (
# Get the information we can about the user and return it in a simple
# format to create the user instance later. On some cases the details are
# already part of the auth response from the provider, but sometimes this
# could hit a provider API.
'social.pipeline.social_auth.social_details',
# Get the social uid from whichever service we're authing thru. The uid is
# the unique identifier of the given user in the provider.
'social.pipeline.social_auth.social_uid',
# Verifies that the current auth process is valid within the current
# project, this is where emails and domains whitelists are applied (if
# defined).
'social.pipeline.social_auth.auth_allowed',
# Checks if the current social-account is already associated in the site.
'social.pipeline.social_auth.social_user',
# Make up a username for this person, appends a random string at the end if
# there's any collision.
'social.pipeline.user.get_username',
# Send a validation email to the user to verify its email address.
# Disabled by default.
'social.pipeline.mail.mail_validation',
# Associates the current social details with another user account with
# a similar email address. Disabled by default.
'social.pipeline.social_auth.associate_by_email',
# Create a user account if we haven't found one yet.
'social.pipeline.user.create_user',
# Custom function
'player.utils.create_player',
# Create the record that associates the social account with the user.
'social.pipeline.social_auth.associate_user',
# Populate the extra_data field in the social record with the values
# specified by settings (and the default ones like access_token, etc).
'social.pipeline.social_auth.load_extra_data',
# Update the user record with any changed info from the auth service.
'social.pipeline.user.user_details',
)
PROPRIETARY_BACKEND_NAME = 'Django'
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
OAUTH2_PROVIDER = {
'ACCESS_TOKEN_EXPIRE_SECONDS': 24 * 60 * 60 * 365, # a whole year
}
# DEBUG SOCIAL_AUTH
SOCIAL_AUTH_RAISE_EXCEPTIONS = False
# GOOGLE
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = 'update me'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'update me'
GOOGLE_APIKEY = 'update me'
# FACEBOOK
SOCIAL_AUTH_FACEBOOK_KEY = 'update me'
SOCIAL_AUTH_FACEBOOK_SECRET = 'update me'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id,name,email',
}
SWAGGER_SETTINGS = {
'DOC_EXPANSION': 'list',
'APIS_SORTER': 'alpha',
'JSON_EDITOR': True,
'OPERATIONS_SORTER': 'alpha',
'USE_SESSION_AUTH': False,
'SHOW_REQUEST_HEADERS': True,
'SECURITY_DEFINITIONS': {
'token': {
'type': 'apiKey',
'name': 'authorization',
'description': 'The valid api_key should be: "Token xxxxxxxxxxx"',
'in': 'header'
},
},
}
# For celery
CELERY_BROKER_URL = 'amqp://localhost'
CELERY_RESULT_BACKEND = 'amqp://localhost'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
DEFAULT_VISION_DISTANCE = 1000 # m
DEFAULT_MEETING_DISTANCE = 10 # m
QR_LENGTH = 16
DEFAULT_PAGINATION = 20
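
# local_settings.py, if present, overrides the values above. An illustrative
# (hypothetical) override might contain:
#   DEBUG = False
#   ALLOWED_HOSTS = ['socializa.example.com']
#   SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '<key from the Google console>'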
try:
from local_settings import *
except ImportError:
print("NO LOCAL SETTINGS")
| agpl-3.0 | 583,695,365,934,439,000 | 28.269592 | 95 | 0.678912 | false |
mfussenegger/python-prompt-toolkit | prompt_toolkit/key_bindings/vi.py | 1 | 35929 | from __future__ import unicode_literals
from ..enums import IncrementalSearchDirection, InputMode
from ..keys import Keys
from ..line import ClipboardData, ClipboardDataType, indent, unindent
from ..selection import SelectionType
from .basic import basic_bindings
from .utils import create_handle_decorator
import codecs
__all__ = (
'vi_bindings',
)
class CursorRegion(object):
"""
Return struct for functions wrapped in ``change_delete_move_yank_handler``.
"""
def __init__(self, start, end=0):
self.start = start
self.end = end
def sorted(self):
"""
Return a (start, end) tuple where start <= end.
"""
if self.start < self.end:
return self.start, self.end
else:
return self.end, self.start
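
# A region is expressed relative to the current cursor position and may
# extend in either direction; e.g. CursorRegion(5, -3).sorted() == (-3, 5).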
def vi_bindings(registry, cli_ref):
"""
Vi extensions.
# Overview of Readline Vi commands:
# http://www.catonmat.net/download/bash-vi-editing-mode-cheat-sheet.pdf
"""
basic_bindings(registry, cli_ref)
line = cli_ref().line
search_line = cli_ref().lines['search']
handle = create_handle_decorator(registry, line)
_last_character_find = [None] # (char, backwards) tuple
_search_direction = [IncrementalSearchDirection.FORWARD]
vi_transform_functions = [
# Rot 13 transformation
(('g', '?'), lambda string: codecs.encode(string, 'rot_13')),
# To lowercase
(('g', 'u'), lambda string: string.lower()),
# To uppercase.
(('g', 'U'), lambda string: string.upper()),
# Swap case.
# (XXX: If we would implement 'tildeop', the 'g' prefix is not required.)
(('g', '~'), lambda string: string.swapcase()),
]
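    # For example, after 'g?' the motion that follows selects the region to
    # transform; codecs.encode('Hello', 'rot_13') returns 'Uryyb'.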
@registry.add_after_handler_callback
def check_cursor_position(event):
"""
After every command, make sure that if we are in navigation mode, we
never put the cursor after the last character of a line. (Unless it's
an empty line.)
"""
if (
event.input_processor.input_mode == InputMode.VI_NAVIGATION and
line.document.is_cursor_at_the_end_of_line and
len(line.document.current_line) > 0):
line.cursor_position -= 1
@handle(Keys.Escape)
def _(event):
"""
Escape goes to vi navigation mode.
"""
if event.input_processor.input_mode in (InputMode.INSERT,
InputMode.VI_REPLACE):
line.cursor_position += line.document.get_cursor_left_position()
if event.input_processor.input_mode == InputMode.SELECTION:
line.exit_selection()
event.input_processor.pop_input_mode()
else:
event.input_processor.input_mode = InputMode.VI_NAVIGATION
@handle(Keys.Up, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Arrow up in navigation mode.
"""
line.auto_up(count=event.arg)
@handle(Keys.Down, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Arrow down in navigation mode.
"""
line.auto_down(count=event.arg)
@handle(Keys.Backspace, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
In navigation-mode, move cursor.
"""
line.cursor_position += line.document.get_cursor_left_position(count=event.arg)
@handle(Keys.ControlV, Keys.Any, in_mode=InputMode.INSERT)
def _(event):
"""
Insert a character literally (quoted insert).
"""
line.insert_text(event.data, overwrite=False)
@handle(Keys.ControlN, in_mode=InputMode.INSERT)
def _(event):
line.complete_next()
@handle(Keys.ControlN, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
        Control-N in navigation mode goes down.
"""
line.auto_down()
@handle(Keys.ControlP, in_mode=InputMode.INSERT)
def _(event):
"""
Control-P: To previous completion.
"""
line.complete_previous()
@handle(Keys.ControlY, in_mode=InputMode.INSERT)
def _(event):
"""
Accept current completion.
"""
line.complete_state = None
@handle(Keys.ControlE, in_mode=InputMode.INSERT)
def _(event):
"""
Cancel completion. Go back to originally typed text.
"""
line.cancel_completion()
@handle(Keys.ControlP, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
CtrlP in navigation mode goes up.
"""
line.auto_up()
@handle(Keys.ControlJ, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlM, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
In navigation mode, pressing enter will always return the input.
"""
if line.validate():
line.add_to_history()
cli_ref().set_return_value(line.document)
# ** In navigation mode **
# List of navigation commands: http://hea-www.harvard.edu/~fine/Tech/vi.html
@handle('a', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.cursor_position += line.document.get_cursor_right_position()
event.input_processor.input_mode = InputMode.INSERT
@handle('A', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.cursor_position += line.document.get_end_of_line_position()
event.input_processor.input_mode = InputMode.INSERT
@handle('C', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
# Change to end of line.
# Same as 'c$' (which is implemented elsewhere.)
"""
deleted = line.delete(count=line.document.get_end_of_line_position())
if deleted:
data = ClipboardData(deleted)
line.set_clipboard(data)
event.input_processor.input_mode = InputMode.INSERT
@handle('c', 'c', in_mode=InputMode.VI_NAVIGATION)
@handle('S', in_mode=InputMode.VI_NAVIGATION)
def _(event): # TODO: implement 'arg'
"""
Change current line
"""
# We copy the whole line.
data = ClipboardData(line.document.current_line, ClipboardDataType.LINES)
line.set_clipboard(data)
# But we delete after the whitespace
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
line.delete(count=line.document.get_end_of_line_position())
event.input_processor.input_mode = InputMode.INSERT
@handle('D', in_mode=InputMode.VI_NAVIGATION)
def _(event):
deleted = line.delete(count=line.document.get_end_of_line_position())
line.set_clipboard(ClipboardData(deleted))
@handle('d', 'd', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Delete line. (Or the following 'n' lines.)
"""
# Split string in before/deleted/after text.
lines = line.document.lines
before = '\n'.join(lines[:line.document.cursor_position_row])
deleted = '\n'.join(lines[line.document.cursor_position_row: line.document.cursor_position_row + event.arg])
after = '\n'.join(lines[line.document.cursor_position_row + event.arg:])
# Set new text.
if before and after:
before = before + '\n'
line.text = before + after
# Set cursor position. (At the start of the first 'after' line, after the leading whitespace.)
line.cursor_position = len(before) + len(after) - len(after.lstrip(' '))
# Set clipboard data
line.set_clipboard(ClipboardData(deleted, ClipboardDataType.LINES))
@handle('G', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
        If an argument is given, move to this line in the history. (For
        example, 15G.) Otherwise, go to the last line of the current string.
"""
        # If an arg has been given explicitly.
if event._arg:
line.go_to_history(event.arg - 1)
# Otherwise this goes to the last line of the file.
else:
line.cursor_position = len(line.text)
@handle('i', in_mode=InputMode.VI_NAVIGATION)
def _(event):
event.input_processor.input_mode = InputMode.INSERT
@handle('I', in_mode=InputMode.VI_NAVIGATION)
def _(event):
event.input_processor.input_mode = InputMode.INSERT
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('J', in_mode=InputMode.VI_NAVIGATION)
def _(event):
for i in range(event.arg):
line.join_next_line()
@handle('n', in_mode=InputMode.VI_NAVIGATION)
def _(event): # XXX: use `change_delete_move_yank_handler` and implement 'arg'
"""
Search next.
"""
line.incremental_search(_search_direction[0])
@handle('N', in_mode=InputMode.VI_NAVIGATION)
def _(event): # TODO: use `change_delete_move_yank_handler` and implement 'arg'
"""
Search previous.
"""
if _search_direction[0] == IncrementalSearchDirection.BACKWARD:
line.incremental_search(IncrementalSearchDirection.FORWARD)
else:
line.incremental_search(IncrementalSearchDirection.BACKWARD)
@handle('p', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Paste after
"""
for i in range(event.arg):
line.paste_from_clipboard()
@handle('P', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Paste before
"""
for i in range(event.arg):
line.paste_from_clipboard(before=True)
@handle('r', Keys.Any, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Replace single character under cursor
"""
line.insert_text(event.data * event.arg, overwrite=True)
line.cursor_position -= 1
@handle('R', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Go to 'replace'-mode.
"""
event.input_processor.input_mode = InputMode.VI_REPLACE
@handle('s', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Substitute with new text
(Delete character(s) and go to insert mode.)
"""
data = ClipboardData(''.join(line.delete() for i in range(event.arg)))
line.set_clipboard(data)
event.input_processor.input_mode = InputMode.INSERT
@handle('u', in_mode=InputMode.VI_NAVIGATION)
def _(event):
for i in range(event.arg):
line.undo()
@handle('v', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.open_in_editor()
# @handle('v', in_mode=InputMode.VI_NAVIGATION)
# def _(event):
# """
# Start characters selection.
# """
# line.start_selection(selection_type=SelectionType.CHARACTERS)
# event.input_processor.push_input_mode(InputMode.SELECTION)
@handle('V', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Start lines selection.
"""
line.start_selection(selection_type=SelectionType.LINES)
event.input_processor.push_input_mode(InputMode.SELECTION)
@handle('a', 'w', in_mode=InputMode.SELECTION)
@handle('a', 'W', in_mode=InputMode.SELECTION)
def _(event):
"""
Switch from visual linewise mode to visual characterwise mode.
"""
if line.selection_state and line.selection_state.type == SelectionType.LINES:
line.selection_state.type = SelectionType.CHARACTERS
@handle('x', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Delete character.
"""
data = ClipboardData(line.delete(count=event.arg))
line.set_clipboard(data)
@handle('x', in_mode=InputMode.SELECTION)
@handle('d', 'd', in_mode=InputMode.SELECTION)
def _(event):
"""
Cut selection.
"""
selection_type = line.selection_state.type
deleted = line.cut_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
@handle('c', in_mode=InputMode.SELECTION)
def _(event):
"""
Change selection (cut and go to insert mode).
"""
selection_type = line.selection_state.type
deleted = line.cut_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
event.input_processor.input_mode = InputMode.INSERT
@handle('y', in_mode=InputMode.SELECTION)
def _(event):
"""
Copy selection.
"""
selection_type = line.selection_state.type
deleted = line.copy_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
@handle('X', in_mode=InputMode.VI_NAVIGATION)
def _(event):
data = line.delete_before_cursor()
line.set_clipboard(data)
@handle('y', 'y', in_mode=InputMode.VI_NAVIGATION)
@handle('Y', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Yank the whole line.
"""
text = '\n'.join(line.document.lines_from_current[:event.arg])
data = ClipboardData(text, ClipboardDataType.LINES)
line.set_clipboard(data)
@handle('+', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Move to first non whitespace of next line
"""
line.cursor_position += line.document.get_cursor_down_position(count=event.arg)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('-', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Move to first non whitespace of previous line
"""
line.cursor_position += line.document.get_cursor_up_position(count=event.arg)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('>', '>', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Indent lines.
"""
current_row = line.document.cursor_position_row
indent(line, current_row, current_row + event.arg)
@handle('<', '<', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Unindent lines.
"""
current_row = line.document.cursor_position_row
unindent(line, current_row, current_row + event.arg)
@handle('>', in_mode=InputMode.SELECTION)
def _(event):
"""
Indent selection
"""
selection_type = line.selection_state.type
if selection_type == SelectionType.LINES:
from_, to = line.document.selection_range()
from_, _ = line.document.translate_index_to_position(from_)
to, _ = line.document.translate_index_to_position(to)
indent(line, from_ - 1, to, count=event.arg) # XXX: why does translate_index_to_position return 1-based indexing???
event.input_processor.pop_input_mode()
@handle('<', in_mode=InputMode.SELECTION)
def _(event):
"""
Unindent selection
"""
selection_type = line.selection_state.type
if selection_type == SelectionType.LINES:
from_, to = line.document.selection_range()
from_, _ = line.document.translate_index_to_position(from_)
to, _ = line.document.translate_index_to_position(to)
unindent(line, from_ - 1, to, count=event.arg)
event.input_processor.pop_input_mode()
@handle('O', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Open line above and enter insertion mode
"""
line.insert_line_above()
event.input_processor.input_mode = InputMode.INSERT
@handle('o', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Open line below and enter insertion mode
"""
line.insert_line_below()
event.input_processor.input_mode = InputMode.INSERT
@handle('~', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Reverse case of current character and move cursor forward.
"""
c = line.document.current_char
if c is not None and c != '\n':
c = (c.upper() if c.islower() else c.lower())
line.insert_text(c, overwrite=True)
@handle('/', in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlS, in_mode=InputMode.INSERT)
@handle(Keys.ControlS, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlS, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Vi-style forward search.
"""
_search_direction[0] = direction = IncrementalSearchDirection.FORWARD
line.incremental_search(direction)
if event.input_processor.input_mode != InputMode.VI_SEARCH:
event.input_processor.push_input_mode(InputMode.VI_SEARCH)
@handle('?', in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlR, in_mode=InputMode.INSERT)
@handle(Keys.ControlR, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlR, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Vi-style backward search.
"""
_search_direction[0] = direction = IncrementalSearchDirection.BACKWARD
line.incremental_search(direction)
if event.input_processor.input_mode != InputMode.VI_SEARCH:
event.input_processor.push_input_mode(InputMode.VI_SEARCH)
@handle('#', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
        Go to the previous occurrence of this word.
"""
pass
@handle('*', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
        Go to the next occurrence of this word.
"""
pass
@handle('(', in_mode=InputMode.VI_NAVIGATION)
def _(event):
        # TODO: go to beginning of sentence.
pass
@handle(')', in_mode=InputMode.VI_NAVIGATION)
def _(event):
# TODO: go to end of sentence.
pass
def change_delete_move_yank_handler(*keys, **kw):
"""
        Register change/delete/move/yank handlers, e.g. 'dw'/'cw'/'w'/'yw'.
        The decorated function should return a ``CursorRegion``.
        This decorator will create the 'change', 'delete', 'yank' and move
        variants, based on that ``CursorRegion``.
"""
no_move_handler = kw.pop('no_move_handler', False)
# TODO: Also do '>' and '<' indent/unindent operators.
# TODO: Also "gq": text formatting
# See: :help motion.txt
def decorator(func):
if not no_move_handler:
@handle(*keys, in_mode=InputMode.VI_NAVIGATION)
@handle(*keys, in_mode=InputMode.SELECTION)
def move(event):
""" Create move handler. """
region = func(event)
line.cursor_position += region.start
def create_transform_handler(transform_func, *a):
@handle(*(a + keys), in_mode=InputMode.VI_NAVIGATION)
def _(event):
""" Apply transformation (uppercase, lowercase, rot13, swap case). """
region = func(event)
start, end = region.sorted()
# Transform.
line.transform_region(
line.cursor_position + start,
line.cursor_position + end,
transform_func)
# Move cursor
line.cursor_position += (region.end or region.start)
for k, f in vi_transform_functions:
create_transform_handler(f, *k)
@handle('y', *keys, in_mode=InputMode.VI_NAVIGATION)
def yank_handler(event):
""" Create yank handler. """
region = func(event)
start, end = region.sorted()
substring = line.text[line.cursor_position + start: line.cursor_position + end]
if substring:
line.set_clipboard(ClipboardData(substring))
def create(delete_only):
""" Create delete and change handlers. """
            @handle('cd'[delete_only], *keys, in_mode=InputMode.VI_NAVIGATION)
def _(event):
region = func(event)
deleted = ''
if region:
start, end = region.sorted()
# Move to the start of the region.
line.cursor_position += start
# Delete until end of region.
deleted = line.delete(count=end-start)
# Set deleted/changed text to clipboard.
if deleted:
line.set_clipboard(ClipboardData(''.join(deleted)))
# Only go back to insert mode in case of 'change'.
if not delete_only:
event.input_processor.input_mode = InputMode.INSERT
create(True)
create(False)
return func
return decorator
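    # Each movement function below therefore fans out into a family of
    # bindings: registering '$' yields '$' (move), 'd$' (delete), 'c$'
    # (change), 'y$' (yank) and the 'gU$'/'gu$'/'g~$'/'g?$' transforms,
    # all derived from a single CursorRegion definition.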
@change_delete_move_yank_handler('b')
def _(event):
""" Move one word or token left. """
return CursorRegion(line.document.find_start_of_previous_word(count=event.arg) or 0)
@change_delete_move_yank_handler('B')
def _(event):
""" Move one non-blank word left """
return CursorRegion(line.document.find_start_of_previous_word(count=event.arg, WORD=True) or 0)
@change_delete_move_yank_handler('$')
def key_dollar(event):
""" 'c$', 'd$' and '$': Delete/change/move until end of line. """
return CursorRegion(line.document.get_end_of_line_position())
@change_delete_move_yank_handler('w')
def _(event):
""" 'word' forward. 'cw', 'dw', 'w': Delete/change/move one word. """
return CursorRegion(line.document.find_next_word_beginning(count=event.arg) or
line.document.end_position)
@change_delete_move_yank_handler('W')
def _(event):
""" 'WORD' forward. 'cW', 'dW', 'W': Delete/change/move one WORD. """
return CursorRegion(line.document.find_next_word_beginning(count=event.arg, WORD=True) or
line.document.end_position)
@change_delete_move_yank_handler('e')
def _(event):
""" End of 'word': 'ce', 'de', 'e' """
end = line.document.find_next_word_ending(count=event.arg)
return CursorRegion(end - 1 if end else 0)
@change_delete_move_yank_handler('E')
def _(event):
""" End of 'WORD': 'cE', 'dE', 'E' """
end = line.document.find_next_word_ending(count=event.arg, WORD=True)
return CursorRegion(end - 1 if end else 0)
@change_delete_move_yank_handler('i', 'w', no_move_handler=True)
def _(event):
""" Inner 'word': ciw and diw """
start, end = line.document.find_boundaries_of_current_word()
return CursorRegion(start, end)
@change_delete_move_yank_handler('a', 'w', no_move_handler=True)
def _(event):
""" A 'word': caw and daw """
start, end = line.document.find_boundaries_of_current_word(include_trailing_whitespace=True)
return CursorRegion(start, end)
@change_delete_move_yank_handler('i', 'W', no_move_handler=True)
def _(event):
""" Inner 'WORD': ciW and diW """
start, end = line.document.find_boundaries_of_current_word(WORD=True)
return CursorRegion(start, end)
@change_delete_move_yank_handler('a', 'W', no_move_handler=True)
def _(event):
""" A 'WORD': caw and daw """
start, end = line.document.find_boundaries_of_current_word(WORD=True, include_trailing_whitespace=True)
return CursorRegion(start, end)
@change_delete_move_yank_handler('^')
def key_circumflex(event):
""" 'c^', 'd^' and '^': Soft start of line, after whitespace. """
return CursorRegion(line.document.get_start_of_line_position(after_whitespace=True))
@change_delete_move_yank_handler('0', no_move_handler=True)
def key_zero(event):
"""
'c0', 'd0': Hard start of line, before whitespace.
(The move '0' key is implemented elsewhere, because a '0' could also change the `arg`.)
"""
return CursorRegion(line.document.get_start_of_line_position(after_whitespace=False))
def create_ci_ca_handles(ci_start, ci_end, inner):
        # TODO: 'dab', 'dib' (brackets or block), 'daB', 'diB' (braces).
        # TODO: 'dat', 'dit' (tags, like xml)
"""
        Delete/change the string between a start and stop character. The 'i'
        (inner) variants keep these delimiters; the 'a' variants include them.
This implements all the ci", ci<, ci{, ci(, di", di<, ca", ca<, ... combinations.
"""
@change_delete_move_yank_handler('ai'[inner], ci_start, no_move_handler=True)
@change_delete_move_yank_handler('ai'[inner], ci_end, no_move_handler=True)
def _(event):
start = line.document.find_backwards(ci_start, in_current_line=True)
end = line.document.find(ci_end, in_current_line=True)
if start is not None and end is not None:
offset = 0 if inner else 1
return CursorRegion(start + 1 - offset, end + offset)
for inner in (False, True):
for ci_start, ci_end in [('"', '"'), ("'", "'"), ("`", "`"),
('[', ']'), ('<', '>'), ('{', '}'), ('(', ')')]:
create_ci_ca_handles(ci_start, ci_end, inner)
@change_delete_move_yank_handler('{') # TODO: implement 'arg'
def _(event):
"""
Move to previous blank-line separated section.
Implements '{', 'c{', 'd{', 'y{'
"""
line_index = line.document.find_previous_matching_line(
lambda text: not text or text.isspace())
if line_index:
index = line.document.get_cursor_up_position(count=-line_index)
else:
index = 0
return CursorRegion(index)
@change_delete_move_yank_handler('}') # TODO: implement 'arg'
def _(event):
"""
Move to next blank-line separated section.
Implements '}', 'c}', 'd}', 'y}'
"""
line_index = line.document.find_next_matching_line(
lambda text: not text or text.isspace())
if line_index:
index = line.document.get_cursor_down_position(count=line_index)
else:
index = 0
return CursorRegion(index)
@change_delete_move_yank_handler('f', Keys.Any)
def _(event):
"""
        Go to the next occurrence of a character. Typing 'fx' moves the
        cursor to the next occurrence of the character 'x'.
"""
_last_character_find[0] = (event.data, False)
match = line.document.find(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match or 0)
@change_delete_move_yank_handler('F', Keys.Any)
def _(event):
"""
        Go to the previous occurrence of a character. Typing 'Fx' moves the
        cursor to the previous occurrence of the character 'x'.
"""
_last_character_find[0] = (event.data, True)
return CursorRegion(line.document.find_backwards(event.data, in_current_line=True, count=event.arg) or 0)
@change_delete_move_yank_handler('t', Keys.Any)
def _(event):
"""
        Move right to the next occurrence of c, then one char backward.
"""
_last_character_find[0] = (event.data, False)
match = line.document.find(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match - 1 if match else 0)
@change_delete_move_yank_handler('T', Keys.Any)
def _(event):
"""
        Move left to the previous occurrence of c, then one char forward.
"""
_last_character_find[0] = (event.data, True)
match = line.document.find_backwards(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match + 1 if match else 0)
def repeat(reverse):
"""
Create ',' and ';' commands.
"""
@change_delete_move_yank_handler(',' if reverse else ';')
def _(event):
# Repeat the last 'f'/'F'/'t'/'T' command.
pos = 0
if _last_character_find[0]:
char, backwards = _last_character_find[0]
if reverse:
backwards = not backwards
if backwards:
pos = line.document.find_backwards(char, in_current_line=True, count=event.arg)
else:
pos = line.document.find(char, in_current_line=True, count=event.arg)
return CursorRegion(pos or 0)
repeat(True)
repeat(False)
@change_delete_move_yank_handler('h')
@change_delete_move_yank_handler(Keys.Left)
def _(event):
""" Implements 'ch', 'dh', 'h': Cursor left. """
return CursorRegion(line.document.get_cursor_left_position(count=event.arg))
@change_delete_move_yank_handler('j')
def _(event):
""" Implements 'cj', 'dj', 'j', ... Cursor up. """
return CursorRegion(line.document.get_cursor_down_position(count=event.arg))
@change_delete_move_yank_handler('k')
def _(event):
""" Implements 'ck', 'dk', 'k', ... Cursor up. """
return CursorRegion(line.document.get_cursor_up_position(count=event.arg))
@change_delete_move_yank_handler('l')
@change_delete_move_yank_handler(' ')
@change_delete_move_yank_handler(Keys.Right)
def _(event):
""" Implements 'cl', 'dl', 'l', 'c ', 'd ', ' '. Cursor right. """
return CursorRegion(line.document.get_cursor_right_position(count=event.arg))
@change_delete_move_yank_handler('H')
def _(event):
""" Implements 'cH', 'dH', 'H'. """
# Vi moves to the start of the visible region.
# cursor position 0 is okay for us.
return CursorRegion(-len(line.document.text_before_cursor))
@change_delete_move_yank_handler('L')
def _(event):
# Vi moves to the end of the visible region.
# cursor position 0 is okay for us.
return CursorRegion(len(line.document.text_after_cursor))
@change_delete_move_yank_handler('%')
def _(event):
"""
        Implements 'c%', 'd%', '%', 'y%' (move to corresponding bracket).
        If an 'arg' has been given, go to this percentage position in the file.
"""
if event._arg:
# If 'arg' has been given, the meaning of % is to go to the 'x%'
# row in the file.
if 0 < event.arg <= 100:
absolute_index = line.document.translate_row_col_to_index(
int(event.arg * line.document.line_count / 100), 0)
return CursorRegion(absolute_index - line.document.cursor_position)
else:
return CursorRegion(0) # Do nothing.
else:
# Move to the corresponding opening/closing bracket (()'s, []'s and {}'s).
return CursorRegion(line.document.matching_bracket_position)
@change_delete_move_yank_handler('|')
def _(event):
# Move to the n-th column (you may specify the argument n by typing
# it on number keys, for example, 20|).
return CursorRegion(line.document.get_column_cursor_position(event.arg))
@change_delete_move_yank_handler('g', 'g')
def _(event):
"""
Implements 'gg', 'cgg', 'ygg'
"""
# Move to the top of the input.
return CursorRegion(line.document.home_position)
@handle('!', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
'!' opens the system prompt.
"""
event.input_processor.push_input_mode(InputMode.SYSTEM)
@handle(Keys.Any, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.Any, in_mode=InputMode.SELECTION)
def _(event):
"""
        Always handle numerics in navigation mode as arg.
"""
if event.data in '123456789' or (event._arg and event.data == '0'):
event.append_to_arg_count(event.data)
elif event.data == '0':
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=False)
@handle(Keys.Any, in_mode=InputMode.VI_REPLACE)
def _(event):
"""
Insert data at cursor position.
"""
line.insert_text(event.data, overwrite=True)
@handle(Keys.Any, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Insert text after the / or ? prompt.
"""
search_line.insert_text(event.data)
line.set_search_text(search_line.text)
@handle(Keys.ControlJ, in_mode=InputMode.VI_SEARCH)
@handle(Keys.ControlM, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Enter at the / or ? prompt.
"""
        # Add query to the history of the search line.
search_line.add_to_history()
search_line.reset()
# Go back to navigation mode.
event.input_processor.pop_input_mode()
@handle(Keys.Backspace, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Backspace at the vi-search prompt.
"""
if search_line.text:
search_line.delete_before_cursor()
line.set_search_text(search_line.text)
else:
# If no text after the prompt, cancel search.
line.exit_isearch(restore_original_line=True)
search_line.reset()
event.input_processor.pop_input_mode()
@handle(Keys.Up, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Go to the previous history item at the search prompt.
"""
search_line.auto_up()
line.set_search_text(search_line.text)
@handle(Keys.Down, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Go to the next history item at the search prompt.
"""
search_line.auto_down()
search_line.cursor_position = len(search_line.text)
line.set_search_text(search_line.text)
@handle(Keys.Left, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Arrow left at the search prompt.
"""
search_line.cursor_left()
@handle(Keys.Right, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Arrow right at the search prompt.
"""
search_line.cursor_right()
@handle(Keys.ControlC, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Cancel search.
"""
line.exit_isearch(restore_original_line=True)
search_line.reset()
event.input_processor.pop_input_mode()
def create_selection_transform_handler(keys, transform_func):
"""
Apply transformation on selection (uppercase, lowercase, rot13, swap case).
"""
@handle(*keys, in_mode=InputMode.SELECTION)
def _(event):
range = line.document.selection_range()
if range:
line.transform_region(range[0], range[1], transform_func)
event.input_processor.pop_input_mode()
for k, f in vi_transform_functions:
create_selection_transform_handler(k, f)
@handle(Keys.ControlX, Keys.ControlL, in_mode=InputMode.INSERT)
def _(event):
"""
Pressing the ControlX - ControlL sequence in Vi mode does line
completion based on the other lines in the document and the history.
"""
line.start_history_lines_completion()
@handle(Keys.ControlX, Keys.ControlF, in_mode=InputMode.INSERT)
def _(event):
"""
Complete file names.
"""
# TODO
pass
| bsd-3-clause | 7,872,446,468,992,112,000 | 34.19001 | 128 | 0.583234 | false |
prestodb/presto-admin | tests/unit/test_presto_conf.py | 1 | 4099 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the presto_conf module
"""
import re
from mock import patch
from prestoadmin.presto_conf import get_presto_conf, validate_presto_conf
from prestoadmin.util.exception import ConfigurationError
from tests.base_test_case import BaseTestCase
class TestPrestoConf(BaseTestCase):
@patch('prestoadmin.presto_conf.os.path.isdir')
@patch('prestoadmin.presto_conf.os.listdir')
@patch('prestoadmin.presto_conf.get_conf_from_properties_file')
@patch('prestoadmin.presto_conf.get_conf_from_config_file')
def test_get_presto_conf(self, config_mock, props_mock, listdir_mock,
isdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = ['log.properties', 'jvm.config', ]
config_mock.return_value = ['prop1', 'prop2']
props_mock.return_value = {'a': '1', 'b': '2'}
conf = get_presto_conf('dummy/dir')
config_mock.assert_called_with('dummy/dir/jvm.config')
props_mock.assert_called_with('dummy/dir/log.properties')
self.assertEqual(conf, {'log.properties': {'a': '1', 'b': '2'},
'jvm.config': ['prop1', 'prop2']})
@patch('prestoadmin.presto_conf.os.listdir')
@patch('prestoadmin.presto_conf.os.path.isdir')
@patch('prestoadmin.presto_conf.get_conf_from_properties_file')
def test_get_non_presto_file(self, get_mock, isdir_mock, listdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = ['test.properties']
        get_presto_conf('dummy/dir')
        self.assertFalse(get_mock.called)
def test_conf_not_exists_is_empty(self):
self.assertEqual(get_presto_conf('/does/not/exist'), {})
def test_valid_conf(self):
conf = {'node.properties': {}, 'jvm.config': [],
'config.properties': {'discovery.uri': 'http://uri'}}
self.assertEqual(validate_presto_conf(conf), conf)
def test_invalid_conf(self):
conf = {'jvm.config': [],
'config.properties': {}}
self.assertRaisesRegexp(ConfigurationError,
'Missing configuration for required file:',
validate_presto_conf,
conf)
def test_invalid_node_type(self):
conf = {'node.properties': '', 'jvm.config': [],
'config.properties': {}}
self.assertRaisesRegexp(ConfigurationError,
'node.properties must be an object with key-'
'value property pairs',
validate_presto_conf,
conf)
def test_invalid_jvm_type(self):
conf = {'node.properties': {}, 'jvm.config': {},
'config.properties': {}}
self.assertRaisesRegexp(ConfigurationError,
re.escape('jvm.config must contain a json '
'array of jvm arguments ([arg1, '
'arg2, arg3])'),
validate_presto_conf,
conf)
def test_invalid_config_type(self):
conf = {'node.properties': {}, 'jvm.config': [],
'config.properties': []}
self.assertRaisesRegexp(ConfigurationError,
'config.properties must be an object with key-'
'value property pairs',
validate_presto_conf,
conf)
| apache-2.0 | 3,146,728,801,849,457,000 | 43.075269 | 79 | 0.575262 | false |
google/glazier | glazier/lib/stage_test.py | 1 | 7596 | # Lint as: python3
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.stage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from glazier.lib import stage
import mock
FLAGS = flags.FLAGS
class StageTest(absltest.TestCase):
@mock.patch.object(stage.registry, 'set_value', autospec=True)
def test_exit_stage(self, sv):
stage.exit_stage(3)
sv.assert_has_calls([
mock.call('End', mock.ANY, 'HKLM', stage.STAGES_ROOT + r'\3'),
mock.call('_Active', '', 'HKLM', stage.STAGES_ROOT)
])
@mock.patch.object(stage.registry, 'set_value', autospec=True)
def test_exit_stage_invalid(self, sv):
sv.side_effect = stage.registry.Error
self.assertRaises(stage.Error, stage.exit_stage, 3)
@mock.patch.object(stage.registry, 'get_value', autospec=True)
@mock.patch.object(stage, '_check_expiration', autospec=True)
def test_get_active_stage(self, check, gv):
gv.return_value = '5'
self.assertEqual(stage.get_active_stage(), 5)
gv.assert_called_with('_Active', 'HKLM', stage.STAGES_ROOT)
check.assert_called_with(5)
@mock.patch.object(stage.registry, 'get_value', autospec=True)
def test_get_active_stage_none(self, gv):
gv.side_effect = stage.registry.Error
self.assertIsNone(stage.get_active_stage())
@mock.patch.object(stage.registry, 'get_value', autospec=True)
@mock.patch.object(stage, '_load_time', autospec=True)
def test_get_active_time_with_end(self, load, gv):
gv.return_value = None
load.side_effect = (datetime.datetime(2019, 11, 6, 17, 38, 52, 0),
datetime.datetime(2019, 11, 6, 19, 18, 52, 0))
self.assertEqual(
stage.get_active_time(3), datetime.timedelta(hours=1, minutes=40))
load.assert_has_calls([
mock.call(3, 'Start'),
mock.call(3, 'End')
])
@mock.patch.object(stage.registry, 'get_value', autospec=True)
@mock.patch.object(stage, '_load_time', autospec=True)
def test_get_active_time_no_start(self, load, gv):
gv.return_value = None
load.side_effect = (None, datetime.datetime(2019, 11, 6, 19, 18, 52, 0))
self.assertRaises(stage.Error, stage.get_active_time, 4)
@mock.patch.object(stage.registry, 'get_value', autospec=True)
@mock.patch.object(stage, '_utc_now', autospec=True)
@mock.patch.object(stage, '_load_time', autospec=True)
def test_get_active_time_no_end(self, load, utc, gv):
start = datetime.datetime(2019, 10, 20, 19, 18, 12, 0)
now = datetime.datetime(2019, 11, 6, 10, 45, 12, 0)
utc.return_value = now
gv.return_value = None
load.side_effect = (start, None)
self.assertEqual(
stage.get_active_time(6),
datetime.timedelta(days=16, hours=15, minutes=27))
@mock.patch.object(stage, '_get_start_end', autospec=True)
@mock.patch.object(stage, 'get_active_stage', autospec=True)
def test_get_status_complete(self, active, start_end):
active.return_value = 5
start_end.return_value = (datetime.datetime.now(), datetime.datetime.now())
self.assertEqual(stage.get_status(), 'Complete')
@mock.patch.object(stage, '_get_start_end', autospec=True)
@mock.patch.object(stage, '_check_expiration', autospec=True)
@mock.patch.object(stage, 'get_active_stage', autospec=True)
def test_get_status_expired(self, active, expiration, start_end):
active.return_value = 5
start_end.return_value = (datetime.datetime.now(), None)
expiration.side_effect = stage.Error('Expired')
self.assertEqual(stage.get_status(), 'Expired')
self.assertTrue(expiration.called)
@mock.patch.object(stage, '_get_start_end', autospec=True)
@mock.patch.object(stage, 'get_active_stage', autospec=True)
def test_get_status_no_start(self, active, start_end):
active.return_value = 4
start_end.return_value = (None, None)
self.assertEqual(stage.get_status(), 'Unknown')
@mock.patch.object(stage, '_get_start_end', autospec=True)
@mock.patch.object(stage, '_check_expiration', autospec=True)
@mock.patch.object(stage, 'get_active_stage', autospec=True)
def test_get_status_running(self, active, expiration, start_end):
active.return_value = 5
start_end.return_value = (datetime.datetime.now(), None)
self.assertEqual(stage.get_status(), 'Running')
self.assertTrue(expiration.called)
@mock.patch.object(stage, 'get_active_stage', autospec=True)
def test_get_status_unknown(self, active):
active.return_value = None
self.assertEqual(stage.get_status(), 'Unknown')
@mock.patch.object(stage.registry, 'get_value', autospec=True)
def test_load_time_parse(self, gv):
gv.return_value = '2019-11-06T17:37:43.279253'
self.assertEqual(
stage._load_time(1, 'Start'),
datetime.datetime(2019, 11, 6, 17, 37, 43, 279253))
@mock.patch.object(stage.registry, 'get_value', autospec=True)
def test_load_time_parse_err(self, gv):
gv.return_value = '12345'
self.assertIsNone(stage._load_time(1, 'End'))
@mock.patch.object(stage.registry, 'get_value', autospec=True)
def test_load_time_reg_err(self, gv):
gv.side_effect = stage.registry.Error
self.assertIsNone(stage._load_time(1, 'End'))
@mock.patch.object(stage.registry, 'set_value', autospec=True)
@mock.patch.object(stage.registry, 'get_value', autospec=True)
def test_set_stage_first(self, gv, sv):
gv.return_value = None
stage.set_stage(1)
sv.assert_has_calls([
mock.call('Start', mock.ANY, 'HKLM', stage.STAGES_ROOT + r'\1'),
mock.call('_Active', '1', 'HKLM', stage.STAGES_ROOT)
])
@mock.patch.object(stage, 'exit_stage', autospec=True)
@mock.patch.object(stage, 'get_active_stage', autospec=True)
@mock.patch.object(stage.registry, 'set_value', autospec=True)
def test_set_stage_next(self, reg, get_active, exit_stage):
get_active.return_value = 1
stage.set_stage(2)
exit_stage.assert_called_with(1)
reg.assert_called_with('_Active', '2', 'HKLM', stage.STAGES_ROOT)
@mock.patch.object(stage, 'get_active_stage', autospec=True)
@mock.patch.object(stage.registry, 'set_value', autospec=True)
def test_set_stage_error(self, sv, get_active):
get_active.return_value = None
sv.side_effect = stage.registry.Error
self.assertRaises(stage.Error, stage.set_stage, 3)
  def test_set_stage_invalid_type(self):
self.assertRaises(stage.Error, stage.set_stage, 'ABC')
@flagsaver.flagsaver
@mock.patch.object(stage, 'get_active_time', autospec=True)
def test_stage_expiration(self, get_active):
end = stage._utc_now()
start = end - datetime.timedelta(minutes=90)
get_active.return_value = (end - start)
FLAGS.stage_timeout_minutes = 120
stage._check_expiration(3)
FLAGS.stage_timeout_minutes = 60
self.assertRaises(stage.Error, stage._check_expiration, 3)
get_active.assert_called_with(stage_id=3)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 1,881,690,306,196,095,000 | 38.769634 | 79 | 0.691022 | false |
leanix/leanix-sdk-python | src/leanix/models/ActivityUser.py | 1 | 1619 | #!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
class ActivityUser:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
self.swaggerTypes = {
'ID': 'str',
'fullName': 'str',
'email': 'str'
}
self.ID = None # str
self.fullName = None # str
self.email = None # str
| mit | -8,348,228,163,601,424,000 | 37.547619 | 105 | 0.720815 | false |
trujunzhang/djzhang-targets | cwitunes/cwitunes/pipelines.py | 1 | 1262 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
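#
# An illustrative settings.py entry (module path assumed from this project's
# layout) would be:
#   ITEM_PIPELINES = {'cwitunes.pipelines.MongoPipeline': 300}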
from scrapy import log
class MongoPipeline(object):
def __init__(self, mongo_uri, mongo_db, collection_name):
from cwitunes.DBUtils import DBUtils
self.dbutils = DBUtils(mongo_uri, mongo_db, collection_name)
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGODB_SERVER'),
mongo_db=crawler.settings.get('MONGODB_DB', 'items'),
collection_name=crawler.settings.get('MONGODB_COLLECTION')
)
def open_spider(self, spider):
self.dbutils.open_spider()
def close_spider(self, spider):
self.dbutils.close_spider()
def process_item(self, item, spider):
        self.dbutils.process_item(item, spider)
return item
def _handle_error(self, failure, item, spider):
"""Handle occurred on db interaction."""
# do nothing, just log
log.err(failure)
| mit | -8,140,034,887,149,657,000 | 27.044444 | 70 | 0.669572 | false |
ivmech/iviny-scope | lib/xlsxwriter/test/styles/test_styles09.py | 1 | 3747 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...styles import Styles
from ...workbook import Workbook
class TestAssembleStyles(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for simple font styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
format1 = workbook.add_format({
'font_color': '#9C0006',
'bg_color': '#FFC7CE',
'font_condense': 1,
'font_extend': 1,
'has_fill': 1,
'has_font': 1,
})
# Get (and set) the DXF format index.
format1._get_dxf_index()
workbook._prepare_format_properties()
style._set_style_properties([
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_format_count,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
])
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="1">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="1">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="1">
<dxf>
<font>
<condense val="0"/>
<extend val="0"/>
<color rgb="FF9C0006"/>
</font>
<fill>
<patternFill>
<bgColor rgb="FFFFC7CE"/>
</patternFill>
</fill>
</dxf>
</dxfs>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 2,526,537,211,076,380,000 | 30.225 | 118 | 0.413397 | false |
google/vae-seq | vaeseq/examples/text/text.py | 1 | 4187 | # Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model sequences of text, character-by-character."""
from __future__ import print_function
import argparse
import itertools
import sys
import tensorflow as tf
from vaeseq.examples.text import hparams as hparams_mod
from vaeseq.examples.text import model as model_mod
def train(flags):
if flags.vocab_corpus is None:
print("NOTE: no --vocab-corpus supplied; using",
repr(flags.train_corpus), "for vocabulary.")
model = model_mod.Model(
hparams=hparams_mod.make_hparams(flags.hparams),
session_params=flags,
vocab_corpus=flags.vocab_corpus or flags.train_corpus)
model.train(flags.train_corpus, flags.num_steps,
valid_dataset=flags.valid_corpus)
def evaluate(flags):
model = model_mod.Model(
hparams=hparams_mod.make_hparams(flags.hparams),
session_params=flags,
vocab_corpus=flags.vocab_corpus)
model.evaluate(flags.eval_corpus, flags.num_steps)
def generate(flags):
hparams = hparams_mod.make_hparams(flags.hparams)
hparams.sequence_size = flags.length
model = model_mod.Model(
hparams=hparams,
session_params=flags,
vocab_corpus=flags.vocab_corpus)
for i, string in enumerate(itertools.islice(model.generate(),
flags.num_samples)):
print("#{:02d}: {}\n".format(i + 1, string))
# Argument parsing code below.
def common_args(args, require_vocab):
model_mod.Model.SessionParams.add_parser_arguments(args)
args.add_argument(
"--hparams", default="",
help="Model hyperparameter overrides.")
args.add_argument(
"--vocab-corpus",
help="Path to the corpus used for vocabulary generation.",
required=require_vocab)
def train_args(args):
common_args(args, require_vocab=False)
args.add_argument(
"--train-corpus",
help="Location of the training text.",
required=True)
args.add_argument(
"--valid-corpus",
help="Location of the validation text.")
args.add_argument(
"--num-steps", type=int, default=int(1e6),
help="Number of training iterations.")
args.set_defaults(entry=train)
def eval_args(args):
common_args(args, require_vocab=True)
args.add_argument(
"--eval-corpus",
help="Location of the training text.",
required=True)
args.add_argument(
"--num-steps", type=int, default=int(1e3),
help="Number of eval iterations.")
args.set_defaults(entry=evaluate)
def generate_args(args):
common_args(args, require_vocab=True)
args.add_argument(
"--length", type=int, default=1000,
help="Length of the generated strings.")
args.add_argument(
"--num-samples", type=int, default=20,
help="Number of strings to generate.")
args.set_defaults(entry=generate)
def main():
args = argparse.ArgumentParser()
subcommands = args.add_subparsers(title="subcommands")
train_args(subcommands.add_parser(
"train", help="Train a model."))
eval_args(subcommands.add_parser(
"evaluate", help="Evaluate a trained model."))
generate_args(subcommands.add_parser(
"generate", help="Generate some text."))
flags, unparsed_args = args.parse_known_args(sys.argv[1:])
if not hasattr(flags, "entry"):
args.print_help()
return 1
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=lambda _unused_argv: flags.entry(flags),
argv=[sys.argv[0]] + unparsed_args)
if __name__ == "__main__":
main()
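
# Example invocations (illustrative; corpus paths are placeholders):
#   python text.py train --train-corpus train.txt --valid-corpus valid.txt
#   python text.py evaluate --vocab-corpus train.txt --eval-corpus test.txt
#   python text.py generate --vocab-corpus train.txt --length 500 --num-samples 5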
| apache-2.0 | -6,422,594,320,282,468,000 | 31.207692 | 74 | 0.657511 | false |
3cky/netdata | collectors/python.d.plugin/python_modules/bases/charts.py | 1 | 12932 | # -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
from bases.collection import safe_print
CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
VARIABLE_PARAMS = ['id', 'value']
CHART_TYPES = ['line', 'area', 'stacked']
DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
"{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
"{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden} {obsolete}'\n"
DIMENSION_SET = "SET '{id}' = {value}\n"
CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \
"netdata.pythond_runtime line 145000 {update_every}\n" \
"DIMENSION run_time 'run time' absolute 1 1\n"
def create_runtime_chart(func):
"""
Calls a wrapped function, then prints runtime chart to stdout.
Used as a decorator for SimpleService.create() method.
    The whole point of making the 'create runtime chart' functionality a
    decorator was to help users who re-implement create() in their own classes.
:param func: class method
:return:
"""
def wrapper(*args, **kwargs):
self = args[0]
ok = func(*args, **kwargs)
if ok:
safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
update_every=self._runtime_counters.update_every))
return ok
return wrapper
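
# Illustrative usage sketch (not part of this module): the decorator wraps a
# job's create() method; SimpleService here is assumed to be the usual
# python.d framework base class.
#
#   class Service(SimpleService):
#       @create_runtime_chart
#       def create(self):
#           # build self.charts here ...
#           return True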
class ChartError(Exception):
"""Base-class for all exceptions raised by this module"""
class DuplicateItemError(ChartError):
"""Occurs when user re-adds a chart or a dimension that has already been added"""
class ItemTypeError(ChartError):
"""Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
class ItemValueError(ChartError):
"""Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
class Charts:
"""Represent a collection of charts
    All charts are stored in a dict.
    Each chart is an instance of the Chart class.
    Charts must be added using the Charts.add_chart() method only."""
def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
"""
:param job_name: <bound method>
:param priority: <int>
:param get_update_every: <bound method>
"""
self.job_name = job_name
self.priority = priority
self.cleanup = cleanup
self.get_update_every = get_update_every
self.module_name = module_name
self.charts = dict()
def __len__(self):
return len(self.charts)
def __iter__(self):
return iter(self.charts.values())
def __repr__(self):
return 'Charts({0})'.format(self)
def __str__(self):
return str([chart for chart in self.charts])
def __contains__(self, item):
return item in self.charts
def __getitem__(self, item):
return self.charts[item]
def __delitem__(self, key):
del self.charts[key]
def __bool__(self):
return bool(self.charts)
def __nonzero__(self):
return self.__bool__()
def add_chart(self, params):
"""
Create Chart instance and add it to the dict
Manually adds job name, priority and update_every to params.
:param params: <list>
:return:
"""
params = [self.job_name()] + params
new_chart = Chart(params)
new_chart.params['update_every'] = self.get_update_every()
new_chart.params['priority'] = self.priority
new_chart.params['module_name'] = self.module_name
self.priority += 1
self.charts[new_chart.id] = new_chart
return new_chart
def active_charts(self):
return [chart.id for chart in self if not chart.flags.obsoleted]
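
# Minimal usage sketch (illustrative; names are placeholders). The params
# follow the CHART_PARAMS / DIMENSION_PARAMS ordering defined above, with
# 'type' prepended automatically from job_name():
#
#   charts = Charts(job_name=lambda: 'example', priority=60000, cleanup=10,
#                   get_update_every=lambda: 1, module_name='example')
#   chart = charts.add_chart(['requests', None, 'Requests', 'requests/s',
#                             'example', 'example.requests', 'line'])
#   chart.add_dimension(['requests', None, 'incremental'])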
class Chart:
"""Represent a chart"""
def __init__(self, params):
"""
:param params: <list>
"""
if not isinstance(params, list):
raise ItemTypeError("'chart' must be a list type")
if not len(params) >= 8:
raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
self.name = '{type}.{id}'.format(type=self.params['type'],
id=self.params['id'])
        if self.params.get('chart_type') not in CHART_TYPES:
            self.params['chart_type'] = 'line'
hidden = str(self.params.get('hidden', ''))
self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
self.dimensions = list()
self.variables = set()
self.flags = ChartFlags()
self.penalty = 0
def __getattr__(self, item):
try:
return self.params[item]
except KeyError:
raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
attr=item))
def __repr__(self):
return 'Chart({0})'.format(self.id)
def __str__(self):
return self.id
def __iter__(self):
return iter(self.dimensions)
def __contains__(self, item):
return item in [dimension.id for dimension in self.dimensions]
def add_variable(self, variable):
"""
:param variable: <list>
:return:
"""
self.variables.add(ChartVariable(variable))
def add_dimension(self, dimension):
"""
:param dimension: <list>
:return:
"""
dim = Dimension(dimension)
if dim.id in self:
raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
chart=self.name))
self.refresh()
self.dimensions.append(dim)
return dim
def del_dimension(self, dimension_id, hide=True):
if dimension_id not in self:
return
idx = self.dimensions.index(dimension_id)
dimension = self.dimensions[idx]
if hide:
dimension.params['hidden'] = 'hidden'
dimension.params['obsolete'] = 'obsolete'
self.create()
self.dimensions.remove(dimension)
def hide_dimension(self, dimension_id, reverse=False):
if dimension_id not in self:
return
idx = self.dimensions.index(dimension_id)
dimension = self.dimensions[idx]
dimension.params['hidden'] = 'hidden' if not reverse else str()
self.refresh()
def create(self):
"""
:return:
"""
chart = CHART_CREATE.format(**self.params)
dimensions = ''.join([dimension.create() for dimension in self.dimensions])
variables = ''.join([var.set(var.value) for var in self.variables if var])
self.flags.push = False
self.flags.created = True
safe_print(chart + dimensions + variables)
def can_be_updated(self, data):
for dim in self.dimensions:
if dim.get_value(data) is not None:
return True
return False
def update(self, data, interval):
updated_dimensions, updated_variables = str(), str()
for dim in self.dimensions:
value = dim.get_value(data)
if value is not None:
updated_dimensions += dim.set(value)
for var in self.variables:
value = var.get_value(data)
if value is not None:
updated_variables += var.set(value)
if updated_dimensions:
since_last = interval if self.flags.updated else 0
if self.flags.push:
self.create()
chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
self.flags.updated = True
self.penalty = 0
else:
self.penalty += 1
self.flags.updated = False
return bool(updated_dimensions)
def obsolete(self):
self.flags.obsoleted = True
if self.flags.created:
safe_print(CHART_OBSOLETE.format(**self.params))
def refresh(self):
self.penalty = 0
self.flags.push = True
self.flags.obsoleted = False
class Dimension:
"""Represent a dimension"""
def __init__(self, params):
"""
:param params: <list>
"""
if not isinstance(params, list):
raise ItemTypeError("'dimension' must be a list type")
if not params:
raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
self.params['name'] = self.params.get('name') or self.params['id']
if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
self.params['algorithm'] = 'absolute'
if not isinstance(self.params.get('multiplier'), int):
self.params['multiplier'] = 1
if not isinstance(self.params.get('divisor'), int):
self.params['divisor'] = 1
self.params.setdefault('hidden', '')
self.params.setdefault('obsolete', '')
def __getattr__(self, item):
try:
return self.params[item]
except KeyError:
raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
attr=item))
def __repr__(self):
return 'Dimension({0})'.format(self.id)
def __str__(self):
return self.id
def __eq__(self, other):
if not isinstance(other, Dimension):
return self.id == other
return self.id == other.id
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(repr(self))
def create(self):
return DIMENSION_CREATE.format(**self.params)
def set(self, value):
"""
:param value: <str>: must be a digit
:return:
"""
return DIMENSION_SET.format(id=self.id,
value=value)
def get_value(self, data):
try:
return int(data[self.id])
except (KeyError, TypeError):
return None
class ChartVariable:
"""Represent a chart variable"""
def __init__(self, params):
"""
:param params: <list>
"""
if not isinstance(params, list):
raise ItemTypeError("'variable' must be a list type")
if not params:
raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS))
self.params = dict(zip(VARIABLE_PARAMS, params))
self.params.setdefault('value', None)
def __getattr__(self, item):
try:
return self.params[item]
except KeyError:
raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
attr=item))
def __bool__(self):
return self.value is not None
def __nonzero__(self):
return self.__bool__()
def __repr__(self):
return 'ChartVariable({0})'.format(self.id)
def __str__(self):
return self.id
def __eq__(self, other):
if isinstance(other, ChartVariable):
return self.id == other.id
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(repr(self))
def set(self, value):
return CHART_VARIABLE_SET.format(id=self.id,
value=value)
def get_value(self, data):
try:
return int(data[self.id])
except (KeyError, TypeError):
return None
class ChartFlags:
def __init__(self):
self.push = True
self.created = False
self.updated = False
self.obsoleted = False
| gpl-3.0 | -8,566,432,299,610,067,000 | 30.773956 | 113 | 0.564259 | false |
roaet/wafflehaus.neutron | tests/test_neutron_context.py | 1 | 11693 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from wafflehaus.try_context import context_filter
import webob.exc
from tests import test_base
class TestNeutronContext(test_base.TestBase):
def setUp(self):
super(TestNeutronContext, self).setUp()
adv_svc_patch = mock.patch(
"neutron.policy.check_is_advsvc")
self.adv_svc = adv_svc_patch.start()
self.adv_svc.return_value = False
self.app = mock.Mock()
self.app.return_value = "OK"
self.start_response = mock.Mock()
self.neutron_cls = "wafflehaus.neutron.context.%s.%s" % (
"neutron_context", "NeutronContextFilter")
self.strat_neutron = {"context_strategy": self.neutron_cls,
'enabled': 'true'}
self.strat_neutron_a = {"context_strategy": self.neutron_cls,
'enabled': 'true',
'require_auth_info': 'true'}
def test_create_strategy_neutron(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_USER_ID': 'derp', }
result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
def test_create_strategy_neutron_no_user_no_role(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertEqual(self.app, resp)
def test_create_strategy_neutron_with_no_roles(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': None, }
resp = result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertEqual(self.app, resp)
def test_create_strategy_neutron_with_empty_roles(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': '', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertTrue(hasattr(context, 'roles'))
def test_create_strategy_neutron_with_role(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': 'testrole', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
def test_create_strategy_neutron_with_roles(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json',
'X_ROLES': 'testrole, testrole2', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertTrue(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_requires_auth_will_fail_without_info(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_ROLES': 'testrole, testrole2', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertTrue(isinstance(resp, webob.exc.HTTPForbidden))
def test_requires_auth_is_admin(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
policy_check.return_value = True
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(1, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertTrue(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_requires_auth_is_not_admin(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
policy_check.return_value = False
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(2, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertFalse(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_verify_non_duplicate_request_id_non_admin(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
policy_check.return_value = False
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(2, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertFalse(context.is_admin)
self.assertEqual(2, len(context.roles))
# Generate another call in order to force oslo.context to refresh
# the _request_store, which in turn generates a new request_id
resp = result.__call__.request('/', method='HEAD', headers=headers)
context1 = result.strat_instance.context
self.assertNotEqual(context.request_id, context1.request_id)
def test_verify_non_duplicate_request_id_admin(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json', }
resp = result.__call__.request('/', method='HEAD', headers=headers)
context = result.strat_instance.context
self.assertTrue(context.is_admin)
self.assertEqual(self.app, resp)
# Generate another call in order to force oslo.context to refresh
# the _request_store, which in turn generates a new request_id
resp = result.__call__.request('/', method='HEAD', headers=headers)
context1 = result.strat_instance.context
self.assertNotEqual(context.request_id, context1.request_id)
def test_is_not_admin_policy_check_true(self):
result = context_filter.filter_factory(self.strat_neutron_a)(self.app)
self.assertIsNotNone(result)
headers = {'Content-Type': 'application/json',
'X_TENANT_ID': '123456',
'X_USER_ID': 'foo',
'X_ROLES': 'testrole, testrole2', }
policy_check = self.create_patch('neutron.policy.check_is_admin')
# First return value sets is_admin to False, second value sets
# is_admin to True
policy_check.side_effect = [False, True]
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
self.assertEqual(2, policy_check.call_count)
context = result.strat_instance.context
self.assertTrue(hasattr(context, 'roles'))
self.assertTrue('testrole' in context.roles)
self.assertTrue('testrole2' in context.roles)
self.assertTrue(context.is_admin)
self.assertEqual(2, len(context.roles))
def test_advsvc_is_false_when_admin_and_not_advsvc_role(self):
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json'}
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertFalse(context.is_advsvc)
def test_advsvc_is_true_when_policy_says_it_is(self):
self.adv_svc.return_value = True
result = context_filter.filter_factory(self.strat_neutron)(self.app)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, context_filter.ContextFilter))
headers = {'Content-Type': 'application/json'}
resp = result.__call__.request('/', method='HEAD', headers=headers)
self.assertEqual(self.app, resp)
context = result.strat_instance.context
self.assertTrue(context.is_advsvc)
| apache-2.0 | -5,572,070,798,425,745,000 | 48.130252 | 78 | 0.642863 | false |
wgwoods/fedup2 | setup.py | 1 | 2636 | #!/usr/bin/python
from distutils.core import setup, Command
from distutils.util import convert_path
from distutils.command.build_scripts import build_scripts
from distutils import log
import os
from os.path import join, basename
from subprocess import check_call
class Gettext(Command):
description = "Use po/POTFILES.in to generate po/<name>.pot"
user_options = []
def initialize_options(self):
self.encoding = 'UTF-8'
self.po_dir = 'po'
self.add_comments = True
def finalize_options(self):
pass
def _xgettext(self, opts):
name = self.distribution.get_name()
version = self.distribution.get_version()
email = self.distribution.get_author_email()
cmd = ['xgettext', '--default-domain', name, '--package-name', name,
'--package-version', version, '--msgid-bugs-address', email,
'--from-code', self.encoding,
'--output', join(self.po_dir, name + '.pot')]
if self.add_comments:
cmd.append('--add-comments')
check_call(cmd + opts)
def run(self):
self._xgettext(['-f', 'po/POTFILES.in'])
class Msgfmt(Command):
description = "Generate po/*.mo from po/*.po"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
po_dir = 'po'
for po in os.listdir(po_dir):
po = join(po_dir, po)
if po.endswith('.po'):
mo = po[:-3]+'.mo'
check_call(['msgfmt', '-vv', po, '-o', mo])
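
# Typical localization workflow (illustrative):
#   python setup.py gettext   # regenerate po/fedup2.pot from po/POTFILES.in
#   python setup.py msgfmt    # compile po/*.po into po/*.mo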
class BuildScripts(build_scripts):
def run(self):
build_scripts.run(self)
for script in self.scripts:
script = convert_path(script)
outfile = join(self.build_dir, basename(script))
if os.path.exists(outfile) and outfile.endswith(".py"):
newfile = outfile[:-3] # drop .py
log.info("renaming %s -> %s", outfile, basename(newfile))
os.rename(outfile, newfile)
version='v0'
try:
exec(open("fedup2/version.py").read())
except IOError:
pass
setup(name="fedup2",
version=version,
description="Fedora Upgrade",
long_description="",
author="Will Woods",
author_email="[email protected]",
url="https://github.com/wgwoods/fedup2",
download_url="https://github.com/wgwoods/fedup2/downloads",
license="GPLv2+",
packages=["fedup2"],
scripts=["fedup2.py"],
cmdclass={
'gettext': Gettext,
'msgfmt': Msgfmt,
'build_scripts': BuildScripts,
}
)
| gpl-2.0 | -929,589,590,876,869,800 | 28.954545 | 76 | 0.581942 | false |
hopshadoop/hops-util-py | hops/experiment_impl/distribute/parameter_server_reservation.py | 1 | 11016 | from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import pickle
import select
import socket
import struct
import threading
import time
from hops import util
from hops.experiment_impl.util import experiment_utils
MAX_RETRIES = 3
BUFSIZE = 1024*2
class Reservations:
"""Thread-safe store for node reservations."""
def __init__(self, required):
"""
Args:
            required: expected number of node reservations.
"""
self.required = required
self.lock = threading.RLock()
self.reservations = []
self.cluster_spec = {}
self.check_done = False
def add(self, meta):
"""Add a reservation.
Args:
            :meta: a dictionary of metadata about a node
"""
with self.lock:
self.reservations.append(meta)
if self.remaining() == 0:
gpus_present = False
for entry in self.reservations:
if entry["gpus_present"] == True:
gpus_present = True
break
cluster_spec = {"chief": [], "ps": [], "worker": []}
if not gpus_present:
added_chief=False
for entry in self.reservations:
if entry["task_type"] == "ps":
cluster_spec["ps"].append(entry["host_port"])
elif added_chief == False and entry["task_type"] == "worker":
cluster_spec["chief"].append(entry["host_port"])
added_chief = True
else:
cluster_spec["worker"].append(entry["host_port"])
else:
added_chief=False
# switch Worker without GPU with PS with GPU
for possible_switch in self.reservations:
if possible_switch["task_type"] == "worker" and possible_switch["gpus_present"] == False:
for candidate in self.reservations:
if candidate["task_type"] == "ps" and candidate["gpus_present"] == True:
candidate["task_type"] = "worker"
possible_switch["task_type"] = "ps"
break
for entry in self.reservations:
if entry["task_type"] == "worker" and entry["gpus_present"] == True and added_chief == False:
added_chief=True
cluster_spec["chief"].append(entry["host_port"])
elif entry["task_type"] == "worker" and entry["gpus_present"] == True:
cluster_spec["worker"].append(entry["host_port"])
elif entry["task_type"] == "ps" and entry["gpus_present"] == False:
cluster_spec["ps"].append(entry["host_port"])
self.cluster_spec = cluster_spec
self.check_done = True
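
    # Illustrative outcome (assuming no GPUs are present): reservations
    #   {'task_type': 'ps',     'host_port': 'h1:2222', 'gpus_present': False}
    #   {'task_type': 'worker', 'host_port': 'h2:2222', 'gpus_present': False}
    #   {'task_type': 'worker', 'host_port': 'h3:2222', 'gpus_present': False}
    # produce the cluster spec:
    #   {'chief': ['h2:2222'], 'ps': ['h1:2222'], 'worker': ['h3:2222']}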
def done(self):
"""Returns True if the ``required`` number of reservations have been fulfilled."""
with self.lock:
return self.check_done
def get(self):
"""Get the list of current reservations."""
with self.lock:
return self.cluster_spec
def remaining(self):
"""Get a count of remaining/unfulfilled reservations."""
with self.lock:
num_registered = len(self.reservations)
return self.required - num_registered
class WorkerFinished:
"""Thread-safe store for node reservations."""
def __init__(self, required):
"""
Args:
            :required: expected number of workers in the cluster.
"""
self.required = required
self.lock = threading.RLock()
self.finished = 0
self.check_done = False
def add(self):
"""Add a reservation.
Args:
:meta: a dictonary of metadata about a node
"""
with self.lock:
self.finished = self.finished + 1
if self.remaining() == 0:
self.check_done = True
def done(self):
"""Returns True if the ``required`` number of reservations have been fulfilled."""
with self.lock:
return self.check_done
def remaining(self):
"""Get a count of remaining/unfulfilled reservations."""
with self.lock:
return self.required - self.finished
class MessageSocket(object):
"""Abstract class w/ length-prefixed socket send/receive functions."""
def receive(self, sock):
"""
Receive a message on ``sock``
Args:
sock:
Returns:
"""
msg = None
data = b''
recv_done = False
recv_len = -1
while not recv_done:
buf = sock.recv(BUFSIZE)
if buf is None or len(buf) == 0:
raise Exception("socket closed")
if recv_len == -1:
recv_len = struct.unpack('>I', buf[:4])[0]
data += buf[4:]
recv_len -= len(data)
else:
data += buf
recv_len -= len(buf)
recv_done = (recv_len == 0)
msg = pickle.loads(data)
return msg
def send(self, sock, msg):
"""
Send ``msg`` to destination ``sock``.
Args:
sock:
msg:
Returns:
"""
data = pickle.dumps(msg)
buf = struct.pack('>I', len(data)) + data
sock.sendall(buf)
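
# Wire format sketch (illustrative): every message is pickled and prefixed
# with a 4-byte big-endian length header, e.g.:
#
#   data = pickle.dumps({'type': 'QUERY'})
#   frame = struct.pack('>I', len(data)) + data   # exactly what send() writes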
class Server(MessageSocket):
"""Simple socket server with length prefixed pickle messages"""
reservations = None
done = False
def __init__(self, count):
"""
Args:
count:
"""
assert count > 0
self.reservations = Reservations(count)
self.worker_finished = WorkerFinished(util.num_executors() - util.num_param_servers())
def await_reservations(self, sc, status={}, timeout=600):
"""
Block until all reservations are received.
Args:
sc:
status:
timeout:
Returns:
"""
timespent = 0
while not self.reservations.done():
logging.info("waiting for {0} reservations".format(self.reservations.remaining()))
# check status flags for any errors
if 'error' in status:
sc.cancelAllJobs()
#sc.stop()
#sys.exit(1)
time.sleep(1)
timespent += 1
if (timespent > timeout):
raise Exception("timed out waiting for reservations to complete")
logging.info("all reservations completed")
return self.reservations.get()
def _handle_message(self, sock, msg):
"""
Args:
sock:
msg:
Returns:
"""
logging.debug("received: {0}".format(msg))
msg_type = msg['type']
if msg_type == 'REG':
self.reservations.add(msg['data'])
MessageSocket.send(self, sock, 'OK')
elif msg_type == 'REG_DONE':
self.worker_finished.add()
MessageSocket.send(self, sock, 'OK')
elif msg_type == 'QUERY':
MessageSocket.send(self, sock, self.reservations.done())
elif msg_type == 'QUERY_DONE':
MessageSocket.send(self, sock, self.worker_finished.done())
elif msg_type == 'QINFO':
rinfo = self.reservations.get()
MessageSocket.send(self, sock, rinfo)
elif msg_type == 'STOP':
logging.info("setting server.done")
MessageSocket.send(self, sock, 'OK')
self.done = True
else:
MessageSocket.send(self, sock, 'ERR')
def start(self):
"""
Start listener in a background thread
Returns:
address of the Server as a tuple of (host, port)
"""
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_sock.bind(('', 0))
server_sock.listen(10)
# hostname may not be resolvable but IP address probably will be
host = experiment_utils._get_ip_address()
port = server_sock.getsockname()[1]
addr = (host,port)
def _listen(self, sock):
CONNECTIONS = []
CONNECTIONS.append(sock)
while not self.done:
read_socks, write_socks, err_socks = select.select(CONNECTIONS, [], [], 60)
for sock in read_socks:
if sock == server_sock:
client_sock, client_addr = sock.accept()
CONNECTIONS.append(client_sock)
logging.debug("client connected from {0}".format(client_addr))
else:
try:
msg = self.receive(sock)
self._handle_message(sock, msg)
except Exception as e:
logging.debug(e)
sock.close()
CONNECTIONS.remove(sock)
server_sock.close()
t = threading.Thread(target=_listen, args=(self, server_sock))
t.daemon = True
t.start()
return addr
def stop(self):
"""Stop the Server's socket listener."""
self.done = True
class Client(MessageSocket):
"""Client to register and await node reservations.
Args:
:server_addr: a tuple of (host, port) pointing to the Server.
"""
sock = None #: socket to server TCP connection
server_addr = None #: address of server
def __init__(self, server_addr):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(server_addr)
self.server_addr = server_addr
logging.info("connected to server at {0}".format(server_addr))
def _request(self, msg_type, msg_data=None):
"""Helper function to wrap msg w/ msg_type."""
msg = {}
msg['type'] = msg_type
if msg_data or ((msg_data == True) or (msg_data == False)):
msg['data'] = msg_data
done = False
tries = 0
while not done and tries < MAX_RETRIES:
try:
MessageSocket.send(self, self.sock, msg)
done = True
except socket.error as e:
tries += 1
if tries >= MAX_RETRIES:
raise
print("Socket error: {}".format(e))
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(self.server_addr)
logging.debug("sent: {0}".format(msg))
resp = MessageSocket.receive(self, self.sock)
logging.debug("received: {0}".format(resp))
return resp
def close(self):
"""Close the client socket."""
self.sock.close()
def register(self, reservation):
"""
Register ``reservation`` with server.
Args:
reservation:
Returns:
"""
resp = self._request('REG', reservation)
return resp
def register_worker_finished(self):
"""
Register ``worker as finished`` with server.
Returns:
"""
resp = self._request('REG_DONE')
return resp
def await_all_workers_finished(self):
"""
Poll until all reservations completed, then return cluster_info.
Returns:
"""
done = False
while not done:
done = self._request('QUERY_DONE')
time.sleep(5)
return True
def get_reservations(self):
"""
Get current list of reservations.
Returns:
"""
cluster_info = self._request('QINFO')
return cluster_info
def await_reservations(self):
"""Poll until all reservations completed, then return cluster_info."""
done = False
while not done:
done = self._request('QUERY')
time.sleep(1)
reservations = self.get_reservations()
return reservations
def request_stop(self):
"""Request server stop."""
resp = self._request('STOP')
return resp
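
# End-to-end sketch (illustrative): the driver starts the Server, executors
# register through Clients and then wait for the assembled cluster spec.
#
#   server = Server(count=util.num_executors())
#   addr = server.start()
#   # ... on each executor:
#   client = Client(addr)
#   client.register({'task_type': 'worker', 'host_port': 'host:2222',
#                    'gpus_present': False})
#   cluster_spec = client.await_reservations()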
| apache-2.0 | 5,554,518,278,654,147,000 | 25.291169 | 105 | 0.590051 | false |
MostlyOpen/odoo_addons_jcafb | myo_professional_cst/models/professional.py | 1 | 1115 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Professional(models.Model):
_inherit = 'myo.professional'
employee_id = fields.Many2one('hr.employee', 'Related Employee', required=False, readonly=False)
| agpl-3.0 | -128,884,841,202,150,990 | 40.296296 | 100 | 0.641256 | false |
mastizada/kuma | kuma/search/tests/test_utils.py | 1 | 2338 | from __future__ import absolute_import
import test_utils
from nose.tools import eq_, ok_
from ..store import referrer_url
from ..utils import QueryURLObject
class URLTests(test_utils.TestCase):
def test_pop_query_param(self):
original = 'http://example.com/?spam=eggs'
url = QueryURLObject(original)
eq_(url.pop_query_param('spam', 'eggs'), 'http://example.com/')
eq_(url.pop_query_param('spam', 'spam'), original)
original = 'http://example.com/?spam=eggs&spam=spam'
url = QueryURLObject(original)
eq_(url.pop_query_param('spam', 'eggs'),
'http://example.com/?spam=spam')
eq_(url.pop_query_param('spam', 'spam'),
'http://example.com/?spam=eggs')
original = 'http://example.com/?spam=eggs&foo='
url = QueryURLObject(original)
eq_(url.pop_query_param('spam', 'eggs'),
'http://example.com/?foo=')
def test_merge_query_param(self):
original = 'http://example.com/?spam=eggs'
url = QueryURLObject(original)
eq_(url.merge_query_param('spam', 'eggs'), original)
eq_(url.merge_query_param('spam', 'spam'), original + '&spam=spam')
original = 'http://example.com/?foo=&spam=eggs&foo=bar'
url = QueryURLObject(original)
eq_(url.merge_query_param('foo', None),
'http://example.com/?foo=&foo=bar&spam=eggs')
eq_(url.merge_query_param('foo', [None]),
'http://example.com/?foo=&foo=bar&spam=eggs')
def test_clean_params(self):
for url in ['http://example.com/?spam=',
'http://example.com/?spam']:
url_object = QueryURLObject(url)
eq_(url_object.clean_params(url_object.query_dict), {})
def test_referer_bad_encoding(self):
class _TestRequest(object):
# In order to test this we just need an object that has
# 'locale' and 'META', but not the full attribute set of
# an HttpRequest. This is that object.
def __init__(self, locale, referer):
self.locale = locale
self.META = {'HTTP_REFERER': referer}
request = _TestRequest('es', 'http://developer.mozilla.org/es/docs/Tutorial_de_XUL/A\xc3\x83\xc2\xb1adiendo_botones')
ok_(referrer_url(request) is None)
| mpl-2.0 | 1,689,584,355,519,011,000 | 36.709677 | 125 | 0.591104 | false |
deiv/plasmate-pkg | plasmate/templates/mainDataEngine.py | 1 | 2102 | # -*- coding: iso-8859-1 -*-
#
# Author: $AUTHOR <$EMAIL>
# Date: $DATE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation; either version 2, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU Library General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Import essential modules
from PyQt4.QtCore import *
from PyKDE4.kdecore import *
from PyKDE4 import plasmascript
class $DATAENGINE_NAME(plasmascript.DataEngine):
# Constructor, forward initialization to its superclass
# Note: try to NOT modify this constructor; all the setup code
# should be placed in the init method.
def __init__(self,parent,args=None):
plasmascript.DataEngine.__init__(self,parent)
# init method
# Put here all the code needed to initialize our plasmoid
def init(self):
self.setMinimumPollingInterval(333)
# sources method
# Used by applets to request what data source the DataEngine has
def sources(self):
# Add custom code here
return sources
# sourceRequestEvent method
# Called when an applet access the DataEngine and request for
# a specific source ( name )
def sourceRequestEvent(self, name):
# Add custom code here
return self.updateSourceEvent(name)
# updateSourceEvent method
# The main function for a DataEngine
def updateSourceEvent(self, tz):
# Add custom code here
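        # e.g. (illustrative) publish a key/value pair for this source:
        #   self.setData(tz, "key", "value")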
return True
# CreateDataEngine method
# Note: do NOT modify it, needed by Plasma
def CreateDataEngine(parent):
return $DATAENGINE_NAME(parent)
| gpl-2.0 | 7,481,441,239,052,497,000 | 32.365079 | 70 | 0.709324 | false |
sloria/osf.io | osf/migrations/0113_add_view_collectionprovider_to_admin_perm.py | 1 | 2258 | from __future__ import unicode_literals
import logging
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from django.db.models import Q
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
logger = logging.getLogger(__file__)
def get_new_read_only_permissions():
return Permission.objects.filter(
Q(codename='view_collectionprovider')
)
def get_new_admin_permissions():
return Permission.objects.filter(
Q(codename='change_collectionprovider') |
Q(codename='delete_collectionprovider')
)
def add_group_permissions(*args):
# this is to make sure that the permissions created in an earlier migration exist!
emit_post_migrate_signal(2, False, 'default')
# Add permissions for the read only group
read_only_group = Group.objects.get(name='read_only')
[read_only_group.permissions.add(perm) for perm in get_new_read_only_permissions()]
read_only_group.save()
logger.info('Collection Provider permissions added to read only group')
# Add permissions for new OSF Admin group - can perform actions
admin_group = Group.objects.get(name='osf_admin')
[admin_group.permissions.add(perm) for perm in get_new_read_only_permissions()]
[admin_group.permissions.add(perm) for perm in get_new_admin_permissions()]
admin_group.save()
logger.info('Administrator permissions for Collection Providers added to admin group')
def remove_group_permissions(*args):
# remove the read only group
read_only_group = Group.objects.get(name='read_only')
[read_only_group.permissions.remove(perm) for perm in get_new_read_only_permissions()]
read_only_group.save()
# remove the osf admin group
admin_group = Group.objects.get(name='osf_admin')
[admin_group.permissions.remove(perm) for perm in get_new_read_only_permissions()]
[admin_group.permissions.remove(perm) for perm in get_new_admin_permissions()]
admin_group.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0112_alter_collectionprovider_permissions'),
]
operations = [
migrations.RunPython(add_group_permissions, remove_group_permissions),
]
| apache-2.0 | 860,804,156,702,227,200 | 33.738462 | 90 | 0.723206 | false |
rlbabyuk/integration_tests | scripts/cleanup_edomain_templates.py | 1 | 12391 | #!/usr/bin/env python2
"""This script takes an provider and edomain as optional parameters, and
searches for old templates on specified provider's export domain and deletes
them. In case of no --provider parameter specified then this script
traverse all the rhevm providers in cfme_data.
"""
import argparse
import datetime
import pytz
from threading import Lock, Thread
from utils import net
from utils.conf import cfme_data, credentials
from utils.log import logger
from utils.providers import get_mgmt
from utils.ssh import SSHClient
from utils.wait import wait_for
lock = Lock()
def make_ssh_client(provider_mgmt):
creds = credentials[provider_mgmt.kwargs.get('ssh_creds', None)]
connect_kwargs = {
'username': creds['username'],
'password': creds['password'],
'hostname': provider_mgmt.kwargs.get('ipaddress')
}
return SSHClient(**connect_kwargs)
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument("--edomain", dest="edomain",
help="Export domain for the remplate", default=None)
parser.add_argument("--provider", dest="provider",
help="Rhevm provider (to look for in cfme_data)",
default=None)
parser.add_argument("--days-old", dest="days_old",
help="number of days_old templates to be deleted"
"e.g. --day-old 4 deletes templates created before 4 days",
default=3)
parser.add_argument("--max-templates", dest="max_templates",
help="max number of templates to be deleted at a time"
"e.g. --max-templates 6 deletes 6 templates at a time",
default=5)
args = parser.parse_args()
return args
def is_ovirt_engine_running(provider_mgmt):
try:
with make_ssh_client(provider_mgmt) as ssh_client:
stdout = ssh_client.run_command('systemctl status ovirt-engine')[1]
# fallback to sysV commands if necessary
if 'command not found' in stdout:
stdout = ssh_client.run_command('service ovirt-engine status')[1]
return 'running' in stdout
except Exception as e:
logger.exception(e)
return False
def change_edomain_state(provider_mgmt, state, edomain):
try:
# fetch name for logging
provider_name = provider_mgmt.kwargs.get('name', None)
log_args = (provider_name, edomain, state)
api = provider_mgmt.api
dcs = api.datacenters.list()
for dc in dcs:
export_domain = dc.storagedomains.get(edomain)
if export_domain:
if state == 'maintenance' and export_domain.get_status().state == 'active':
dc.storagedomains.get(edomain).deactivate()
elif state == 'active' and export_domain.get_status().state != 'active':
dc.storagedomains.get(edomain).activate()
wait_for(is_edomain_in_state, [api, state, edomain], fail_condition=False, delay=5)
print('RHEVM:{}, domain {} set to "{}" state'.format(*log_args))
return True
return False
except Exception as e:
print(e)
print('RHEVM:{} Exception setting domain {} to "{}" state'.format(*log_args))
return False
def is_edomain_in_state(api, state, edomain):
dcs = api.datacenters.list()
for dc in dcs:
export_domain = dc.storagedomains.get(edomain)
if export_domain:
return export_domain.get_status().state == state
return False
# get the domain edomain path on the rhevm
def get_edomain_path(api, edomain):
edomain_id = api.storagedomains.get(edomain).get_id()
edomain_conn = api.storagedomains.get(edomain).storageconnections.list()[0]
return ('{}/{}'.format(edomain_conn.get_path(), edomain_id),
edomain_conn.get_address())
def cleanup_empty_dir_on_edomain(provider_mgmt, edomain):
"""Cleanup all the empty directories on the edomain/edomain_id/master/vms
else api calls will result in 400 Error with ovf not found,
Args:
provider_mgmt: provider object under execution
edomain: domain on which to operate
"""
try:
# We'll use this for logging
provider_name = provider_mgmt.kwargs.get('name', None)
# get path first
path, edomain_ip = get_edomain_path(provider_mgmt.api, edomain)
edomain_path = '{}:{}'.format(edomain_ip, path)
command = 'mkdir -p ~/tmp_filemount &&'
command += 'mount -O tcp {} ~/tmp_filemount &&'.format(edomain_path)
command += 'find ~/tmp_filemount/master/vms/ -maxdepth 1 -type d -empty -delete &&'
command += 'cd ~ && umount ~/tmp_filemount &&'
command += 'find . -maxdepth 1 -name tmp_filemount -type d -empty -delete'
print('RHEVM:{} Deleting empty directories on edomain/vms file path {}'
.format(provider_name, path))
with make_ssh_client(provider_mgmt) as ssh_client:
exit_status, output = ssh_client.run_command(command)
if exit_status != 0:
print('RHEVM:{} Error deleting empty directories on path {}'
.format(provider_name, path))
print(output)
print('RHEVM:{} successfully deleted empty directories on path {}'
.format(provider_name, path))
except Exception as e:
print(e)
return False
def is_edomain_template_deleted(api, name, edomain):
"""Checks for the templates delete status on edomain.
Args:
api: API for RHEVM.
name: template_name
edomain: Export domain of selected RHEVM provider.
"""
return not api.storagedomains.get(edomain).templates.get(name)
def delete_edomain_templates(api, template, edomain):
"""deletes the template on edomain.
Args:
api: API for RHEVM.
name: template_name
edomain: Export domain of selected RHEVM provider.
"""
with lock:
creation_time = template.get_creation_time().strftime("%d %B-%Y")
name = template.get_name()
print('Deleting {} created on {} ...'.format(name, creation_time))
try:
template.delete()
print('waiting for {} to be deleted..'.format(name))
wait_for(is_edomain_template_deleted, [api, name, edomain], fail_condition=False, delay=5)
print('RHEVM: successfully deleted template {} on domain {}'.format(name, edomain))
except Exception as e:
with lock:
print('RHEVM: Exception deleting template {} on domain {}'.format(name, edomain))
logger.exception(e)
def cleanup_templates(api, edomain, days, max_templates):
try:
templates = api.storagedomains.get(edomain).templates.list()
thread_queue = []
delete_templates = []
for template in templates:
delta = datetime.timedelta(days=days)
now = datetime.datetime.now(pytz.utc)
template_creation_time = template.get_creation_time().astimezone(pytz.utc)
if template.get_name().startswith('auto-tmp'):
if now > (template_creation_time + delta):
delete_templates.append(template)
if not delete_templates:
print("RHEVM: No old templates to delete in {}".format(edomain))
for delete_template in delete_templates[:max_templates]:
thread = Thread(target=delete_edomain_templates,
args=(api, delete_template, edomain))
thread.daemon = True
thread_queue.append(thread)
thread.start()
for thread in thread_queue:
thread.join()
except Exception as e:
logger.exception(e)
return False
def api_params_resolution(item_list, item_name, item_param):
"""Picks and prints info about parameter obtained by api call.
Args:
item_list: List of possible candidates to pick from.
item_name: Name of parameter obtained by api call.
item_param: Name of parameter representing data in the script.
"""
if len(item_list) == 0:
print("RHEVM: Cannot find {} ({}) automatically.".format(item_name, item_param))
print("Please specify it by cmd-line parameter '--{}' or in cfme_data.".format(item_param))
return None
elif len(item_list) > 1:
print("RHEVM: Found multiple of {}. Picking first, '{}'.".format(item_name, item_list[0]))
else:
print("RHEVM: Found {}: '{}'.".format(item_name, item_list[0]))
return item_list[0]
def get_edomain(api):
"""Discovers suitable export domain automatically.
Args:
api: API to RHEVM instance.
"""
edomain_names = []
for domain in api.storagedomains.list(status=None):
if domain.get_type() == 'export':
edomain_names.append(domain.get_name())
return api_params_resolution(edomain_names, 'export domain', 'edomain')
def make_kwargs(args, cfme_data, **kwargs):
"""Assembles all the parameters in case of running as a standalone script.
    Makes sure that parameters given as command-line arguments take
    priority, and that all the needed parameters have proper values.
Args:
args: Arguments given from command-line.
cfme_data: Data in cfme_data.yaml
kwargs: Kwargs generated from
cfme_data['template_upload']['template_upload_rhevm']
"""
args_kwargs = dict(args._get_kwargs())
if not kwargs:
return args_kwargs
template_name = kwargs.get('template_name', None)
if template_name is None:
template_name = cfme_data['basic_info']['appliance_template']
kwargs.update(template_name=template_name)
for kkey, kval in kwargs.items():
for akey, aval in args_kwargs.items():
if aval and kkey == akey and kval != aval:
kwargs[akey] = aval
for akey, aval in args_kwargs.items():
if akey not in kwargs.keys():
kwargs[akey] = aval
return kwargs
def run(**kwargs):
"""Calls the functions needed to cleanup templates on RHEVM providers.
This is called either by template_upload_all script, or by main
function.
Args:
**kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
"""
providers = cfme_data['management_systems']
for provider in [prov for prov in providers if providers[prov]['type'] == 'rhevm']:
# If a provider was passed, only cleanup on it, otherwise all rhevm providers
cli_provider = kwargs.get('provider', None)
if cli_provider and cli_provider != provider:
continue
provider_mgmt = get_mgmt(provider)
if not net.is_pingable(provider_mgmt.kwargs.get('ipaddress', None)):
continue
elif not is_ovirt_engine_running(provider_mgmt):
print('ovirt-engine service not running..')
continue
try:
print('connecting to provider, to establish api handler')
edomain = kwargs.get('edomain', None)
if not edomain:
edomain = provider_mgmt.kwargs['template_upload']['edomain']
except Exception as e:
logger.exception(e)
continue
try:
print("\n--------Start of {}--------".format(provider))
cleanup_templates(provider_mgmt.api,
edomain,
kwargs.get('days_old'),
kwargs.get('max_templates'))
finally:
change_edomain_state(provider_mgmt,
'maintenance',
edomain)
cleanup_empty_dir_on_edomain(provider_mgmt, edomain)
change_edomain_state(provider_mgmt,
'active',
edomain)
print("--------End of {}--------\n".format(provider))
print("Provider Execution completed")
if __name__ == "__main__":
args = parse_cmd_line()
kwargs = cfme_data['template_upload']['template_upload_rhevm']
final_kwargs = make_kwargs(args, cfme_data, **kwargs)
run(**final_kwargs)
| gpl-2.0 | -5,913,989,060,310,183,000 | 35.444118 | 99 | 0.60665 | false |
katakumpo/niceredis | tests/test_lock.py | 1 | 5521 | from __future__ import with_statement
import time
import pytest
from redis.exceptions import LockError, ResponseError
from redis.lock import Lock, LuaLock
class TestLock(object):
lock_class = Lock
def get_lock(self, redis, *args, **kwargs):
kwargs['lock_class'] = self.lock_class
return redis.lock(*args, **kwargs)
def test_lock(self, sr):
lock = self.get_lock(sr, 'foo')
assert lock.acquire(blocking=False)
assert sr.get('foo') == lock.local.token
assert sr.ttl('foo') == -1
lock.release()
assert sr.get('foo') is None
def test_competing_locks(self, sr):
lock1 = self.get_lock(sr, 'foo')
lock2 = self.get_lock(sr, 'foo')
assert lock1.acquire(blocking=False)
assert not lock2.acquire(blocking=False)
lock1.release()
assert lock2.acquire(blocking=False)
assert not lock1.acquire(blocking=False)
lock2.release()
def test_timeout(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10)
assert lock.acquire(blocking=False)
assert 8 < sr.ttl('foo') <= 10
lock.release()
def test_float_timeout(self, sr):
lock = self.get_lock(sr, 'foo', timeout=9.5)
assert lock.acquire(blocking=False)
assert 8 < sr.pttl('foo') <= 9500
lock.release()
def test_blocking_timeout(self, sr):
lock1 = self.get_lock(sr, 'foo')
assert lock1.acquire(blocking=False)
lock2 = self.get_lock(sr, 'foo', blocking_timeout=0.2)
start = time.time()
assert not lock2.acquire()
assert (time.time() - start) > 0.2
lock1.release()
def test_context_manager(self, sr):
# blocking_timeout prevents a deadlock if the lock can't be acquired
# for some reason
with self.get_lock(sr, 'foo', blocking_timeout=0.2) as lock:
assert sr.get('foo') == lock.local.token
assert sr.get('foo') is None
def test_high_sleep_raises_error(self, sr):
"If sleep is higher than timeout, it should raise an error"
with pytest.raises(LockError):
self.get_lock(sr, 'foo', timeout=1, sleep=2)
def test_releasing_unlocked_lock_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
with pytest.raises(LockError):
lock.release()
def test_releasing_lock_no_longer_owned_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
lock.acquire(blocking=False)
# manually change the token
sr.set('foo', 'a')
with pytest.raises(LockError):
lock.release()
# even though we errored, the token is still cleared
assert lock.local.token is None
def test_extend_lock(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10)
assert lock.acquire(blocking=False)
assert 8000 < sr.pttl('foo') <= 10000
assert lock.extend(10)
assert 16000 < sr.pttl('foo') <= 20000
lock.release()
def test_extend_lock_float(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10.0)
assert lock.acquire(blocking=False)
assert 8000 < sr.pttl('foo') <= 10000
assert lock.extend(10.0)
assert 16000 < sr.pttl('foo') <= 20000
lock.release()
def test_extending_unlocked_lock_raises_error(self, sr):
lock = self.get_lock(sr, 'foo', timeout=10)
with pytest.raises(LockError):
lock.extend(10)
def test_extending_lock_with_no_timeout_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
assert lock.acquire(blocking=False)
with pytest.raises(LockError):
lock.extend(10)
lock.release()
def test_extending_lock_no_longer_owned_raises_error(self, sr):
lock = self.get_lock(sr, 'foo')
assert lock.acquire(blocking=False)
sr.set('foo', 'a')
with pytest.raises(LockError):
lock.extend(10)
class TestLuaLock(TestLock):
lock_class = LuaLock
class TestLockClassSelection(object):
def test_lock_class_argument(self, sr):
lock = sr.lock('foo', lock_class=Lock)
assert type(lock) == Lock
lock = sr.lock('foo', lock_class=LuaLock)
assert type(lock) == LuaLock
def test_cached_lualock_flag(self, sr):
try:
sr._use_lua_lock = True
lock = sr.lock('foo')
assert type(lock) == LuaLock
finally:
sr._use_lua_lock = None
def test_cached_lock_flag(self, sr):
try:
sr._use_lua_lock = False
lock = sr.lock('foo')
assert type(lock) == Lock
finally:
sr._use_lua_lock = None
def test_lua_compatible_server(self, sr, monkeypatch):
@classmethod
def mock_register(cls, redis):
return
monkeypatch.setattr(LuaLock, 'register_scripts', mock_register)
try:
lock = sr.lock('foo')
assert type(lock) == LuaLock
assert sr._use_lua_lock is True
finally:
sr._use_lua_lock = None
def test_lua_unavailable(self, sr, monkeypatch):
@classmethod
def mock_register(cls, redis):
raise ResponseError()
monkeypatch.setattr(LuaLock, 'register_scripts', mock_register)
try:
lock = sr.lock('foo')
assert type(lock) == Lock
assert sr._use_lua_lock is False
finally:
sr._use_lua_lock = None
| mit | -6,382,778,191,388,930,000 | 31.863095 | 76 | 0.587031 | false |
apipanda/openssl | app/helpers/marshmallow/convert.py | 1 | 10248 | # -*- coding: utf-8 -*-
import functools
import inspect
import uuid
import marshmallow as ma
import sqlalchemy as sa
from marshmallow import fields, validate
from sqlalchemy.dialects import mssql, mysql, postgresql
from .exceptions import ModelConversionError
from .fields import Related
def _is_field(value):
return (
isinstance(value, type) and
issubclass(value, fields.Field)
)
def _has_default(column):
return (
column.default is not None or
column.server_default is not None or
_is_auto_increment(column)
)
def _is_auto_increment(column):
return (
column.table is not None and
column is column.table._autoincrement_column
)
def _postgres_array_factory(converter, data_type):
return functools.partial(
fields.List,
converter._get_field_class_for_data_type(data_type.item_type),
)
def _should_exclude_field(column, fields=None, exclude=None):
if fields and column.key not in fields:
return True
if exclude and column.key in exclude:
return True
return False
class ModelConverter(object):
"""Class that converts a SQLAlchemy model into a dictionary of corresponding
marshmallow `Fields <marshmallow.fields.Field>`.
"""
SQLA_TYPE_MAPPING = {
sa.Enum: fields.Field,
postgresql.BIT: fields.Integer,
postgresql.UUID: fields.UUID,
postgresql.MACADDR: fields.String,
postgresql.INET: fields.String,
postgresql.JSON: fields.Raw,
postgresql.JSONB: fields.Raw,
postgresql.HSTORE: fields.Raw,
postgresql.ARRAY: _postgres_array_factory,
mysql.BIT: fields.Integer,
mysql.YEAR: fields.Integer,
mysql.SET: fields.List,
mysql.ENUM: fields.Field,
mssql.BIT: fields.Integer,
}
if hasattr(sa, 'JSON'):
SQLA_TYPE_MAPPING[sa.JSON] = fields.Raw
DIRECTION_MAPPING = {
'MANYTOONE': False,
'MANYTOMANY': True,
'ONETOMANY': True,
}
def __init__(self, schema_cls=None):
self.schema_cls = schema_cls
@property
def type_mapping(self):
if self.schema_cls:
return self.schema_cls.TYPE_MAPPING
else:
return ma.Schema.TYPE_MAPPING
def fields_for_model(self, model, include_fk=False, fields=None, exclude=None, base_fields=None,
dict_cls=dict):
result = dict_cls()
base_fields = base_fields or {}
for prop in model.__mapper__.iterate_properties:
if _should_exclude_field(prop, fields=fields, exclude=exclude):
continue
if hasattr(prop, 'columns'):
if not include_fk:
# Only skip a column if there is no overridden column
# which does not have a Foreign Key.
for column in prop.columns:
if not column.foreign_keys:
break
else:
continue
field = base_fields.get(prop.key) or self.property2field(prop)
if field:
result[prop.key] = field
return result
def fields_for_table(self, table, include_fk=False, fields=None, exclude=None, base_fields=None,
dict_cls=dict):
result = dict_cls()
base_fields = base_fields or {}
for column in table.columns:
if _should_exclude_field(column, fields=fields, exclude=exclude):
continue
if not include_fk and column.foreign_keys:
continue
field = base_fields.get(column.key) or self.column2field(column)
if field:
result[column.key] = field
return result
def property2field(self, prop, instance=True, field_class=None, **kwargs):
field_class = field_class or self._get_field_class_for_property(prop)
if not instance:
return field_class
field_kwargs = self._get_field_kwargs_for_property(prop)
field_kwargs.update(kwargs)
ret = field_class(**field_kwargs)
if (
hasattr(prop, 'direction') and
self.DIRECTION_MAPPING[prop.direction.name] and
prop.uselist is True
):
ret = fields.List(ret, **kwargs)
return ret
def column2field(self, column, instance=True, **kwargs):
field_class = self._get_field_class_for_column(column)
if not instance:
return field_class
field_kwargs = self.get_base_kwargs()
self._add_column_kwargs(field_kwargs, column)
field_kwargs.update(kwargs)
return field_class(**field_kwargs)
def field_for(self, model, property_name, **kwargs):
prop = model.__mapper__.get_property(property_name)
return self.property2field(prop, **kwargs)
def _get_field_class_for_column(self, column):
return self._get_field_class_for_data_type(column.type)
def _get_field_class_for_data_type(self, data_type):
field_cls = None
types = inspect.getmro(type(data_type))
# First search for a field class from self.SQLA_TYPE_MAPPING
for col_type in types:
if col_type in self.SQLA_TYPE_MAPPING:
field_cls = self.SQLA_TYPE_MAPPING[col_type]
if callable(field_cls) and not _is_field(field_cls):
field_cls = field_cls(self, data_type)
break
else:
# Try to find a field class based on the column's python_type
try:
python_type = data_type.python_type
except NotImplementedError:
python_type = None
if python_type in self.type_mapping:
field_cls = self.type_mapping[python_type]
else:
if hasattr(data_type, 'impl'):
return self._get_field_class_for_data_type(data_type.impl)
raise ModelConversionError(
'Could not find field column of type {0}.'.format(types[0]))
return field_cls
def _get_field_class_for_property(self, prop):
if hasattr(prop, 'direction'):
field_cls = Related
else:
column = prop.columns[0]
field_cls = self._get_field_class_for_column(column)
return field_cls
def _get_field_kwargs_for_property(self, prop):
kwargs = self.get_base_kwargs()
if hasattr(prop, 'columns'):
column = prop.columns[0]
self._add_column_kwargs(kwargs, column)
if hasattr(prop, 'direction'): # Relationship property
self._add_relationship_kwargs(kwargs, prop)
if getattr(prop, 'doc', None): # Useful for documentation generation
kwargs['description'] = prop.doc
return kwargs
def _add_column_kwargs(self, kwargs, column):
"""Add keyword arguments to kwargs (in-place) based on the passed in
`Column <sqlalchemy.schema.Column>`.
"""
if column.nullable:
kwargs['allow_none'] = True
kwargs['required'] = not column.nullable and not _has_default(column)
if hasattr(column.type, 'enums'):
kwargs['validate'].append(
validate.OneOf(choices=column.type.enums))
# Add a length validator if a max length is set on the column
# Skip UUID columns
if hasattr(column.type, 'length'):
try:
python_type = column.type.python_type
except (AttributeError, NotImplementedError):
python_type = None
if not python_type or not issubclass(python_type, uuid.UUID):
kwargs['validate'].append(
validate.Length(max=column.type.length))
if hasattr(column.type, 'scale'):
kwargs['places'] = getattr(column.type, 'scale', None)
def _add_relationship_kwargs(self, kwargs, prop):
"""Add keyword arguments to kwargs (in-place) based on the passed in
relationship `Property`.
"""
nullable = True
for pair in prop.local_remote_pairs:
if not pair[0].nullable:
if prop.uselist is True:
nullable = False
break
kwargs.update({
'allow_none': nullable,
'required': not nullable,
})
def get_base_kwargs(self):
return {
'validate': []
}
default_converter = ModelConverter()
fields_for_model = default_converter.fields_for_model
"""Generate a dict of field_name: `marshmallow.fields.Field` pairs for the
given model.
:param model: The SQLAlchemy model
:param bool include_fk: Whether to include foreign key fields in the output.
:return: dict of field_name: Field instance pairs
"""
property2field = default_converter.property2field
"""Convert a SQLAlchemy `Property` to a field instance or class.
:param Property prop: SQLAlchemy Property.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given property. If `False`, return the `Field` class.
:param kwargs: Additional keyword arguments to pass to the field constructor.
:return: A `marshmallow.fields.Field` class or instance.
"""
column2field = default_converter.column2field
"""Convert a SQLAlchemy `Column <sqlalchemy.schema.Column>` to a field instance or class.
:param sqlalchemy.schema.Column column: SQLAlchemy Column.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given property. If `False`, return the `Field` class.
:return: A `marshmallow.fields.Field` class or instance.
"""
field_for = default_converter.field_for
"""Convert a property for a mapped SQLAlchemy class to a marshmallow `Field`.
Example: ::
date_created = field_for(Author, 'date_created', dump_only=True)
author = field_for(Book, 'author')
:param type model: A SQLAlchemy mapped class.
:param str property_name: The name of the property to convert.
:param kwargs: Extra keyword arguments to pass to `property2field`
:return: A `marshmallow.fields.Field` class or instance.
"""
| mit | -2,935,554,774,744,383,000 | 33.857143 | 100 | 0.613388 | false |
cloudify-cosmo/cloudify-system-tests | cosmo_tester/test_suites/cluster/compact_cluster_test.py | 1 | 4151 | import copy
import random
import string
import pytest
from cosmo_tester.test_suites.cluster.conftest import run_cluster_bootstrap
from cosmo_tester.framework.examples import get_example_deployment
from .cluster_status_shared import (
_assert_cluster_status,
_verify_status_when_postgres_inactive,
_verify_status_when_rabbit_inactive,
_verify_status_when_syncthing_inactive,
)
@pytest.mark.three_vms
def test_three_nodes_cluster_status(three_nodes_cluster, logger):
node1, node2, node3 = three_nodes_cluster
_assert_cluster_status(node1.client)
_verify_status_when_syncthing_inactive(node1, node2, logger)
_verify_status_when_postgres_inactive(node1, node2, logger, node3.client)
_verify_status_when_rabbit_inactive(node1, node2, node3, logger,
node1.client)
@pytest.mark.three_vms
def test_three_nodes_cluster_teardown(three_nodes_cluster, ssh_key,
test_config, module_tmpdir, logger):
"""Tests a cluster teardown"""
node1, node2, node3 = three_nodes_cluster
nodes_list = [node1, node2, node3]
logger.info('Asserting cluster status')
_assert_cluster_status(node1.client)
logger.info('Installing example deployment')
example = get_example_deployment(node1, ssh_key, logger,
'cluster_teardown', test_config)
example.inputs['server_ip'] = node1.ip_address
example.upload_and_verify_install()
logger.info('Removing example deployment')
example.uninstall()
logger.info('Removing cluster')
for node in nodes_list:
for config_name in ['manager', 'rabbit', 'db']:
node.run_command('cfy_manager remove -v -c /etc/cloudify/'
'{0}_config.yaml'.format(config_name))
credentials = _get_new_credentials()
logger.info('New credentials: %s', credentials)
for node in nodes_list:
node.install_config = copy.deepcopy(node.basic_install_config)
logger.info('Installing Cloudify cluster again')
run_cluster_bootstrap(nodes_list, nodes_list, nodes_list,
skip_bootstrap_list=[], pre_cluster_rabbit=True,
high_security=True, use_hostnames=False,
tempdir=module_tmpdir, test_config=test_config,
logger=logger, revert_install_config=True,
credentials=credentials)
logger.info('Asserting cluster status')
_assert_cluster_status(node1.client)
def _get_new_credentials():
monitoring_creds = {
'username': _random_credential_generator(),
'password': _random_credential_generator()
}
postgresql_password = _random_credential_generator()
return {
'manager': { # We're not changing the username and password
'monitoring': monitoring_creds
},
'postgresql_server': {
'postgres_password': postgresql_password,
'cluster': {
'etcd': {
'cluster_token': _random_credential_generator(),
'root_password': _random_credential_generator(),
'patroni_password': _random_credential_generator()
},
'patroni': {
'rest_password': _random_credential_generator()
},
'postgres': {
'replicator_password': _random_credential_generator()
}
}
},
'postgresql_client': {
'monitoring': monitoring_creds,
'server_password': postgresql_password
},
'rabbitmq': {
'username': _random_credential_generator(),
'password': _random_credential_generator(),
'erlang_cookie': _random_credential_generator(),
'monitoring': monitoring_creds
},
'prometheus': {
'credentials': monitoring_creds
}
}
def _random_credential_generator():
return ''.join(random.choice(string.ascii_lowercase + string.digits)
for _ in range(40))
| apache-2.0 | -934,606,930,455,898,400 | 35.734513 | 77 | 0.600819 | false |
kkanellis/uthportal-server | uthportal/tasks/base.py | 1 | 11926 |
import sys
from abc import ABCMeta, abstractmethod
from datetime import datetime
import feedparser
import requests
from requests.exceptions import ConnectionError, Timeout
from uthportal.database.mongo import MongoDatabaseManager
from uthportal.logger import get_logger
from uthportal.util import truncate_str
class BaseTask(object):
__metaclass__ = ABCMeta
def __init__(self, path, settings, database_manager, pushd_client, **kwargs):
self.settings = settings
self.path = path
self.id = path.split('.')[-1]
self.logger = get_logger(self.id, self.settings)
self.timeout = self.settings['network']['timeout']
self.database_manager = database_manager
self.pushd_client = pushd_client
self.db_collection = '.'.join( path.split('.')[:-1] )
self.db_query = { }
for (key, value) in self.db_query_format.iteritems():
if not hasattr(self, value):
self.logger.error('Missing "%s" field defined in db_query_format' % value)
sys.exit(1)
self.db_query[key] = getattr(self, value)
# Load and update database document (if necessary)
self.document = self.load()
if not self.document:
if hasattr(self, 'document_prototype'):
self.logger.info('No document found in database. Using prototype')
self.document = self.document_prototype
self.save()
else:
self.logger.error('No document_prototype is available!')
return
def __call__(self):
"""This is the method called from the Scheduler when this object is
next in queue (and about to be executed) """
if not hasattr(self, 'document') or not self.document:
self.logger.error('Task has no document attribute or document is empty. Task stalled!')
else:
self.load()
self.update()
def fetch(self, link, session=None, *args, **kwargs):
"""
Fetch a remote document to be parsed later.
This function is called as is from subclasses
"""
if not session:
session = requests.Session()
self.logger.debug('Fetching "%s" ...' % link)
try:
page = session.get(link, *args, timeout=self.timeout, **kwargs)
except ConnectionError:
self.logger.warning('%s: Connection error' % link)
return None
except Timeout:
self.logger.warning('%s: Timeout [%d]' % (link, self.timeout))
return None
if page.status_code is not (200 or 301):
self.logger.warning('%s: Returned [%d]' % (link, page.status_code))
return None
self.logger.debug('Fetched successfully! [%d]' % page.status_code)
# Change page encoding to utf-8 so no special handling for encoding is needed
page.encoding = 'utf-8'
return page.text
@abstractmethod
def update(self, *args, **kwargs):
"""This function is called from __call__. Takes as a key-word argument (kwargs) a dictionary called
new_fields where new data are stored after fecthing procedures. These are compared with the
current data (stored in self.document)"""
# Check if 'new_fields' arg is present
if 'new_fields' in kwargs:
new_fields = kwargs['new_fields']
else:
self.logger.warning('Update method called without "new_fields" dict')
return
# Check if 'new_fields' has the neccessary fields
for field in self.update_fields:
if field not in new_fields:
self.logger.error('Field "%s" not present in "new_fields" dict. Stalling task!' % field)
return
# Get self.document's update_fields
old_fields = { field: self._get_document_field(self.document, field)
for field in self.update_fields }
# Check if new data is available
(data_differ, should_notify) = self.process_new_data(new_fields, old_fields)
now = datetime.now()
if data_differ:
self.logger.debug('Archiving old document...')
self.archive()
# Update new fields
self.logger.debug('Updating new fields...')
for field in self.update_fields:
self._set_document_field(self.document, field, new_fields[field])
# Update remaining fields
self._set_document_field(self.document, 'first_updated', now)
self._set_document_field(self.document, 'last_updated', now)
self.logger.debug('Transmitting new document...')
self.transmit()
if should_notify:
self.notify()
else:
self.logger.debug('No new entries found')
self._set_document_field(self.document, 'last_updated', now)
self.save()
self.logger.debug('Task updated successfully!')
self.post_process()
def process_new_data(self, new_fields, old_fields):
"""
Returns tuple (data_differ[bool], should_notify[bool])
data_differ: True if we have differences between new and old data
should_notify: True if we have to send push notification to the client
"""
data_differ = should_notify = False
# Checking for differences in the according update_fields
for field in self.update_fields:
(old_data, new_data) = (old_fields[field], new_fields[field])
if old_data:
if new_data:
if type(old_data) == type(new_data):
if isinstance(new_data, list):
last_updated = self._get_document_field(self.document, 'last_updated')
# Check if new list entries are present in new_data since last update
new_entries = [ entry for entry in new_data if entry not in old_data ]
if new_entries:
differ = True
notify = False
for entry in new_entries:
assert ('date' in entry and 'has_time' in entry)
# Check if entry was published after last update date
# NOTE: Avoid comparing time because of small time
# changes which may occur in production and
# not properly notify the clients
if entry['date'].date() >= last_updated.date():
notify = True
break
else:
differ = notify = False
else:
differ = notify = True if old_data != new_data else False
else:
self.logger.warning(
'Different type (%s - %s) for the same update field [%s]'
% (type(old_data), type(new_data), field)
)
differ = notify = True
else:
# We shouldn't notify the user because it may be server error:
# e.g problematic parser or invalid link
differ = True
notify = False
else:
# Data differ only if new_data exist
differ = True if new_data else False
# We notify the user if and only if:
# a) data differ and
# b) task is NOT run for the first time
notify = True if differ and 'first_updated' in self.document else False
if differ:
self.logger.info(
truncate_str( 'New entries in field "%s"' % field, 150 )
)
data_differ = data_differ or differ
should_notify = should_notify or notify
return (data_differ, should_notify)
def notify(self):
self.logger.debug('Notifing clients...')
event_name = self.path
data = {
'event': event_name
}
var = { }
if hasattr(self, 'notify_fields'):
var = {
field: self._get_document_field(self.document, field)
for field in self.notify_fields
}
if not all(var.values()):
self.logger.warning('notify: some var values are None')
success = False
try:
success = self.pushd_client.events[event_name].send(data, var=var)
except KeyError:
self.logger.error('No valid event template exists. Notification NOT sent!')
except ValueError:
self.logger.error('Event name is empty/None. Notification NOT sent!')
if success:
self.logger.info('Notification send!')
else:
self.logger.error('Notification NOT send! Check notifier logs')
def post_process(self):
pass
""" Database related method """
def save(self, *args, **kwargs):
"""Save result dictionary in database"""
if not self.database_manager.update_document(
self.db_collection,
self.db_query,
self.document.copy(),
upsert=True,
*args,
**kwargs):
self.logger.warning('Could not save document "%s"' % self.path)
def archive(self, *args, **kwargs):
""" Save the current document into the history collection """
if not self.database_manager.insert_document(
'history.%s' % self.db_collection,
self.document.copy(),
*args,
**kwargs):
self.logger.warning('Could not archive document "%s"' % self.path)
def transmit(self, *args, **kwargs):
""" Save the current document into the server collection free of uneccessary fields """
#TODO: Implement ignore_fields
if not self.database_manager.update_document(
'server.%s' % self.db_collection,
self.db_query,
self.document,
*args, upsert=True, **kwargs):
self.logger.warning('Could not transmit document "%s"' %self.path)
pass
def load(self, *args, **kwargs):
"""Load old dictionary from database"""
document = self.database_manager.find_document(
self.db_collection,
self.db_query,
*args,
**kwargs)
if document and '_id' in document:
del document['_id']
return document
""" Helper methods """
def _set_document_field(self, document, field, value):
""" Sets the field (dot notation format) in the provided document """
keys = field.split('.')
for key in keys[:-1]:
if key not in document:
self.logger.warning('Key "%s" not found in field "%s"' % (key, field))
return
document = document[key]
# Set the field
document[keys[-1]] = value
def _get_document_field(self, document, field):
""" Gets the field (dot notation format) in the provided document """
keys = field.split('.')
for key in keys[:-1]:
if key not in document:
self.logger.warning('Key "%s" not found in field "%s"' % (key, field))
return
document = document[key]
if keys[-1] in document:
return document[keys[-1]]
else:
return None
| gpl-3.0 | -1,328,568,271,007,929,600 | 35.470948 | 107 | 0.536643 | false |
crccheck/project_runpy | project_runpy/heidi.py | 1 | 4119 | """
Heidi: Helpers related to visuals.
"""
import logging
__all__ = ['ColorizingStreamHandler', 'ReadableSqlFilter']
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
# https://gist.github.com/758430
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
# levels to (background, foreground, bold/intense)
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'white', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', True),
}
csi = '\x1b['
reset = '\x1b[0m'
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except: # noqa: E722
self.handleError(record)
def output_colorized(self, message):
self.stream.write(message)
def colorize(self, message, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
# Don't colorize any traceback
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
# LOGGING FILTERS
#################
class ReadableSqlFilter(logging.Filter):
"""
A filter for more readable sql by stripping out the SELECT ... columns.
Modeled after how debug toolbar displays SQL. This code should be optimized
for performance. For example, I don't check to make sure record.name is
'django.db.backends' because I assume you put this filter alongside it.
Sample Usage in Django's `settings.py`:
LOGGING = {
...
'filters': {
'readable_sql': {
'()': 'project_runpy.ReadableSqlFilter',
},
},
'loggers': {
'django.db.backends': {
'filters': ['readable_sql'],
...
},
...
},
}
"""
def filter(self, record):
# https://github.com/django/django/blob/febe136d4c3310ec8901abecca3ea5ba2be3952c/django/db/backends/utils.py#L106-L131
duration, sql, *__ = record.args
if not sql or 'SELECT' not in sql[:28]:
# WISHLIST what's the most performant way to see if 'SELECT' was
# used?
return super().filter(record)
begin = sql.index('SELECT')
try:
end = sql.index('FROM', begin + 6)
except ValueError: # not all SELECT statements also have a FROM
return super().filter(record)
sql = '{0}...{1}'.format(sql[:begin + 6], sql[end:])
# Drop "; args=%s" to shorten logging output
record.msg = '(%.3f) %s'
record.args = (duration, sql)
return super().filter(record)
| apache-2.0 | -28,892,168,273,902,668 | 30.442748 | 126 | 0.531925 | false |
kimlaborg/NGSKit | ngskit/utils/__pycache__/fasta_tools.py | 1 | 2263 | """Fasta Tools
"""
def write_fasta_sequence(sequence_data, file_output, write_mode='a'):
"""Add sequences to a file, in Fasta Format.
Parameters
----------
sequence_data : str
Sequence to add to the fasta file. if only the sequence is provided,
assume the header is not relevant and a random will be created, sequence base
to avoid collisions
sequence_data : array_like
sequence_data[0] == Header or id of the sequences, if do not contain > ,
it will be added.
sequence_data[1] == Sequence
file_output: str, obj
This function can recive both a file_handler or file name. In the former
scenario it will create a file_handler, and in both cases it will let
it open, to improve I/O.
Returns
-------
file_handle : obj
returns the file handler.
Raises
------
ValueError
Sequence_data should contain two items: header, Sequece
Examples
--------
>>> write_fasta_sequence('ATGATGATGA','my_file.fasta')
>>> write_fasta_sequence('ATGATGATGA',open('my_file.fasta', 'a'))
>>> write_fasta_sequence(['SEQ_1', 'ATGATGATGA'],'my_file.fasta')
"""
# Check the input sequence
if isinstance(sequence_data, str):
# create a Header using 100 first sequence caracters.
header = sequence_data.strip('\n').strip()[:100]
sequence_data = [header,
sequence_data.strip('\n').strip()]
if not len(sequence_data)>=2:
raise ValueError("Sequence data must contain at least header and sequence")
# check if a file handelr has been provided
if isinstance(file_output, str):
file_handle = open(file_output, write_mode)
else:
file_handle = file_output
# write the sequence
file_handle.write(">{0}\n{1}\n".format(*sequence_data))
return file_handle
def to_fasta(grp_seq, output, header=False):
"""Transform a batch of sequnces to a fasta format file.
Parameters
----------
grp_seq : array_like
Iterable object with sequneces
""""
if header == False:
for sequence in grp_seq:
output = write_fasta_sequence(seq, output, write_mode='a')
output.close()
| mit | 7,169,794,591,417,631,000 | 26.26506 | 83 | 0.613787 | false |
Shpilevskiy/mobile_family_budget | backend/django_service/mobile_family_budget/purchaseManager/migrations/0001_initial.py | 1 | 1567 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-21 21:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('count', models.PositiveIntegerField(default=1)),
('price', models.FloatField(default=0)),
('current_count', models.PositiveIntegerField(default=0)),
('status', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='PurchaseList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='Мой список покупок', max_length=30)),
('budget_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.BudgetGroup')),
],
),
migrations.AddField(
model_name='purchase',
name='purchase_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='purchaseManager.PurchaseList'),
),
]
| gpl-3.0 | 7,832,930,612,681,314,000 | 35.928571 | 123 | 0.583495 | false |
intelaligent/tctb | tctb/classes/agent.py | 1 | 4353 | #!/usr/bin/env python2
"""
@file connection.py
@author Bo Gao
@date 2017-07-25
@version alpha
Intersection control agent
Copyright (C) 2017 Transport Research Group, University of Southampton
Intersection Control Test Bed
"""
# if "SUMO_HOME" in os.environ:
# tools = os.path.join(os.environ["SUMO_HOME"], "tools")
# sys.path.append(tools)
# from sumolib import checkBinary # sumo, sumo-gui
# else:
# sys.exit("please declare environment variable 'SUMO_HOME'")
# import traci
import os
import sys
from subprocess import call
class Agent(object):
# _name = ""
def init():
raise NotImplementedError( "Method init not implemented." )
class Agent_Tools:
"""
doc: http://www.sumo.dlr.de/userdoc/DUAROUTER.html
duarouter
-n data/map.sumo.net.xml
-t demands/odTrips.demand.xml
-d add.vTypes.xml
-o demands/odTrips.rou.xml
"""
def trip_to_route(self, scn):
_binary_name = "duarouter"
if "SUMO_HOME" in os.environ:
tools = os.path.join(os.environ["SUMO_HOME"], "tools")
sys.path.append(tools)
from sumolib import checkBinary # sumo, sumo-gui
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
output_route_file = os.path.join(scn.get("dir"), "duarouter_out.rou.xml")
command = []
command.append(checkBinary(_binary_name))
command.append("--net-file")
command.append(scn.get("net_file"))
# command.append("--trip-files")
# command.append(scn.get("demand_file"))
command.append("--additional-files")
command.append(scn.get("add_files"))
command.append("--output-file")
command.append(output_route_file)
print("".join(elt + " " for elt in command))
call(command)
return output_route_file
class Agent_Sumo_Coordinator(Agent):
"""
doc: http://www.sumo.dlr.de/userdoc/Tools/tls.html#tlsCoordinator.py
tlsCoordinator.py
-n data/map.sumo.net.xml
-r demands/randomTrips.rou.xml
-o tls/tls.coordinated.xml
"""
_script_name = "tlsCoordinator.py"
def init(self, scn) :
command = []
tls_offset_file = os.path.join(scn.get("dir"), "tls.offset.sumo_coordinator.xml" )
if "SUMO_HOME" in os.environ:
command.append(os.path.join(os.environ["SUMO_HOME"], "tools", self._script_name))
else:
sys.exit("Agent_Sumo_Coordinator requires environment variable 'SUMO_HOME'")
command.append("--net-file")
command.append(scn.get("net_file"))
command.append("--route-file")
command.append(Agent_Tools().trip_to_route(scn))
command.append("--output-file")
command.append(tls_offset_file)
print("".join(elt + " " for elt in command))
call(command)
scn.add_additional_file(tls_offset_file)
class Agent_Sumo_Cycle_Adaptation(Agent):
"""
doc: http://www.sumo.dlr.de/userdoc/Tools/tls.html#tlsCycleAdaptation.py
tlsCycleAdaptation.py
-n data/map.sumo.net.xml
-r demands/odTrips.rou.xml
-o tls/tls.ca.od.xml
"""
_script_name = "tlsCycleAdaptation.py"
def init(self, scn) :
command = []
tls_new_program_file = os.path.join(scn.get("dir"), "tls.offset.sumo_cycle_adaptation.xml" )
if "SUMO_HOME" in os.environ:
command.append(os.path.join(os.environ["SUMO_HOME"], "tools", self._script_name))
else:
sys.exit("Agent_Sumo_Coordinator requires environment variable 'SUMO_HOME'")
command.append("--net-file")
command.append(scn.get("net_file"))
command.append("--route-file")
command.append(Agent_Tools().trip_to_route(scn))
command.append("--output-file")
command.append(tls_new_program_file)
print("".join(elt + " " for elt in command))
call(command)
scn.add_additional_file(tls_new_program_file)
class AgentManager:
def initialise_agent_for_scenario(self, scn):
return {
"tls_sumo_coordinator" : lambda scn : Agent_Sumo_Coordinator().init(scn),
"tls_sumo_cycle_adaptation" : lambda scn : Agent_Sumo_Cycle_Adaptation().init(scn)
}[scn.get("agent_type")](scn)
| gpl-3.0 | -1,551,814,368,528,722,000 | 28.02 | 100 | 0.612221 | false |
grnet/synnefo | docs/conf.py | 1 | 1973 | import sys, os
sys.path.insert(0, os.path.abspath('../snf-cyclades-app'))
import synnefo
reload(synnefo)
import synnefo.versions
reload(synnefo.versions)
from synnefo.versions.app import __version__
project = u'synnefo'
copyright = u'2010-2017, GRNET S.A.'
version = __version__
release = __version__
html_title = 'synnefo ' + version
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_theme_options = {
'sidebarwidth': '300',
'collapsiblesidebar': 'true',
'footerbgcolor': '#55b577',
'footertextcolor': '#000000',
'sidebarbgcolor': '#ffffff',
'sidebarbtncolor': '#f2f2f2',
'sidebartextcolor': '#000000',
'sidebarlinkcolor': '#328e4a',
'relbarbgcolor': '#55b577',
'relbartextcolor': '#ffffff',
'relbarlinkcolor': '#ffffff',
'bgcolor': '#ffffff',
'textcolor': '#000000',
'headbgcolor': '#ffffff',
'headtextcolor': '#000000',
'headlinkcolor': '#c60f0f',
'linkcolor': '#328e4a',
'visitedlinkcolor': '#63409b',
'codebgcolor': '#eeffcc',
'codetextcolor': '#333333'
}
htmlhelp_basename = 'synnefodoc'
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'django': ('https://docs.djangoproject.com/en/dev/',
'https://docs.djangoproject.com/en/dev/_objects/')
}
SYNNEFO_PROJECTS = ['synnefo', 'archipelago', 'kamaki', 'snf-image',
'snf-image-creator', 'nfdhcpd', 'snf-vncauthproxy',
'snf-network']
SYNNEFO_DOCS_BASEURL = 'https://www.synnefo.org/docs/%s/latest/objects.inv'
for project in SYNNEFO_PROJECTS:
project_url = SYNNEFO_DOCS_BASEURL % project
intersphinx_mapping[project.replace('-', '')] = (os.path.dirname(project_url), project_url)
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
| gpl-3.0 | -5,990,170,465,250,381,000 | 28.893939 | 95 | 0.631019 | false |
FogCreek/solari-board | example_realtime/liveFogbugz.py | 1 | 1937 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import sys, os
import cgi
import time
def cgi_callback():
params = cgi.parse_qs(os.environ['QUERY_STRING'])
# lastts is the last modified date that this browser has already loaded, 0 means this is an initial request
lastts = 0
if params.has_key('ts'):
lastts = int(params['ts'][0])
# keep count of the number of times waiting so this takes no longer than 30 sec to respond
attempt = 0
ts = ''
while ts == '':
attempt += 1
try:
stats = os.stat('fogbugz.json')
if (attempt > 56 or int(stats.st_mtime) > lastts):
# the file either has new data, or we've been waiting long enough, exit the loop
ts = int(stats.st_mtime)
else:
# the file has no new data, wait a half a second and try again
time.sleep(0.5)
except:
break
if ts == "":
# a file was not found, return invalid JSON to raise an error in the UI
json = 'Show fail whale because refreshFogBugz.py has never been called'
ts = 0
else:
f = open('fogbugz.json')
json = f.read()
f.close()
if json == '':
json = '[]'
print('Content-Type: application/javascript\n')
# remember this last modified ts, so future requests can tell if there's new data
print('URL_SUFFIX = "&ts=%s";' % (ts))
# if responding immediately then kick off another read
if attempt == 1 and not params.has_key('ts'):
print('setTimeout(updateSolariBoard, 1000);')
# support a callback param, or default to "void"
callback = 'void'
if params.has_key('callback'):
callback = params['callback'][0]
# send the json to jQuery's callback
print('%s(%s);' % (callback,json))
if __name__ == '__main__':
cgi_callback()
| mit | -8,826,270,226,747,974,000 | 29.746032 | 111 | 0.579763 | false |
ucloud/uai-sdk | examples/tensorflow-2.0/imagenet/train/code/imagenet.py | 1 | 3432 | import os
import vgg_preprocessing
import tensorflow as tf
_DEFAULT_IMAGE_SIZE = 224
_NUM_CHANNELS = 3
_LABEL_CLASSES = 1001
_FILE_SHUFFLE_BUFFER = 1024
_SHUFFLE_BUFFER = 1500
class ImagenetDataSet(object):
"""Imagenet data set
"""
def __init__(self, data_dir, subset='train', use_distortion=True):
self.data_dir = data_dir
self.subset = subset
self.use_distortion = use_distortion
def filenames(self, is_training, data_dir):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, 'train-%05d-of-01024' % i)
for i in range(1024)]
else:
return [
os.path.join(data_dir, 'validation-%05d-of-00128' % i)
for i in range(128)]
def parser(self, value, is_training):
"""Parse an ImageNet record from `value`."""
keys_to_features = {
'image/encoded':
tf.compat.v1.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.compat.v1.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.compat.v1.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/class/text':
tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/object/bbox/xmin':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.compat.v1.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.compat.v1.VarLenFeature(dtype=tf.int64),
}
parsed = tf.compat.v1.parse_single_example(value, keys_to_features)
image = tf.image.decode_image(
tf.reshape(parsed['image/encoded'], shape=[]),
_NUM_CHANNELS)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = vgg_preprocessing.preprocess_image(
image=image,
output_height=_DEFAULT_IMAGE_SIZE,
output_width=_DEFAULT_IMAGE_SIZE,
is_training=is_training)
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]),
dtype=tf.int32)
return image, label #tf.one_hot(label, _LABEL_CLASSES)
def make_dataset(self, batch_size, is_training, num_shards, num_epochs=1):
data_dir = self.data_dir
shards_batch_size = int(batch_size / num_shards)
"""Input function which provides batches for train or eval."""
dataset = tf.data.Dataset.from_tensor_slices(self.filenames(is_training, data_dir))
if is_training:
dataset = dataset.shuffle(buffer_size=(_FILE_SHUFFLE_BUFFER * num_shards))
dataset = dataset.flat_map(tf.data.TFRecordDataset)
dataset = dataset.map(lambda value: self.parser(value, is_training),
num_parallel_calls=5)
dataset = dataset.prefetch(batch_size * 2)
if is_training:
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes have better performance.
dataset = dataset.shuffle(buffer_size=_SHUFFLE_BUFFER)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(shards_batch_size)
return dataset | apache-2.0 | -8,043,587,197,140,686,000 | 33.676768 | 87 | 0.644814 | false |
OpenTwinCities/site_bot | app/Meetup/RSS.py | 1 | 1767 | # -*- coding: utf8 -*-
import feedparser
from bs4 import BeautifulSoup
from datetime import datetime
from time import mktime
class MeetupRSS:
MEETUP_DOMAIN = 'www.meetup.com'
def __init__(self, group_id):
self.group_id = group_id
self.__rss__ = None
self.__events__ = None
@property
def rss_url(self):
return 'https://%s/%s/events/rss' % (self.MEETUP_DOMAIN, self.group_id)
def fetch_rss(self):
"""Use feedparser to get entries from the RSS feed"""
return feedparser.parse(self.rss_url)
def update_entries(self):
"""Fetch entries from RSS feed and store them"""
self.__rss__ = self.fetch_rss()
self.__events__ = self.__rss__.entries
def parse_event(self, e):
"""Helper function to convert RSS event data to
expected data for MeetupEvent"""
event = {}
event['title'] = e.title
event['id'] = e.guid.rsplit('/')[-2] if e.guid.endswith('/') else e.guid.rsplit('/', 1)[-1]
# published_parsed has the date in struct_time
# Convert to datetime for better output
event['time'] = datetime.fromtimestamp(mktime(e.published_parsed))
# Find a better way to parse this specific element
html = BeautifulSoup(e.summary, 'html.parser')
event['excerpt'] = None
for tag in html.find_all('p'):
for p in tag.find_all('p'):
event['excerpt'] = str(p)
break
event['venue_name'] = None
event['venue_location'] = None
return event
@property
def events(self):
"""Stored entries from the RSS feed"""
if self.__events__ is None:
self.update_entries()
return self.__events__
| mit | -47,412,677,332,010,296 | 28.949153 | 99 | 0.578947 | false |
cernbox/smashbox | lib/test_slowwrite.py | 1 | 2941 | import os
import time
import tempfile
__doc__ = """
Synchronize local folder while writing into the file.
This is a testcase for:
https://github.com/owncloud/mirall/issues/2210 (corrupted file upload if file modified during transfer)
owncloudcmd will delay syncing of the file if the file is modified every 2 seconds or less (slowWrite < 2)
"""
from smashbox.utilities import *
from smashbox.utilities.hash_files import *
do_not_report_as_failure()
MB = 1024*1000
filesizeKB = int(config.get('slowwrite_filesizeKB',10000))
blockSize = int(config.get('slowwrite_blockSize',MB))
slowWrite = int(config.get('slowwrite_slowWrite',1))
nfiles=1
testsets = [
{ 'slowwrite_filesizeKB': 2,
'slowwrite_blockSize': 200,
'slowwrite_slowWrite':1.5
},
{ 'slowwrite_filesizeKB': 5000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
},
{ 'slowwrite_filesizeKB': 11000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
},
{ 'slowwrite_filesizeKB': 25000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
},
{ 'slowwrite_filesizeKB': 50000,
'slowwrite_blockSize': MB,
'slowwrite_slowWrite':1
}
]
@add_worker
def writer(step):
ver=ocsync_version()
# sync client version 2.x.x and below were syncing indefinitely in case of local errors, so eventually the files got synced
# for newer clients, the default number of sync retries is 3 which is not enough to get the file synced if the writes are really slow
# so for newer client we set the --max-sync-retries explicitly to a higher number (this is a new option)
# ref: https://github.com/owncloud/client/issues/4586
if ver[0] >= 2:
config.oc_sync_cmd += " --max-sync-retries 20"
# do not cleanup server files from previous run
reset_owncloud_account()
# cleanup all local files for the test
reset_rundir()
step(1,'Preparation')
d = make_workdir('writer') # bother writer and synchronizer share the same workdir
run_ocsync(d)
k0 = count_files(d)
step(2,'Add %s files and check if we still have k1+nfiles after resync'%nfiles)
create_hashfile(d,size=filesizeKB*1000,bs=blockSize,slow_write=slowWrite) #config.hashfile_size)
@add_worker
def synchronizer(step):
step(2,'Sync the file as it is being written by writer')
sleep(slowWrite*2)
d = make_workdir('writer') # bother writer and synchronizer share the same workdir
run_ocsync(d)
@add_worker
def checker(step):
step(1,'Preparation')
d = make_workdir()
run_ocsync(d)
k0 = count_files(d)
step(3,'Resync and check files added by synchronizer')
run_ocsync(d)
analyse_hashfiles(d)
k1 = count_files(d)
error_check(k1-k0==nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0))
| agpl-3.0 | -6,707,239,179,517,909,000 | 24.353448 | 137 | 0.658279 | false |
MIT-LCP/false-alarm-reduction | pyfar/utils.py | 1 | 5165 | from __future__ import print_function
import wfdb
import json
def abs_value(x, y):
return abs(x-y)
def is_true_alarm_fields(fields):
return fields['comments'][1] == 'True alarm'
def is_true_alarm(data_path, sample_name):
sig, fields = wfdb.srdsamp(data_path + sample_name)
return is_true_alarm_fields(fields)
# start and end in seconds
def get_annotation(sample, ann_type, ann_fs, start, end):
try:
annotation = wfdb.rdann(sample, ann_type, sampfrom=start*ann_fs, sampto=end*ann_fs)
except Exception as e:
annotation = []
print(e)
return annotation
## Returns type of arrhythmia alarm
# output types include: 'a', 'b', 't', 'v', 'f'
def get_arrhythmia_type(fields):
"""Returns type of arrhythmia based on fields of the sample
Arguments
---------
fields: fields of sample read from wfdb.rdsamp
Returns
-------
Type of arrhythmia
'a': asystole
'b': bradycardia
't': tachycardia
'f': ventricular fibrillation
'v': ventricular tachycardia
"""
arrhythmias = {
'Asystole': 'a',
'Bradycardia': 'b',
'Tachycardia': 't',
'Ventricular_Tachycardia': 'v',
'Ventricular_Flutter_Fib': 'f'
}
arrhythmia_type = fields['comments'][0]
return arrhythmias[arrhythmia_type]
def get_channel_type(channel_name, sigtypes_filename):
"""Returns type of channel
Arguments
---------
channel_name: name of channel (e.g. "II", "V", etc.)
sigtypes_filename: file mapping channel names to channel
types
Returns
-------
Type of channel (e.g. "ECG", "BP", "PLETH", "Resp")
"""
channel_types_dict = {}
with open(sigtypes_filename, "r") as f:
for line in f:
splitted_line = line.split("\t")
channel = splitted_line[-1].rstrip()
channel_type = splitted_line[0]
channel_types_dict[channel] = channel_type
if channel_name in channel_types_dict.keys():
return channel_types_dict[channel_name]
raise Exception("Unknown channel name")
def get_samples_of_type(samples_dict, arrhythmia_type):
"""Returns a sub-dictionary of only the given arrhythmia type
Arguments
---------
samples_dict: dictionary mapping sample names to data associated
with the given sample
arrhythmia_type:
'a': asystole
'b': bradycardia
't': tachycardia
'f': ventricular fibrillation
'v': ventricular tachycardia
Returns
-------
a sub-dictionary with keys of only the given arrhythmia
"""
subdict = {}
for sample_name in samples_dict.keys():
if sample_name[0] == arrhythmia_type:
subdict[sample_name] = samples_dict[sample_name]
return subdict
def write_json(dictionary, filename):
with open(filename, "w") as f:
json.dump(dictionary, f)
def read_json(filename):
with open(filename, "r") as f:
dictionary = json.load(f)
return dictionary
def get_classification_accuracy(matrix):
num_correct = len(matrix["TP"]) + len(matrix["TN"])
num_total = len(matrix["FP"]) + len(matrix["FN"]) + num_correct
return float(num_correct) / num_total
def calc_sensitivity(counts):
tp = counts["TP"]
fn = counts["FN"]
return tp / float(tp + fn)
def calc_specificity(counts):
tn = counts["TN"]
fp = counts["FP"]
return tn / float(tn + fp)
def calc_ppv(counts):
tp = counts["TP"]
fp = counts["FP"]
return tp / float(tp + fp)
def calc_npv(counts):
tn = counts["TN"]
fn = counts["FN"]
return tn / float(tn + fn)
def calc_f1(counts):
sensitivity = calc_sensitivity(counts)
ppv = calc_ppv(counts)
return 2 * sensitivity * ppv / float(sensitivity + ppv)
def print_stats(counts):
try:
sensitivity = calc_sensitivity(counts)
specificity = calc_specificity(counts)
ppv = calc_ppv(counts)
npv = calc_npv(counts)
f1 = calc_f1(counts)
except Exception as e:
print(e)
print("counts: ", counts)
print("sensitivity: ", sensitivity)
print("specificity: ", specificity)
print("ppv: ", ppv)
print("npv: ", npv)
print("f1: ", f1)
def get_matrix_classification(actual, predicted):
if actual and predicted:
return "TP"
elif actual and not predicted:
return "FN"
elif not actual and predicted:
return "FP"
return "TN"
def get_score(matrix):
numerator = len(matrix["TP"]) + len(matrix["TN"])
denominator = len(matrix["FP"]) + 5*len(matrix["FN"]) + numerator
return float(numerator) / denominator
def get_by_arrhythmia(confusion_matrix, arrhythmia_prefix):
counts_by_arrhythmia = {}
matrix_by_arrhythmia = {}
for classification_type in confusion_matrix.keys():
sample_list = [ sample for sample in confusion_matrix[classification_type] if sample[0] == arrhythmia_prefix]
counts_by_arrhythmia[classification_type] = len(sample_list)
matrix_by_arrhythmia[classification_type] = sample_list
return counts_by_arrhythmia, matrix_by_arrhythmia
| mit | -8,308,017,992,821,407,000 | 23.712919 | 117 | 0.619942 | false |
max4260/HydroP | python/script.py | 1 | 1197 | import webiopi
import datetime
import sqlite3
import subprocess
import sys
GPIO = webiopi.GPIO
AUTO = 1
SCRIPT_PATH = "/home/pi/HydroP/python/"
LIGHT1 = 17 # GPIO pin using BCM numbering
dbconn = sqlite3.connect(SCRIPT_PATH + "hydro.db")
dbconn.row_factory = sqlite3.Row
dbcursor = dbconn.cursor()
# setup function is automatically called at WebIOPi startup
def setup():
# set the GPIO used by the light to output
GPIO.setFunction(LIGHT1, GPIO.OUT)
# retrieve current datetime
now = datetime.datetime.now()
dbcursor.execute("SELECT status, interval FROM devices WHERE name = \"LIGHT1\"")
lightDevice = dbcursor.fetchone()
if (lightDevice != None) and (lightDevice["status"] == AUTO):
setLightInterval(lightDevice['interval'])
# loop function is repeatedly called by WebIOPi
def loop():
# retrieve current datetime
now = datetime.datetime.now()
# gives CPU some time before looping again
webiopi.sleep(1)
# destroy function is called at WebIOPi shutdown
def destroy():
GPIO.digitalWrite(LIGHT1, GPIO.LOW)
@webiopi.macro
def setLightInterval(interval):
subprocess.Popen(["python",SCRIPT_PATH + "light_loop.py",str(interval)]) | gpl-2.0 | 1,823,126,588,863,554,300 | 25.622222 | 84 | 0.721805 | false |
jakirkham/bokeh | bokeh/io/state.py | 1 | 7853 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Encapsulate implicit state that is useful for Bokeh plotting APIs.
.. note::
While ``State`` objects can also be manipulated explicitly, they are
automatically configured when the functions :func:`~bokeh.io.output_file`,
etc. from :ref:`bokeh.io` are used, so this is not necessary under
typical usage.
Generating output for Bokeh plots requires coordinating several things:
:class:`~bokeh.document.Document`
Groups together Bokeh models that may be shared between plots (e.g.,
range or data source objects) into one common strucure.
:class:`~bokeh.resources.Resources`
Control how JavaScript and CSS for the client library BokehJS are
included and used in the generated output.
It is possible to handle the configuration of these things manually, and some
examples of doing this can be found in ``examples/models`` directory. When
developing sophisticated applications, it may be necessary or desirable to work
at this level. However, for general use this would quickly become burdensome.
This module provides a ``State`` class that encapsulates these objects and
ensures their proper configuration in many common usage scenarios.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
# External imports
from six import string_types
# Bokeh imports
from ..document import Document
from ..resources import Resources
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class State(object):
''' Manage state related to controlling Bokeh output.
'''
def __init__(self):
self.last_comms_handle = None
self.uuid_to_server = {} # Mapping from uuid to server instance
self.reset()
# Properties --------------------------------------------------------------
@property
def document(self):
''' A default :class:`~bokeh.document.Document` to use for all
output operations.
'''
return self._document
@document.setter
def document(self, doc):
self._document = doc
@property
def file(self):
''' A dict with the default configuration for file output (READ ONLY)
The dictionary value has the following form:
.. code-block:: python
{
'filename' : # filename to use when saving
'resources' : # resources configuration
'title' : # a title for the HTML document
}
'''
return self._file
@property
def notebook(self):
''' Whether to generate notebook output on show operations. (READ ONLY)
'''
return self._notebook
@property
def notebook_type(self):
''' Notebook type
'''
return self._notebook_type
@notebook_type.setter
def notebook_type(self, notebook_type):
''' Notebook type, acceptable values are 'jupyter' as well as any names
defined by external notebook hooks that have been installed.
'''
if notebook_type is None or not isinstance(notebook_type, string_types):
raise ValueError("Notebook type must be a string")
self._notebook_type = notebook_type.lower()
# Public methods ----------------------------------------------------------
def output_file(self, filename, title="Bokeh Plot", mode="cdn", root_dir=None):
''' Configure output to a standalone HTML file.
Calling ``output_file`` not clear the effects of any other calls to
``output_notebook``, etc. It adds an additional output destination
(publishing to HTML files). Any other active output modes continue
to be active.
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document
mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`~bokeh.resources.Resources`
for more details.
root_dir (str, optional) : root dir to use for absolute resources
(default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or``CDN``.
.. warning::
The specified output file will be overwritten on every save, e.g.,
every time ``show()`` or ``save()`` is called.
'''
self._file = {
'filename' : filename,
'resources' : Resources(mode=mode, root_dir=root_dir),
'title' : title
}
if os.path.isfile(filename):
log.info("Session output file '%s' already exists, will be overwritten." % filename)
def output_notebook(self, notebook_type='jupyter'):
''' Generate output in notebook cells.
Calling ``output_notebook`` not clear the effects of any other calls
to ``output_file``, etc. It adds an additional output destination
(publishing to notebook output cells). Any other active output modes
continue to be active.
Returns:
None
'''
self._notebook = True
self.notebook_type = notebook_type
def reset(self):
''' Deactivate all currently active output modes and set ``curdoc()``
to a fresh empty ``Document``.
Subsequent calls to ``show()`` will not render until a new output mode
is activated.
Returns:
None
'''
self._reset_with_doc(Document())
# Private methods ---------------------------------------------------------
def _reset_keeping_doc(self):
''' Reset output modes but DO NOT replace the default Document
'''
self._file = None
self._notebook = False
self._notebook_type = None
def _reset_with_doc(self, doc):
''' Reset output modes but DO replace the default Document
'''
self._document = doc
self._reset_keeping_doc()
def curstate():
''' Return the current State object
Returns:
State : the current default State object
'''
global _STATE
if _STATE is None:
_STATE = State()
return _STATE
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_STATE = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 8,792,376,044,795,884,000 | 31.995798 | 96 | 0.513434 | false |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_dircache.py | 1 | 2489 | """
Test cases for the dircache module
Nick Mathewson
"""
import unittest
from test.test_support import run_unittest, TESTFN, import_module
dircache = import_module('dircache', deprecated=True)
import os, time, sys, tempfile
class DircacheTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
for fname in os.listdir(self.tempdir):
self.delTemp(fname)
os.rmdir(self.tempdir)
def writeTemp(self, fname):
f = open(os.path.join(self.tempdir, fname), 'w')
f.close()
def mkdirTemp(self, fname):
os.mkdir(os.path.join(self.tempdir, fname))
def delTemp(self, fname):
fname = os.path.join(self.tempdir, fname)
if os.path.isdir(fname):
os.rmdir(fname)
else:
os.unlink(fname)
def test_listdir(self):
## SUCCESSFUL CASES
entries = dircache.listdir(self.tempdir)
self.assertEquals(entries, [])
# Check that cache is actually caching, not just passing through.
self.assert_(dircache.listdir(self.tempdir) is entries)
# Directories aren't "files" on Windows, and directory mtime has
# nothing to do with when files under a directory get created.
# That is, this test can't possibly work under Windows -- dircache
# is only good for capturing a one-shot snapshot there.
if sys.platform[:3] not in ('win', 'os2'):
# Sadly, dircache has the same granularity as stat.mtime, and so
# can't notice any changes that occurred within 1 sec of the last
# time it examined a directory.
time.sleep(1)
self.writeTemp("test1")
entries = dircache.listdir(self.tempdir)
self.assertEquals(entries, ['test1'])
self.assert_(dircache.listdir(self.tempdir) is entries)
## UNSUCCESSFUL CASES
self.assertRaises(OSError, dircache.listdir, self.tempdir+"_nonexistent")
def test_annotate(self):
self.writeTemp("test2")
self.mkdirTemp("A")
lst = ['A', 'test2', 'test_nonexistent']
dircache.annotate(self.tempdir, lst)
self.assertEquals(lst, ['A/', 'test2', 'test_nonexistent'])
def test_main():
try:
run_unittest(DircacheTests)
finally:
dircache.reset()
if __name__ == "__main__":
test_main()
| mit | 7,273,786,802,098,980,000 | 30.324675 | 81 | 0.601045 | false |
zhenjiawang157/BART_Py2 | BART/AUCcalc.py | 1 | 6820 | # Time-stamp: <2017-08-10>
'''Module for calculating ROC-AUC values for all TF datasets
Copyright (c) 2017, 2018 Chongzhi Zang, Zhenjia Wang <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License.
@status: release candidate
@version: $Id$
@author: Chongzhi Zang, Zhenjia Wang
@contact: [email protected]
'''
from __future__ import division
import os,sys,os.path
import argparse,time
import configparser
import re
import multiprocessing
from BART.StatTest import stat_test
from BART.OptValidator import opt_validate,conf_validate
from BART.ReadCount import read_count_on_DHS
import bz2
def get_file(filedir,match):
mark = re.compile(match)
names = os.listdir(filedir)
files = []
for name in names:
if mark.search(name):
files.append('_'.join(name.split('_')[:-2]))
files = sorted(files)
return files
def get_position_list(margefile):
'''
    Get the list of DHS IDs, sorted by decreasing MARGE score.
'''
fin = open(margefile,'rb')
line = fin.readline()
score = {}
while line:
line = line.strip().split()
try:
score[line[-2]]=float(line[-1])
except:
pass
line = fin.readline()
fin.close()
return sorted(score.keys(),key=score.get,reverse=True)
def get_match_list(tf, tfdir,positions):
'''
Return the binding info on DHS
'''
## .txt format
# fname = tf+'_DHS_01.txt'
# tf = open(os.path.join(tfdir,fname), 'rb')
# lines = tf.raw.read()
## .bz2 format
fname = tf+'_DHS_01.txt.bz2'
tf = bz2.BZ2File(os.path.join(tfdir,fname),'r')
lines = tf.read()
match = [ord(lines[2*position-2])-ord('0') for position in positions]
tf.close()
return match
def partion(match):
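    # Partition the 0/1 match list into fixed-size chunks of 10000 DHS
    # positions, counting per chunk how many are bound (list_t) versus
    # unbound (list_f); roc_auc() accumulates these counts into an ROC curve.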
sub_t = 0
list_t = []
list_f = []
total = len(match)
groupsize=10000
groups = int(total/groupsize)
for i in range(groups):
sub_t = sum(match[i*groupsize:(i+1)*groupsize])
sub_f = groupsize - sub_t
list_t.append(sub_t)
list_f.append(sub_f)
sub_t = sum(match[groups*groupsize:])
sub_f = total -groups*groupsize-sub_t
list_t.append(sub_t)
list_f.append(sub_f)
return total, list_t, list_f
def roc_auc(total, list_t, list_f):
list_x = [0.0]
list_y = [0.0]
assert len(list_t)==len(list_f)
for i in range(len(list_t)):
list_x.append(list_f[i]+list_x[i])
list_y.append(list_t[i]+list_y[i])
total_t = list_y[-1]
list_x = [i/(total - total_t) for i in list_x]
list_y = [i/total_t for i in list_y]
auc = 0.0
for i in range(1,len(list_x)):
width = list_x[i]-list_x[i-1]
height = (list_y[i]+list_y[i-1])/2
auc += height*width
return list_x, list_y, auc
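# Minimal sanity-check sketch (hypothetical, not from the original file): a
# ranking that puts all bound sites first should yield an AUC of 1.0.
#
#     _, _, auc = roc_auc(4, [2, 0], [0, 2])   # 2 positives ranked first
#     assert abs(auc - 1.0) < 1e-9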
def cal_auc_for_tf(tf_p):
tf,tfdir,positions = tf_p
# time1 = time.time()
match = get_match_list(tf,tfdir,positions)
(t, lt, lf) = partion(match)
(list_x, list_y, auc) = roc_auc(t, lt,lf)
# time2 = time.time()
# print(time2-time1)
return tf,auc
def run(options):
args = opt_validate(options)
tfs = get_file(args.tfdir,'DHS_01')
if len(tfs) == 0:
        sys.stderr.write('Please specify the correct directory of TF binding profiles!\n')
sys.exit(1)
try:
os.makedirs(args.outdir)
except:
        sys.exit('Output directory: {} already exists, please select another directory.'.format(args.outdir))
# This part is for the auc.txt input
#if args.auc:
# AUCs = {}
# with open(args.infile,'r') as auc_infile:
# for auc_line in auc_infile.readlines():
# auc_info = auc_line.strip().split()
# AUCs[auc_info[0]] = float(auc_info[-1])
# #print(AUCs)
# stat_test(AUCs,args)
# exit(0)
# print(args,'\n')
if args.subcommand_name == 'geneset':
print('Prediction starts...\n\nRank all DHS...\n')
margefile = args.infile
positions = get_position_list(args.infile)
positions = [int(i) for i in positions]
#print(type(positions[1]));exit(0)
elif args.subcommand_name == 'profile':
print('Start mapping the {} file...\n'.format(args.format.upper()))
counting = read_count_on_DHS(args)
positions = sorted(counting.keys(),key=counting.get,reverse=True)
positions = [int(i) for i in positions]
#print([[i,counting[i]] for i in positions[:30]])
print('Prediction starts...\n\nRank all DHS...\n')
if len(positions) == 0:
        sys.stderr.write('Input file might not be in the right format!\n')
sys.exit(1)
# output file of AUC-ROC values for all TFs
aucfile = args.outdir+os.sep+args.ofilename+'_auc.txt'
sys.stdout.write("Calculating ROC-AUC values for all transcription factors:\n\n")
print(args)
tf_ps = [(tf,args.tfdir,positions) for tf in tfs]
print(len(tf_ps),'#TF datasets')###########
AUCs = {}
# always multiprocessing
if args.processes:
        print('--Number of CPUs in use: {}\n'.format(args.processes))
pool = multiprocessing.Pool(processes=args.processes)
tf_aucs = pool.map_async(cal_auc_for_tf,tf_ps,chunksize=1)
total=len(tf_ps)
#print(total)
#import tqdm ##pbar
#pbar = tqdm.tqdm(total=total) ##pbar
#last=total ##pbar
while not tf_aucs.ready(): # print percentage of work has been done
remaining=tf_aucs._number_left
#pbar.update(last-remaining) ##pbar
#last=remaining ##pbar
sys.stdout.write('\n Processing...{:.1f}% finished'.format(100*(total-remaining)/total)) ##print
i=0
while not tf_aucs.ready() and i<24:
sys.stdout.write('.')
sys.stdout.flush()
#print(".",end='',flush=True) for py3
i+=1
time.sleep(5)
#pbar.update(remaining) ##pbar
#pbar.close() ##pbar
print('\n Processing...100.0% finished.') ##print
pool.close()
pool.join()
# save the AUCs
for tfauc in tf_aucs.get():
AUCs[tfauc[0]]=tfauc[1]
#print(AUCs)
else:
for tf_p in tf_ps:
AUCs[tf_p[0]]=cal_auc_for_tf(tf_p)[1]
with open(aucfile, 'w') as aucf:
for tf_key in sorted(AUCs.keys(),key=AUCs.get,reverse=True):
aucf.write('{}\tAUC = {:.3f}\n'.format(tf_key,AUCs[tf_key]))
print('\n--ROC-AUC calculation finished!\n--Results saved in file: {}\n'.format(aucfile))
stat_test(AUCs,args)
| bsd-2-clause | -6,984,467,226,184,872,000 | 29.58296 | 109 | 0.57478 | false |
timbooo/traktforalfred | trakt/mapper/summary.py | 1 | 3161 | from trakt.mapper.core.base import Mapper
class SummaryMapper(Mapper):
@classmethod
def movies(cls, client, items, **kwargs):
if not items:
return None
return [cls.movie(client, item, **kwargs) for item in items]
@classmethod
def movie(cls, client, item, **kwargs):
if not item:
return None
if 'movie' in item:
i_movie = item['movie']
else:
i_movie = item
# Retrieve item keys
pk, keys = cls.get_ids('movie', i_movie)
if pk is None:
return None
# Create object
movie = cls.construct(client, 'movie', i_movie, keys, **kwargs)
# Update with root info
if 'movie' in item:
movie._update(item)
return movie
@classmethod
def shows(cls, client, items, **kwargs):
if not items:
return None
return [cls.show(client, item, **kwargs) for item in items]
@classmethod
def show(cls, client, item, **kwargs):
if not item:
return None
if 'show' in item:
i_show = item['show']
else:
i_show = item
# Retrieve item keys
pk, keys = cls.get_ids('show', i_show)
if pk is None:
return None
# Create object
show = cls.construct(client, 'show', i_show, keys, **kwargs)
# Update with root info
if 'show' in item:
show._update(item)
return show
@classmethod
def seasons(cls, client, items, **kwargs):
if not items:
return None
return [cls.season(client, item, **kwargs) for item in items]
@classmethod
def season(cls, client, item, **kwargs):
if not item:
return None
if 'season' in item:
i_season = item['season']
else:
i_season = item
# Retrieve item keys
pk, keys = cls.get_ids('season', i_season)
if pk is None:
return None
# Create object
season = cls.construct(client, 'season', i_season, keys, **kwargs)
# Update with root info
if 'season' in item:
season._update(item)
return season
@classmethod
def episodes(cls, client, items, **kwargs):
if not items:
return None
return [cls.episode(client, item, **kwargs) for item in items]
@classmethod
def episode(cls, client, item, **kwargs):
if not item:
return None
if 'episode' in item:
i_episode = item['episode']
else:
i_episode = item
# Retrieve item keys
pk, keys = cls.get_ids('episode', i_episode)
if pk is None:
return None
# Create object
episode = cls.construct(client, 'episode', i_episode, keys, **kwargs)
# Update with root info
if 'episode' in item:
episode._update(item)
return episode
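# Hedged usage sketch (the ``client`` and ``item`` objects below are
# assumptions, not defined in this file): given a Trakt client and a raw
# summary dict, typed objects are built via the classmethods, e.g.
#
#     movie = SummaryMapper.movie(client, item)      # or .show / .season / .episode
#     movies = SummaryMapper.movies(client, items)   # list variants map each item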
| mit | 3,495,584,519,128,426,500 | 22.129771 | 77 | 0.506485 | false |
mmetince/akgulyzer | akgulyzer.py | 1 | 3279 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mehmet Dursun Ince
from argparse import ArgumentParser
from random import choice, randint
import locale
locale.setlocale(locale.LC_ALL, "tr_TR")
class Akgulyzer(object):
def __init__(self, args):
"""
        Akgulyzer constructor: dispatches to encode or random mode based on args.
:param args:
:return:
"""
if args.encode:
self.encode(args.encode)
elif args.random:
self.random(args.random)
def random(self, level):
"""
        Randomly picks one of the Akgul texts (each witnessed by at least one
        person) matching the given difficulty level and prints it.
:param level:
:return:
"""
akgul_db = {
'low': [
"Seneye bekleriz. LKD uyelei lutfen dvet beklemeyin parcasıdı. LKD akadeiik Bilşimin organik bir parcasıdır. Mustafa Akgul",
"Aslında tum seminerler icin bilmeyene hitap edecek , yeterli detayda, seminmer ozeti isterim. Baslik ve konusmaci ve epostasi belli olduktan sonra bir 'stub acip, opemnconf uzeriden girilmesini rica ediyorum.",
""
],
'medium': [
"Bilgisayar Mugendislii/Bilim egitini, Yürkieyenin yazılım startejisi ve belki Ümiveristelrde özgür yzılım kullanımı konularınd apanel olacak LKD den konusmaci istiyoruz.",
"Okudugunu anlamayanlar ülkesi: katılamayacaklar oln mail atsin diyoruz. su ikiis yanlis gondermis - cikattmıyoru",
"bu ucune sşizn kurs iicn ben kabul mektubu uretip dizne koyacagimsiz github'a eklediniz dimi?"
],
'hardcore': ["Erdem Bayer'e Akgül hocadan gelen mesajı okuyanlar anlar.."]
}
print choice(akgul_db[level])
def encode(self, text):
"""
        Basically splits the text on whitespace and subjects the words to
        random rotations depending on their length.
:param text:
:return:
"""
words = []
char_from_previous = None
for word in text.split():
if randint(0, 10) < 2 and len(word) > 3:
                # With roughly 20% probability, misorder the characters
word = self.__swap(word, randint(1, len(word)-2))
if char_from_previous:
                # If the previous word's last letter was held back, prepend it
word = char_from_previous + word
char_from_previous = None
elif randint(0, 10) < 2:
char_from_previous = word[-1]
word = word[:-1]
words.append(word)
print " ".join(words)
def __swap(self, strg, n):
"""
        Swap the characters at positions n and n+1 of the given string.
:param strg:
:param n:
:return:
"""
return strg[:n] + strg[n+1] + strg[n] + strg[n+2:]
if __name__ == "__main__":
parser = ArgumentParser(description="Mustafa Akgül'un askerleriyiz..!")
parser.add_argument("-e", "--encode", help="Verilen metni Akgüller.")
parser.add_argument("-r", "--random", choices=['low', 'medium', 'hardcore'], default="low",
help="Bilinen Akgül metinlerinden birini rastgele seçer.")
args = parser.parse_args()
main = Akgulyzer(args) | gpl-2.0 | 1,414,536,402,941,925,000 | 38.192771 | 231 | 0.583948 | false |
janpipek/pyearcal | pyearcal/image_sources.py | 1 | 1485 | import abc
import os
import fnmatch
import random
from typing import Dict
from collections import OrderedDict
class ImageDirectory(abc.ABC):
def __getitem__(self, index: int) -> str:
return self.images[index]
images: Dict[int, str]
def __iter__(self):
# yield from self.images.values()
for image in self.images.values():
yield image
class SortedImageDirectory(ImageDirectory):
def __init__(self, dirname=".", extension=".jpg"):
self.dirname = dirname
self.extension = extension
self.read_images()
def read_images(self):
self.images = OrderedDict()
for index in range(1, 13):
path = os.path.join(self.dirname, str(index) + self.extension)
if os.path.exists(path):
self.images[index] = path
else:
raise Exception("File does not exist: " + path)
class UnsortedImageDirectory(ImageDirectory):
def __init__(self, dirname=".", pattern="*.jpg"):
self.dirname = dirname
self.pattern = pattern
self.read_images()
def read_images(self):
self.images = OrderedDict()
all_file_names = [
fn for fn in os.listdir(self.dirname) if fnmatch.fnmatch(fn, self.pattern)
]
sampled_file_names = random.sample(all_file_names, 12)
for index, name in enumerate(sampled_file_names):
self.images[index + 1] = os.path.join(self.dirname, name)
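# Illustrative usage (the "photos" directory below is hypothetical): both
# flavours expose one image path per calendar month.
#
#     months = SortedImageDirectory("photos")       # expects 1.jpg .. 12.jpg
#     random12 = UnsortedImageDirectory("photos")   # samples 12 of *.jpg
#     for path in random12:
#         print(path)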
| mit | -7,820,184,735,028,925,000 | 28.117647 | 86 | 0.607407 | false |
wdv4758h/rst2html5slides | test/test_output_dir.py | 1 | 4833 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from io import open
from os import makedirs, urandom
from os.path import join, exists
from shutil import rmtree
from tempfile import mkdtemp
from rst2html5slides import SlideWriter
from docutils.core import publish_file, publish_string
presentation = '''.. presentation::
:distribution: grid
.. role:: small
.. class:: capa
Presentation Title
==================
Agenda
======
* Topic 1
* Topic 2
* Topic 3
|logotipo|
.. class:: chapter
Chapter 1
=========
Schema
======
|tdd cycle|
----
|python logo|
.. include:: junit.rst
.. title:: Testes Automatizados de Software
.. meta::
:generator: rst2html5slides https://bitbucket.org/andre_felipe_dias/rst2html5slides
:author: André Felipe Dias
.. |logotipo| image:: imagens/logotipo.png
.. |tdd cycle| image:: imagens/tdd_cycle.png
.. |python logo| image:: https://www.python.org/static/community_logos/python-logo-master-v3-TM.png
'''
junit = '''JUnit
=====
JUnit is a testing framework'''
css = 'div {background-color: red}'
source_dir = mkdtemp()
source_path = join(source_dir, 'presentation.rst')
def setup():
makedirs(join(source_dir, 'imagens'))
makedirs(join(source_dir, 'css'))
with open(source_path, 'w', encoding='utf-8') as f:
f.write(presentation)
with open(join(source_dir, 'junit.rst'), 'w', encoding='utf-8') as f:
f.write(junit)
with open(join(source_dir, 'css', 'style.css'), 'w', encoding='utf-8') as f:
f.write(css)
with open(join(source_dir, 'imagens', 'tdd_cycle.png'), 'wb') as f:
f.write(urandom(2 ** 16))
with open(join(source_dir, 'imagens', 'not_used.png'), 'wb') as f:
f.write(urandom(2 ** 11))
with open(join(source_dir, 'imagens', 'logotipo.png'), 'wb') as f:
f.write(urandom(2 ** 15))
def teardown():
rmtree(source_dir)
def test_destination_dir():
dest_dir = mkdtemp()
output = publish_file(
writer=SlideWriter(), source_path=source_path,
destination_path=dest_dir,
settings_overrides={'stylesheet': [join('css', 'style.css')], 'presentation': 'jmpress.js'}
)
assert exists(join(dest_dir, 'presentation.html'))
assert exists(join(dest_dir, 'css', 'style.css'))
assert exists(join(dest_dir, 'imagens', 'tdd_cycle.png'))
assert exists(join(dest_dir, 'imagens', 'logotipo.png'))
assert exists(join(dest_dir, 'css', 'slides.css'))
assert exists(join(dest_dir, 'js'))
assert not exists(join(dest_dir, 'imagens', 'not_used.png'))
assert str('<link href="css/slides.css"') in output
assert str('<script src="js/jquery.min.js">') in output
assert str('<link href="css/style.css"') in output
assert str('src="https://www.python.org') in output
rmtree(dest_dir)
def test_destination_path():
dest_dir = mkdtemp()
output = publish_file(
writer=SlideWriter(), source_path=source_path,
destination_path=join(dest_dir, 'slides.html'),
settings_overrides={'stylesheet': [join('css', 'style.css')], 'presentation': 'jmpress.js'}
)
assert exists(join(dest_dir, 'slides.html'))
assert not exists(join(dest_dir, 'presentation.html'))
assert exists(join(dest_dir, 'css', 'style.css'))
assert exists(join(dest_dir, 'imagens', 'tdd_cycle.png'))
assert exists(join(dest_dir, 'imagens', 'logotipo.png'))
assert not exists(join(dest_dir, 'imagens', 'not_used.png'))
assert exists(join(dest_dir, 'css', 'slides.css'))
assert exists(join(dest_dir, 'js'))
assert str('<link href="css/slides.css"') in output
assert str('<script src="js/jquery.min.js">') in output
assert str('<link href="css/style.css"') in output
assert str('src="https://www.python.org') in output
rmtree(dest_dir)
def test_no_destination():
dest_dir = mkdtemp()
os.chdir(dest_dir)
output = publish_string(
writer=SlideWriter(), source=presentation, source_path=source_path,
settings_overrides={'stylesheet': [join('css', 'style.css')],
'output_encoding': 'unicode',
'presentation': 'jmpress.js'}
)
assert not exists(join(dest_dir, 'presentation.html'))
assert not exists(join(dest_dir, 'css', 'style.css'))
assert not exists(join(dest_dir, 'imagens', 'tdd_cycle.png'))
assert not exists(join(dest_dir, 'imagens', 'logotipo.png'))
assert not exists(join(dest_dir, 'imagens', 'not_used.png'))
assert not exists(join(dest_dir, 'css', 'slides.css'))
assert not exists(join(dest_dir, 'js'))
assert str('<link href="css/slides.css"') in output
assert str('<script src="js/jquery.min.js">') in output
assert '<link href="css/style.css"' in output
assert 'src="https://www.python.org' in output
rmtree(dest_dir)
| mit | 6,010,415,876,726,808,000 | 29.974359 | 99 | 0.641349 | false |
RexFuzzle/sfepy | sfepy/discrete/iga/extmods/setup.py | 1 | 1281 | #!/usr/bin/env python
def configuration(parent_package='', top_path=None):
import os.path as op
from numpy.distutils.misc_util import Configuration
from sfepy import Config
site_config = Config()
os_flag = {'posix' : 0, 'windows' : 1}
auto_dir = op.dirname(__file__)
auto_name = op.split(auto_dir)[-1]
config = Configuration(auto_name, parent_package, top_path)
defines = [('__SDIR__', "'\"%s\"'" % auto_dir),
('SFEPY_PLATFORM', os_flag[site_config.system()])]
if '-DDEBUG_FMF' in site_config.debug_flags():
defines.append(('DEBUG_FMF', None))
common_path = '../../common/extmods'
fem_src = ['fmfield.c', 'geommech.c', 'common_python.c']
fem_src = [op.join(common_path, ii) for ii in fem_src]
src = ['igac.pyx', 'nurbs.c']
config.add_extension('igac',
sources=src + fem_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir, common_path],
define_macros=defines)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause | 5,189,118,304,611,245,000 | 32.710526 | 72 | 0.568306 | false |
c0710204/mirrorsBistu | pypi/bandersnatch/lib/python2.7/site-packages/pyrepl/reader.py | 1 | 20539 | # Copyright 2000-2010 Michael Hudson-Doyle <[email protected]>
# Antonio Cuni
# Armin Rigo
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
import unicodedata
from pyrepl import commands
from pyrepl import input
try:
unicode
except NameError:
unicode = str
unichr = chr
basestring = bytes, str
def _make_unctrl_map():
uc_map = {}
for c in map(unichr, range(256)):
if unicodedata.category(c)[0] != 'C':
uc_map[c] = c
for i in range(32):
c = unichr(i)
uc_map[c] = '^' + unichr(ord('A') + i - 1)
    uc_map[b'\t'] = '    '  # display TABs as 4 characters
uc_map[b'\177'] = unicode('^?')
for i in range(256):
c = unichr(i)
if c not in uc_map:
uc_map[c] = unicode('\\%03o') % i
return uc_map
def _my_unctrl(c, u=_make_unctrl_map()):
if c in u:
return u[c]
else:
if unicodedata.category(c).startswith('C'):
return b'\u%04x' % ord(c)
else:
return c
def disp_str(buffer, join=''.join, uc=_my_unctrl):
""" disp_str(buffer:string) -> (string, [int])
    Return the string that should be the printed representation of
|buffer| and a list detailing where the characters of |buffer|
get used up. E.g.:
>>> disp_str(chr(3))
('^C', [1, 0])
the list always contains 0s or 1s at present; it could conceivably
go higher as and when unicode support happens."""
# disp_str proved to be a bottleneck for large inputs,
# so it needs to be rewritten in C; it's not required though.
s = [uc(x) for x in buffer]
b = [] # XXX: bytearray
for x in s:
b.append(1)
b.extend([0] * (len(x) - 1))
return join(s), b
del _my_unctrl
del _make_unctrl_map
# syntax classes:
[SYNTAX_WHITESPACE,
SYNTAX_WORD,
SYNTAX_SYMBOL] = range(3)
def make_default_syntax_table():
# XXX perhaps should use some unicodedata here?
st = {}
for c in map(unichr, range(256)):
st[c] = SYNTAX_SYMBOL
for c in [a for a in map(unichr, range(256)) if a.isalpha()]:
st[c] = SYNTAX_WORD
st[unicode('\n')] = st[unicode(' ')] = SYNTAX_WHITESPACE
return st
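# Sketch of the effect: with this table, word-motion helpers such as bow()
# and eow() in Reader treat runs of alphabetic characters as words, while
# whitespace and symbols act as boundaries.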
default_keymap = tuple(
[(r'\C-a', 'beginning-of-line'),
(r'\C-b', 'left'),
(r'\C-c', 'interrupt'),
(r'\C-d', 'delete'),
(r'\C-e', 'end-of-line'),
(r'\C-f', 'right'),
(r'\C-g', 'cancel'),
(r'\C-h', 'backspace'),
(r'\C-j', 'accept'),
(r'\<return>', 'accept'),
(r'\C-k', 'kill-line'),
(r'\C-l', 'clear-screen'),
(r'\C-m', 'accept'),
(r'\C-q', 'quoted-insert'),
(r'\C-t', 'transpose-characters'),
(r'\C-u', 'unix-line-discard'),
(r'\C-v', 'quoted-insert'),
(r'\C-w', 'unix-word-rubout'),
(r'\C-x\C-u', 'upcase-region'),
(r'\C-y', 'yank'),
(r'\C-z', 'suspend'),
(r'\M-b', 'backward-word'),
(r'\M-c', 'capitalize-word'),
(r'\M-d', 'kill-word'),
(r'\M-f', 'forward-word'),
(r'\M-l', 'downcase-word'),
(r'\M-t', 'transpose-words'),
(r'\M-u', 'upcase-word'),
(r'\M-y', 'yank-pop'),
(r'\M--', 'digit-arg'),
(r'\M-0', 'digit-arg'),
(r'\M-1', 'digit-arg'),
(r'\M-2', 'digit-arg'),
(r'\M-3', 'digit-arg'),
(r'\M-4', 'digit-arg'),
(r'\M-5', 'digit-arg'),
(r'\M-6', 'digit-arg'),
(r'\M-7', 'digit-arg'),
(r'\M-8', 'digit-arg'),
(r'\M-9', 'digit-arg'),
#(r'\M-\n', 'insert-nl'),
('\\\\', 'self-insert')] + \
[(c, 'self-insert')
for c in map(chr, range(32, 127)) if c != '\\'] + \
[(c, 'self-insert')
for c in map(chr, range(128, 256)) if c.isalpha()] + \
[(r'\<up>', 'up'),
(r'\<down>', 'down'),
(r'\<left>', 'left'),
(r'\<right>', 'right'),
(r'\<insert>', 'quoted-insert'),
(r'\<delete>', 'delete'),
(r'\<backspace>', 'backspace'),
(r'\M-\<backspace>', 'backward-kill-word'),
(r'\<end>', 'end-of-line'), # was 'end'
(r'\<home>', 'beginning-of-line'), # was 'home'
(r'\<f1>', 'help'),
(r'\EOF', 'end'), # the entries in the terminfo database for xterms
(r'\EOH', 'home'), # seem to be wrong. this is a less than ideal
# workaround
])
if 'c' in globals(): # only on python 2.x
del c # from the listcomps
class Reader(object):
"""The Reader class implements the bare bones of a command reader,
handling such details as editing and cursor motion. What it does
not support are such things as completion or history support -
these are implemented elsewhere.
Instance variables of note include:
* buffer:
A *list* (*not* a string at the moment :-) containing all the
characters that have been entered.
* console:
Hopefully encapsulates the OS dependent stuff.
* pos:
A 0-based index into `buffer' for where the insertion point
is.
* screeninfo:
Ahem. This list contains some info needed to move the
insertion point around reasonably efficiently. I'd like to
get rid of it, because its contents are obtuse (to put it
mildly) but I haven't worked out if that is possible yet.
* cxy, lxy:
the position of the insertion point in screen ... XXX
* syntax_table:
Dictionary mapping characters to `syntax class'; read the
emacs docs to see what this means :-)
* commands:
Dictionary mapping command names to command classes.
* arg:
The emacs-style prefix argument. It will be None if no such
argument has been provided.
* dirty:
True if we need to refresh the display.
* kill_ring:
The emacs-style kill-ring; manipulated with yank & yank-pop
* ps1, ps2, ps3, ps4:
prompts. ps1 is the prompt for a one-line input; for a
multiline input it looks like:
ps2> first line of input goes here
ps3> second and further
ps3> lines get ps3
...
ps4> and the last one gets ps4
As with the usual top-level, you can set these to instances if
you like; str() will be called on them (once) at the beginning
of each command. Don't put really long or newline containing
strings here, please!
This is just the default policy; you can change it freely by
overriding get_prompt() (and indeed some standard subclasses
do).
* finished:
handle1 will set this to a true value if a command signals
that we're done.
"""
help_text = """\
This is pyrepl. Hear my roar.
Helpful text may appear here at some point in the future when I'm
feeling more loquacious than I am now."""
msg_at_bottom = True
def __init__(self, console):
self.buffer = []
self.ps1 = "->> "
self.ps2 = "/>> "
self.ps3 = "|.. "
self.ps4 = "\__ "
self.kill_ring = []
self.arg = None
self.finished = 0
self.console = console
self.commands = {}
self.msg = ''
for v in vars(commands).values():
if (isinstance(v, type)
and issubclass(v, commands.Command)
and v.__name__[0].islower()):
self.commands[v.__name__] = v
self.commands[v.__name__.replace('_', '-')] = v
self.syntax_table = make_default_syntax_table()
self.input_trans_stack = []
self.keymap = self.collect_keymap()
self.input_trans = input.KeymapTranslator(
self.keymap,
invalid_cls='invalid-key',
character_cls='self-insert')
def collect_keymap(self):
return default_keymap
def calc_screen(self):
"""The purpose of this method is to translate changes in
self.buffer into changes in self.screen. Currently it rips
everything down and starts from scratch, which whilst not
especially efficient is certainly simple(r).
"""
lines = self.get_unicode().split("\n")
screen = []
screeninfo = []
w = self.console.width - 1
p = self.pos
for ln, line in zip(range(len(lines)), lines):
ll = len(line)
if 0 <= p <= ll:
if self.msg and not self.msg_at_bottom:
for mline in self.msg.split("\n"):
screen.append(mline)
screeninfo.append((0, []))
self.lxy = p, ln
prompt = self.get_prompt(ln, ll >= p >= 0)
while '\n' in prompt:
pre_prompt, _, prompt = prompt.partition('\n')
screen.append(pre_prompt)
screeninfo.append((0, []))
p -= ll + 1
prompt, lp = self.process_prompt(prompt)
l, l2 = disp_str(line)
wrapcount = (len(l) + lp) // w
if wrapcount == 0:
screen.append(prompt + l)
screeninfo.append((lp, l2 + [1]))
else:
screen.append(prompt + l[:w - lp] + "\\")
screeninfo.append((lp, l2[:w - lp]))
for i in range(-lp + w, -lp + wrapcount * w, w):
screen.append(l[i:i + w] + "\\")
screeninfo.append((0, l2[i:i + w]))
screen.append(l[wrapcount * w - lp:])
screeninfo.append((0, l2[wrapcount * w - lp:] + [1]))
self.screeninfo = screeninfo
self.cxy = self.pos2xy(self.pos)
if self.msg and self.msg_at_bottom:
for mline in self.msg.split("\n"):
screen.append(mline)
screeninfo.append((0, []))
return screen
def process_prompt(self, prompt):
""" Process the prompt.
        This means calculating the length of the prompt. The characters \x01
and \x02 are used to bracket ANSI control sequences and need to be
excluded from the length calculation. So also a copy of the prompt
is returned with these control characters removed. """
out_prompt = ''
l = len(prompt)
pos = 0
while True:
s = prompt.find('\x01', pos)
if s == -1:
break
e = prompt.find('\x02', s)
if e == -1:
break
# Found start and end brackets, subtract from string length
l = l - (e - s + 1)
out_prompt += prompt[pos:s] + prompt[s + 1:e]
pos = e + 1
out_prompt += prompt[pos:]
return out_prompt, l
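    # Illustrative sketch (hypothetical prompt): ANSI colour codes wrapped in
    # \x01 ... \x02 stay in the returned copy but drop the markers, and only
    # the 4 visible characters count toward the length:
    #
    #     process_prompt('\x01\x1b[32m\x02>>> \x01\x1b[0m\x02')
    #     # -> ('\x1b[32m>>> \x1b[0m', 4)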
def bow(self, p=None):
"""Return the 0-based index of the word break preceding p most
immediately.
p defaults to self.pos; word boundaries are determined using
self.syntax_table."""
if p is None:
p = self.pos
st = self.syntax_table
b = self.buffer
p -= 1
while p >= 0 and st.get(b[p], SYNTAX_WORD) != SYNTAX_WORD:
p -= 1
while p >= 0 and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD:
p -= 1
return p + 1
def eow(self, p=None):
"""Return the 0-based index of the word break following p most
immediately.
p defaults to self.pos; word boundaries are determined using
self.syntax_table."""
if p is None:
p = self.pos
st = self.syntax_table
b = self.buffer
while p < len(b) and st.get(b[p], SYNTAX_WORD) != SYNTAX_WORD:
p += 1
while p < len(b) and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD:
p += 1
return p
def bol(self, p=None):
"""Return the 0-based index of the line break preceding p most
immediately.
p defaults to self.pos."""
# XXX there are problems here.
if p is None:
p = self.pos
b = self.buffer
p -= 1
while p >= 0 and b[p] != '\n':
p -= 1
return p + 1
def eol(self, p=None):
"""Return the 0-based index of the line break following p most
immediately.
p defaults to self.pos."""
if p is None:
p = self.pos
b = self.buffer
while p < len(b) and b[p] != '\n':
p += 1
return p
def get_arg(self, default=1):
"""Return any prefix argument that the user has supplied,
returning `default' if there is None. `default' defaults
(groan) to 1."""
if self.arg is None:
return default
else:
return self.arg
def get_prompt(self, lineno, cursor_on_line):
"""Return what should be in the left-hand margin for line
`lineno'."""
if self.arg is not None and cursor_on_line:
return "(arg: %s) " % self.arg
if "\n" in self.buffer:
if lineno == 0:
res = self.ps2
elif lineno == self.buffer.count("\n"):
res = self.ps4
else:
res = self.ps3
else:
res = self.ps1
# Lazily call str() on self.psN, and cache the results using as key
# the object on which str() was called. This ensures that even if the
# same object is used e.g. for ps1 and ps2, str() is called only once.
if res not in self._pscache:
self._pscache[res] = str(res)
return self._pscache[res]
def push_input_trans(self, itrans):
self.input_trans_stack.append(self.input_trans)
self.input_trans = itrans
def pop_input_trans(self):
self.input_trans = self.input_trans_stack.pop()
def pos2xy(self, pos):
"""Return the x, y coordinates of position 'pos'."""
# this *is* incomprehensible, yes.
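        # (For orientation: screeninfo stores, per screen row, the prompt
        # length and a per-column list with 1 where a buffer character starts
        # and 0 for continuation columns -- see disp_str() above.)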
y = 0
assert 0 <= pos <= len(self.buffer)
if pos == len(self.buffer):
y = len(self.screeninfo) - 1
p, l2 = self.screeninfo[y]
return p + len(l2) - 1, y
else:
for p, l2 in self.screeninfo:
l = l2.count(1)
if l > pos:
break
else:
pos -= l
y += 1
c = 0
i = 0
while c < pos:
c += l2[i]
i += 1
while l2[i] == 0:
i += 1
return p + i, y
def insert(self, text):
"""Insert 'text' at the insertion point."""
self.buffer[self.pos:self.pos] = list(text)
self.pos += len(text)
self.dirty = 1
def update_cursor(self):
"""Move the cursor to reflect changes in self.pos"""
self.cxy = self.pos2xy(self.pos)
self.console.move_cursor(*self.cxy)
def after_command(self, cmd):
"""This function is called to allow post command cleanup."""
if getattr(cmd, "kills_digit_arg", 1):
if self.arg is not None:
self.dirty = 1
self.arg = None
def prepare(self):
"""Get ready to run. Call restore when finished. You must not
write to the console in between the calls to prepare and
restore."""
try:
self.console.prepare()
self.arg = None
self.screeninfo = []
self.finished = 0
del self.buffer[:]
self.pos = 0
self.dirty = 1
self.last_command = None
self._pscache = {}
except:
self.restore()
raise
def last_command_is(self, klass):
if not self.last_command:
return 0
return issubclass(klass, self.last_command)
def restore(self):
"""Clean up after a run."""
self.console.restore()
def finish(self):
"""Called when a command signals that we're finished."""
pass
def error(self, msg="none"):
self.msg = "! " + msg + " "
self.dirty = 1
self.console.beep()
def update_screen(self):
if self.dirty:
self.refresh()
def refresh(self):
"""Recalculate and refresh the screen."""
# this call sets up self.cxy, so call it first.
screen = self.calc_screen()
self.console.refresh(screen, self.cxy)
self.dirty = 0 # forgot this for a while (blush)
def do_cmd(self, cmd):
#print cmd
if isinstance(cmd[0], basestring):
#XXX: unify to text
cmd = self.commands.get(cmd[0],
commands.invalid_command)(self, *cmd)
elif isinstance(cmd[0], type):
cmd = cmd[0](self, cmd)
else:
return # nothing to do
cmd.do()
self.after_command(cmd)
if self.dirty:
self.refresh()
else:
self.update_cursor()
if not isinstance(cmd, commands.digit_arg):
self.last_command = cmd.__class__
self.finished = cmd.finish
if self.finished:
self.console.finish()
self.finish()
def handle1(self, block=1):
"""Handle a single event. Wait as long as it takes if block
is true (the default), otherwise return None if no event is
pending."""
if self.msg:
self.msg = ''
self.dirty = 1
while 1:
event = self.console.get_event(block)
if not event: # can only happen if we're not blocking
return None
translate = True
if event.evt == 'key':
self.input_trans.push(event)
elif event.evt == 'scroll':
self.refresh()
elif event.evt == 'resize':
self.refresh()
else:
translate = False
if translate:
cmd = self.input_trans.get()
else:
cmd = event.evt, event.data
if cmd is None:
if block:
continue
else:
return None
self.do_cmd(cmd)
return 1
def push_char(self, char):
self.console.push_char(char)
self.handle1(0)
def readline(self, returns_unicode=False, startup_hook=None):
"""Read a line. The implementation of this method also shows
how to drive Reader if you want more control over the event
loop."""
self.prepare()
try:
if startup_hook is not None:
startup_hook()
self.refresh()
while not self.finished:
self.handle1()
if returns_unicode:
return self.get_unicode()
return self.get_buffer()
finally:
self.restore()
def bind(self, spec, command):
self.keymap = self.keymap + ((spec, command),)
self.input_trans = input.KeymapTranslator(
self.keymap,
invalid_cls='invalid-key',
character_cls='self-insert')
def get_buffer(self, encoding=None):
if encoding is None:
encoding = self.console.encoding
return unicode('').join(self.buffer).encode(self.console.encoding)
def get_unicode(self):
"""Return the current buffer as a unicode string."""
return unicode('').join(self.buffer)
def test():
from pyrepl.unix_console import UnixConsole
reader = Reader(UnixConsole())
reader.ps1 = "**> "
reader.ps2 = "/*> "
reader.ps3 = "|*> "
reader.ps4 = "\*> "
while reader.readline():
pass
if __name__ == '__main__':
test()
| mit | 4,925,212,020,770,701,000 | 31.19279 | 78 | 0.528263 | false |
Yam-cn/potato | testcases/technical_roc_test.py | 1 | 3010 | # PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import common
from engine.technical import roc
from engine import dataseries
class ROCTestCase(common.TestCase):
def __buildROC(self, values, period, rocMaxLen=None):
seqDS = dataseries.SequenceDataSeries()
ret = roc.RateOfChange(seqDS, period, rocMaxLen)
for value in values:
seqDS.append(value)
return ret
def testPeriod12(self):
# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:rate_of_change
inputValues = [11045.27, 11167.32, 11008.61, 11151.83, 10926.77, 10868.12, 10520.32, 10380.43, 10785.14, 10748.26, 10896.91, 10782.95, 10620.16, 10625.83, 10510.95, 10444.37, 10068.01, 10193.39, 10066.57, 10043.75]
roc_ = self.__buildROC(inputValues, 12)
outputValues = [-3.85, -4.85, -4.52, -6.34, -7.86, -6.21, -4.31, -3.24]
for i in range(len(outputValues)):
outputValue = roc_[12 + i] * 100
self.assertTrue(round(outputValue, 2) == outputValues[i])
self.assertEqual(len(roc_.getDateTimes()), len(inputValues))
for i in range(len(roc_)):
self.assertEqual(roc_.getDateTimes()[i], None)
def testPeriod1(self):
def simple_roc(value1, value2):
return self.__buildROC([value1, value2], 1)[1]
self.assertTrue(simple_roc(1, 2) == 1)
self.assertTrue(simple_roc(1, 2) == simple_roc(50, 100))
self.assertTrue(simple_roc(2, 1) == -0.5)
self.assertTrue(simple_roc(2, 1) == simple_roc(100, 50))
def testBounded(self):
# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:rate_of_change
inputValues = [11045.27, 11167.32, 11008.61, 11151.83, 10926.77, 10868.12, 10520.32, 10380.43, 10785.14, 10748.26, 10896.91, 10782.95, 10620.16, 10625.83, 10510.95, 10444.37, 10068.01, 10193.39, 10066.57, 10043.75]
outputValues = [-4.31, -3.24]
roc_ = self.__buildROC(inputValues, 12, 2)
for i in xrange(2):
self.assertEqual(round(roc_[i], 4), round(outputValues[i] / 100, 4))
def testZeroes(self):
inputValues = [0, 0, 0]
outputValues = [None, 0, 0]
roc_ = self.__buildROC(inputValues, 1)
for i in xrange(len(inputValues)):
self.assertEqual(roc_[i], outputValues[i])
| apache-2.0 | 7,775,907,390,913,269,000 | 42 | 222 | 0.654817 | false |
mmisiewicz/slask | limbo/plugins/pager.py | 1 | 3469 | import re
import urllib2
import json
def pager_response(text):
"""!oncall|!pager (add "link" for url to pager rotation page)"""
match = re.match('!oncall|!pager', text, re.IGNORECASE)
if not match:
return False
if "link" in match.string:
return "https://corpwiki.appnexus.com/x/xxsaAQ"
return maestro_pager_response() or "Not Found"
# r = requests.get(URL, auth=(username, password), verify=False)
# soup = BeautifulSoup(r.text)
# tables = soup.find_all('table', 'confluenceTable')
# table_call = tables[0].find_all('td')
# list_call = [i.text for i in table_call]
# reg = re.compile("(\d+)\D+(\d+)\D+(\d+)\D+(\d+)")
# def time_range(t):
# month = datetime.now().month
# day = datetime.now().day
# return (int(t[0]) < month <=int(t[2]) and int(t[3]) >= day) \
# or (int(t[0]) <= month < int(t[2]) and int(t[1]) <= day) \
# or (int(t[0]) <= month <= int(t[2]) and (int(t[3]) >= day >= int(t[1])))
#
# response = None
# for i in range(0, len(list_call), 3):
# match = reg.match(list_call[i])
# if time_range(match.groups()):
# response = "Primary: {}, Secondary: {}".format(list_call[i+1], list_call[i+2])
# return response or "Not Found"
# maestro pager code borrowed from data-bot.
def __join_oncall_info(user_infos):
""" does the joining across the rseponse from maestro3's usergroup map service
and the timeperiods service, returning a tuple3 of (username, timeperiod_name, hours)
where hours are on call for day_of_week. If hours is null or the user is deleted
an entry is not returned day_of_week is expected to be lower case"""
results = []
for user_info in user_infos:
results.append(user_info['username'])
# if not user_info['deleted']:
# # XXX: ignoring out of bounds for now
# period = periods[user_info['nagios_timeperiod_id']]
# on_call_timerange = period[day_of_week]
# if on_call_timerange:
# results.append((user_info['username'], period['timeperiod_name'], on_call_timerange))
return results
# def __get_timeperiods_dict():
# timeperiods_resp = urllib2.urlopen('http://maestro3-api.adnxs.net/nagios-timeperiod').read()
# periods = {}
# for period in json.loads(timeperiods_resp)['response']['nagios_timeperiods']:
# periods[period['id']] = period
# return periods
def maestro_pager_response():
# periods = __get_timeperiods_dict()
# day_of_week = datetime.now().strftime("%A").lower()
on_pager_resp = urllib2.urlopen('http://maestro3-api.adnxs.net/nagios-usergroup-map?nagios_usergroup_id=20&pager=1').read()
on_pagers = __join_oncall_info(json.loads(on_pager_resp)['response']['nagios_usergroup_maps'])
on_escalation_resp = urllib2.urlopen('http://maestro3-api.adnxs.net/nagios-usergroup-map?nagios_usergroup_id=20&escalation=1').read()
on_escalations = __join_oncall_info(json.loads(on_escalation_resp)['response']['nagios_usergroup_maps'])
on_pager_section = ','.join([' %s' % on_pager for on_pager in on_pagers])
on_escalation_section = ','.join([' %s' % on_escalation for on_escalation in on_escalations])
reply = '```Primary:%s\nSecondary:%s```' % (on_pager_section, on_escalation_section)
return reply
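# Illustrative reply shape (the names are made up):
#
#     ```Primary: alice, bob
#     Secondary: carol```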
def on_message(msg, server):
text = msg.get("text", "")
return pager_response(text)
| mit | -5,478,028,948,386,106,000 | 42.911392 | 137 | 0.62237 | false |
yephper/django | tests/template_backends/test_dummy.py | 1 | 3700 | # coding: utf-8
from __future__ import unicode_literals
from django.forms import CharField, Form, Media
from django.http import HttpRequest
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.dummy import TemplateStrings
from django.test import SimpleTestCase
class TemplateStringsTests(SimpleTestCase):
engine_class = TemplateStrings
backend_name = 'dummy'
options = {}
@classmethod
def setUpClass(cls):
super(TemplateStringsTests, cls).setUpClass()
params = {
'DIRS': [],
'APP_DIRS': True,
'NAME': cls.backend_name,
'OPTIONS': cls.options,
}
cls.engine = cls.engine_class(params)
def test_from_string(self):
template = self.engine.from_string("Hello!\n")
content = template.render()
self.assertEqual(content, "Hello!\n")
def test_get_template(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'world'})
self.assertEqual(content, "Hello world!\n")
def test_get_template_non_existing(self):
with self.assertRaises(TemplateDoesNotExist) as e:
self.engine.get_template('template_backends/non_existing.html')
self.assertEqual(e.exception.backend, self.engine)
def test_get_template_syntax_error(self):
# There's no way to trigger a syntax error with the dummy backend.
# The test still lives here to factor it between other backends.
if self.backend_name == 'dummy':
self.skipTest("test doesn't apply to dummy backend")
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('template_backends/syntax_error.html')
def test_html_escaping(self):
template = self.engine.get_template('template_backends/hello.html')
context = {'name': '<script>alert("XSS!");</script>'}
content = template.render(context)
self.assertIn('<script>', content)
self.assertNotIn('<script>', content)
def test_django_html_escaping(self):
if self.backend_name == 'dummy':
self.skipTest("test doesn't apply to dummy backend")
class TestForm(Form):
test_field = CharField()
media = Media(js=['my-script.js'])
form = TestForm()
template = self.engine.get_template('template_backends/django_escaping.html')
content = template.render({'media': media, 'test_form': form})
expected = '{}\n\n{}\n\n{}'.format(media, form, form['test_field'])
self.assertHTMLEqual(content, expected)
def test_csrf_token(self):
request = HttpRequest()
CsrfViewMiddleware().process_view(request, lambda r: None, (), {})
template = self.engine.get_template('template_backends/csrf.html')
content = template.render(request=request)
expected = (
'<input type="hidden" name="csrfmiddlewaretoken" '
'value="{}" />'.format(get_token(request)))
self.assertHTMLEqual(content, expected)
def test_no_directory_traversal(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('../forbidden/template_backends/hello.html')
def test_non_ascii_characters(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'Jérôme'})
self.assertEqual(content, "Hello Jérôme!\n")
| bsd-3-clause | 4,978,009,301,455,675,000 | 36.103093 | 85 | 0.633929 | false |
apache/allura | AlluraTest/alluratest/smtp_debug.py | 2 | 1307 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from smtpd import DebuggingServer
class BetterDebuggingServer(DebuggingServer, object):
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
print('TO: ' + ', '.join(rcpttos))
super(BetterDebuggingServer, self).process_message(peer, mailfrom, rcpttos, data) | apache-2.0 | 6,195,972,793,576,462,000 | 42.6 | 89 | 0.703137 | false |
emulbreh/lymph | lymph/tests/integration/test_zookeeper_discovery.py | 1 | 1871 | from lymph.core.decorators import rpc
from lymph.core.interfaces import Interface
from lymph.discovery.zookeeper import ZookeeperServiceRegistry
from lymph.events.null import NullEventSystem
from lymph.testing import LymphIntegrationTestCase
class Upper(Interface):
service_type = 'upper'
@rpc()
def upper(self, text=None):
return text.upper()
class ZookeeperIntegrationTest(LymphIntegrationTestCase):
use_zookeeper = True
def setUp(self):
super(ZookeeperIntegrationTest, self).setUp()
self.registry = ZookeeperServiceRegistry(self.hosts)
self.events = NullEventSystem()
self.upper_container, interface = self.create_container(Upper, 'upper')
self.lymph_client = self.create_client()
def tearDown(self):
self.upper_container.stop()
self.lymph_client.container.stop()
self.upper_container.join()
self.lymph_client.container.join()
super(ZookeeperIntegrationTest, self).tearDown()
def test_lookup(self):
service = self.lymph_client.container.lookup('upper')
self.assertEqual(len(service), 1)
self.assertEqual(next(iter(service)).endpoint, self.upper_container.endpoint)
def test_upper(self):
reply = self.lymph_client.request(self.upper_container.endpoint, 'upper.upper', {'text': 'foo'})
self.assertEqual(reply.body, 'FOO')
def test_ping(self):
reply = self.lymph_client.request(self.upper_container.endpoint, 'lymph.ping', {'payload': 42})
self.assertEqual(reply.body, 42)
def test_status(self):
reply = self.lymph_client.request(self.upper_container.endpoint, 'lymph.status', {})
self.assertEqual(reply.body, {
'endpoint': self.upper_container.endpoint,
'identity': self.upper_container.identity,
'config': {},
})
| apache-2.0 | 1,861,986,741,842,466,300 | 34.301887 | 104 | 0.679316 | false |
mathiasertl/django-xmpp-server-list | xmpplist/settings.py | 1 | 8688 | # This file is part of django-xmpp-server-list
# (https://github.com/mathiasertl/django-xmpp-server-list).
#
# django-xmpp-server-list is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# xmpplist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-xmpp-server-list. If not, see <http://www.gnu.org/licenses/>.
"""Django settings for django-xmpp-server-list project."""
import os
from datetime import timedelta
from celery.schedules import crontab
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3'
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'xmpplist.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
'ATOMIC_REQUESTS': True,
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = [
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware'
]
ROOT_URLCONF = 'xmpplist.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
# style
'bootstrapform',
'core',
'server',
'account',
'api',
'confirm',
)
if DEBUG:
LOG_LEVEL = 'DEBUG'
else:
LOG_LEVEL = 'ERROR'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGIN_URL = '/user/login/'
LOGIN_REDIRECT_URL = '/user/'
AUTH_USER_MODEL = 'account.LocalUser'
DEFAULT_FROM_EMAIL = '[email protected]'
INTERNAL_IPS = ('127.0.0.1')
USE_HTTPS = False
USE_IP4 = True
USE_IP6 = True
GEOIP_CONFIG_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'geoip'))
CONFIRMATION_TIMEOUT = timedelta(hours=48)
CERTIFICATES_PATH = 'static/certs'
LOGOUT_REDIRECT_URL = 'home' # only used when next queryparam is not set
# Message tags updated to match bootstrap alert classes
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
##########
# Celery #
##########
CELERY_BEAT_SCHEDULE = {
'refresh geoip': {
'task': 'core.tasks.refresh_geoip_database',
'schedule': crontab(hour=3, minute=0, day_of_week=1),
},
'verify servers': {
'task': 'server.tasks.verify_servers',
'schedule': crontab(hour=3, minute=10),
},
'remove old servers': {
'task': 'server.tasks.remove_old_servers',
'schedule': crontab(hour=3, minute=5),
},
'moderation mails': {
'task': 'server.tasks.moderation_mails',
'schedule': crontab(hour=8, minute=0),
},
}
try:
from .localsettings import * # NOQA
except ImportError:
pass
GEOIP_COUNTRY_DB = os.path.join(GEOIP_CONFIG_ROOT, 'GeoLite2-Country.mmdb')
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
},
'simple': {
'format': '%(levelname)-8s %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'mail_admins': {
'level': LOG_LEVEL,
'class': 'django.utils.log.AdminEmailHandler',
'formatter': 'simple',
'filters': ['require_debug_false'],
},
'console': {
'level': LOG_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'server': {
'handlers': ['console'],
'level': LOG_LEVEL,
'propagate': False,
},
'sleekxmpp': {
'handlers': ['console'],
'level': 'CRITICAL',
'propagate': False,
},
'xmpp': {
'handlers': ['console'],
'level': LOG_LEVEL,
'propagate': False,
},
},
'root': {
'handlers': ['console'],
'level': 'INFO',
}
}
| gpl-3.0 | 222,943,038,920,344,300 | 30.478261 | 108 | 0.637776 | false |
muharif/vpp | vpp-api/java/jvpp/gen/jvpp_gen.py | 1 | 5128 | #!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import importlib
import sys
import callback_gen
import notification_gen
import dto_gen
import jvpp_callback_facade_gen
import jvpp_future_facade_gen
import jvpp_impl_gen
import jvpp_c_gen
import util
# Invocation:
# ~/Projects/vpp/vpp-api/jvpp/gen$ mkdir -p java/org/openvpp/jvpp && cd java/org/openvpp/jvpp
# ~/Projects/vpp/vpp-api/jvpp/gen/java/org/openvpp/jvpp$ ../../../../jvpp_gen.py -idefs_api_vpp_papi.py
#
# Compilation:
# ~/Projects/vpp/vpp-api/jvpp/gen/java/org/openvpp/jvpp$ javac *.java dto/*.java callback/*.java
#
# where
# defs_api_vpp_papi.py - vpe.api in python format (generated by vppapigen)
from util import vpp_2_jni_type_mapping
parser = argparse.ArgumentParser(description='VPP Java API generator')
parser.add_argument('-i', action="store", dest="inputfile")
args = parser.parse_args()
sys.path.append(".")
inputfile = args.inputfile.replace('.py', '')
cfg = importlib.import_module(inputfile, package=None)
# FIXME: functions unsupported due to problems with vpe.api
def is_supported(f_name):
return f_name not in {'vnet_ip4_fib_counters', 'vnet_ip6_fib_counters'}
def is_request_field(field_name):
return field_name not in {'_vl_msg_id', 'client_index', 'context'}
def is_response_field(field_name):
return field_name not in {'_vl_msg_id'}
def get_args(t, filter):
arg_list = []
for i in t:
if not filter(i[1]):
continue
arg_list.append(i[1])
return arg_list
def get_types(t, filter):
types_list = []
c_types_list = []
lengths_list = []
for i in t:
if not filter(i[1]):
continue
        if len(i) == 3:  # array type
types_list.append(vpp_2_jni_type_mapping[i[0]] + 'Array')
c_types_list.append(i[0] + '[]')
lengths_list.append((i[2], False))
        elif len(i) == 4:  # variable length array type
types_list.append(vpp_2_jni_type_mapping[i[0]] + 'Array')
c_types_list.append(i[0] + '[]')
lengths_list.append((i[3], True))
else: # primitive type
types_list.append(vpp_2_jni_type_mapping[i[0]])
c_types_list.append(i[0])
lengths_list.append((0, False))
return types_list, c_types_list, lengths_list
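# Illustrative sketch (hypothetical field tuples; the exact JNI names come
# from util.vpp_2_jni_type_mapping): a fixed u8 array ('u8', 'mac', 6) would
# yield a '...Array' JNI type, C type 'u8[]' and length (6, False); a scalar
# ('u32', 'sw_if_index') yields its mapped JNI type, C type 'u32' and (0, False).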
def get_definitions():
# Pass 1
func_list = []
func_name = {}
for a in cfg.vppapidef:
if not is_supported(a[0]):
continue
java_name = util.underscore_to_camelcase(a[0])
# For replies include all the arguments except message_id
if util.is_reply(java_name):
types, c_types, lengths = get_types(a[1:], is_response_field)
func_name[a[0]] = dict(
[('name', a[0]), ('java_name', java_name),
('args', get_args(a[1:], is_response_field)), ('full_args', get_args(a[1:], lambda x: True)),
('types', types), ('c_types', c_types), ('lengths', lengths)])
# For requests skip message_id, client_id and context
else:
types, c_types, lengths = get_types(a[1:], is_request_field)
func_name[a[0]] = dict(
[('name', a[0]), ('java_name', java_name),
('args', get_args(a[1:], is_request_field)), ('full_args', get_args(a[1:], lambda x: True)),
('types', types), ('c_types', c_types), ('lengths', lengths)])
# Indexed by name
func_list.append(func_name[a[0]])
return func_list, func_name
func_list, func_name = get_definitions()
base_package = 'org.openvpp.jvpp'
dto_package = 'dto'
callback_package = 'callback'
notification_package = 'notification'
future_package = 'future'
# TODO find better package name
callback_facade_package = 'callfacade'
dto_gen.generate_dtos(func_list, base_package, dto_package, args.inputfile)
jvpp_impl_gen.generate_jvpp(func_list, base_package, dto_package, args.inputfile)
callback_gen.generate_callbacks(func_list, base_package, callback_package, dto_package, args.inputfile)
notification_gen.generate_notification_registry(func_list, base_package, notification_package, callback_package, dto_package, args.inputfile)
jvpp_c_gen.generate_jvpp(func_list, args.inputfile)
jvpp_future_facade_gen.generate_jvpp(func_list, base_package, dto_package, callback_package, notification_package, future_package, args.inputfile)
jvpp_callback_facade_gen.generate_jvpp(func_list, base_package, dto_package, callback_package, notification_package, callback_facade_package, args.inputfile)
| apache-2.0 | 5,746,196,769,455,145,000 | 35.368794 | 157 | 0.656786 | false |
ferdyrod/basic-ecommerce | ecommerce/settings/local.py | 1 | 2918 | # Django settings for ecommerce project.
from os.path import dirname, abspath, join
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'ecommerce.sqlite', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = join(PROJECT_ROOT, 'static', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = join(PROJECT_ROOT, 'static', 'static-only')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
join(PROJECT_ROOT, 'static', 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_DIRS = (
join(PROJECT_ROOT, 'static', 'templates')
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'registration',
'products',
'contact',
'cart',
'profiles',
'orders',
)
ACCOUNT_ACTIVATION_DAYS = 7
AUTH_PROFILE_MODULE = 'profiles.profile'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'Your_Email_Here'
EMAIL_HOST_PASSWORD = 'Your_Password_Here'
EMAIL_USE_TLS = True
| apache-2.0 | 275,103,391,602,662,940 | 31.422222 | 127 | 0.665182 | false |
theghostwhocodes/mocker | tests/utils_test.py | 1 | 1128 | import os
import unittest
import mocker.utils
class TestUtils(unittest.TestCase):
def setUp(self):
self.data_path = './tests/data'
def test_compute_file_path_for_get(self):
path = '/test'
command = 'GET'
file_path = mocker.utils.compute_file_path(self.data_path, path, command)
desired_file_path = os.path.join(
os.getcwd(),
'tests',
'data',
'test.GET.json'
)
self.assertEqual(file_path, desired_file_path)
def test_load_mock(self):
path = '/test'
command = 'GET'
file_path = mocker.utils.compute_file_path(self.data_path, path, command)
content = mocker.utils.load_mock(file_path)
self.assertDictEqual(
content,
{
'response': {
'body': {
'key': 'value in new format'
},
'headers': {
'Content-Type': 'application/json'
},
'status': 200
}
}
)
| mit | 1,872,902,237,085,321,500 | 25.857143 | 81 | 0.467199 | false |
google/iree-llvm-sandbox | runners/test/python/experts.py | 1 | 3643 | #
# import time
from typing import List
from search import *
from transforms import *
class Assignments:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Expert:
    def __init__(self, **assignments):
        self.assignments = Assignments(**assignments)
def _pre_transform(self, module, boilerplate_code):
# TODO: Allow cloning functions from one module to another.
# Atm we have to resort to string concatenation.
module = Module.parse(
str(module.operation.regions[0].blocks[0].operations[0].operation) +
boilerplate_code)
return module
def __call__(self, module, boilerplate_code):
module = self._pre_transform(module, boilerplate_code)
for transform in self.transforms():
transform(module, 'matmul_on_tensors')
return module
def transforms(self) -> List[Transform]:
'Abstract method that returns a list of transforms for given expert.'
class ExpertCompiler1(Expert):
variables = {
'sizes1': TilingSizesVariable,
'sizes2': TilingSizesVariable,
'sizes3': TilingSizesVariable,
'pad': BoolVariable,
'hoist_padding': HoistPaddingVariable,
}
def transforms(self) -> List[Transform]:
v = self.assignments
return [
TileAndPad('matmul_on_tensors', 'linalg.matmul', v.sizes1),
TileAndPad('matmul_on_tensors', 'linalg.matmul', v.sizes2),
TileAndPad(
'matmul_on_tensors',
'linalg.matmul',
v.sizes3,
pad=v.pad,
hoist_padding=v.hoist_padding),
Vectorize('matmul_on_tensors', 'linalg.matmul'),
Bufferize(),
LowerToLLVM(),
]
class ExpertCompiler2(Expert):
variables = {
'sizes1': TilingSizesVariable,
'sizes2': TilingSizesVariable,
'sizes3': TilingSizesVariable,
}
def transforms(self) -> List[Transform]:
v = self.assignments
return [
Fuse('matmul_on_tensors', 'linalg.matmul', v.sizes1),
Fuse('matmul_on_tensors', 'linalg.matmul', v.sizes2),
TileAndPad('matmul_on_tensors', 'linalg.matmul', v.sizes3),
Vectorize('matmul_on_tensors', 'linalg.matmul'),
Vectorize('matmul_on_tensors', 'linalg.fill'),
Bufferize(),
LowerToLLVM(),
]
class ExpertCompiler3(Expert):
variables = {
'sizes1': TilingSizesVariable,
'sizes2': TilingSizesVariable,
'sizes3': TilingSizesVariable,
'pad': BoolVariable,
'hoist_padding': HoistPaddingVariable,
}
def transforms(self) -> List[Transform]:
v = self.assignments
return [
Fuse('matmul_on_tensors', 'linalg.matmul', v.sizes1),
TileAndPad(
'matmul_on_tensors',
'linalg.matmul',
v.sizes2,
pad=v.pad,
hoist_padding=v.hoist_padding),
Vectorize('matmul_on_tensors', 'linalg.matmul'),
TileAndPad('matmul_on_tensors', 'linalg.fill', v.sizes3),
Vectorize('matmul_on_tensors', 'linalg.fill'),
Bufferize(),
LowerToLLVM(),
]
class ExpertSparseCompiler(Expert):
variables = {'options': str}
def transforms(self) -> List[Transform]:
v = self.assignments
self.options = v.options
return [
Sparsify(v.options),
]
expert_compilerr_1 = ExpertCompiler1(
sizes1=[256, 256, 256],
sizes2=[64, 64, 64],
sizes3=[8, 16, 32],
pad=True,
hoist_padding=2)
expert_compilerr_2 = ExpertCompiler2(
sizes1=[256, 256], sizes2=[8, 16], sizes3=[0, 0, 32])
expert_compilerr_3 = ExpertCompiler3(
sizes1=[256, 256],
sizes2=[8, 16, 32],
sizes3=[8, 32],
pad=True,
hoist_padding=3)
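# Minimal usage sketch (hypothetical `module` and `boilerplate` values; the
# transforms above hard-code a 'matmul_on_tensors' entry point):
#   lowered_module = expert_compilerr_1(module, boilerplate)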
| apache-2.0 | 5,863,396,034,825,331,000 | 25.208633 | 76 | 0.622289 | false |
adlnet-archive/edx-platform | lms/djangoapps/instructor_analytics/tests/test_basic.py | 1 | 11611 | """
Tests for instructor.basic
"""
from django.test import TestCase
from student.models import CourseEnrollment
from django.core.urlresolvers import reverse
from student.tests.factories import UserFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from shoppingcart.models import CourseRegistrationCode, RegistrationCodeRedemption, Order, Invoice, Coupon
from instructor_analytics.basic import (
sale_record_features, enrolled_students_features, course_registration_features, coupon_codes_features,
AVAILABLE_FEATURES, STUDENT_FEATURES, PROFILE_FEATURES
)
from course_groups.tests.helpers import CohortFactory
from course_groups.models import CourseUserGroup
from courseware.tests.factories import InstructorFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestAnalyticsBasic(ModuleStoreTestCase):
""" Test basic analytics functions. """
def setUp(self):
super(TestAnalyticsBasic, self).setUp()
self.course_key = SlashSeparatedCourseKey('robot', 'course', 'id')
self.users = tuple(UserFactory() for _ in xrange(30))
self.ces = tuple(CourseEnrollment.enroll(user, self.course_key)
for user in self.users)
self.instructor = InstructorFactory(course_key=self.course_key)
def test_enrolled_students_features_username(self):
self.assertIn('username', AVAILABLE_FEATURES)
userreports = enrolled_students_features(self.course_key, ['username'])
self.assertEqual(len(userreports), len(self.users))
for userreport in userreports:
self.assertEqual(userreport.keys(), ['username'])
self.assertIn(userreport['username'], [user.username for user in self.users])
def test_enrolled_students_features_keys(self):
query_features = ('username', 'name', 'email')
for feature in query_features:
self.assertIn(feature, AVAILABLE_FEATURES)
with self.assertNumQueries(1):
userreports = enrolled_students_features(self.course_key, query_features)
self.assertEqual(len(userreports), len(self.users))
for userreport in userreports:
self.assertEqual(set(userreport.keys()), set(query_features))
self.assertIn(userreport['username'], [user.username for user in self.users])
self.assertIn(userreport['email'], [user.email for user in self.users])
self.assertIn(userreport['name'], [user.profile.name for user in self.users])
def test_enrolled_students_features_keys_cohorted(self):
course = CourseFactory.create(course_key=self.course_key)
course.cohort_config = {'cohorted': True, 'auto_cohort': True, 'auto_cohort_groups': ['cohort']}
self.store.update_item(course, self.instructor.id)
cohort = CohortFactory.create(name='cohort', course_id=course.id)
cohorted_students = [UserFactory.create() for _ in xrange(10)]
cohorted_usernames = [student.username for student in cohorted_students]
non_cohorted_student = UserFactory.create()
for student in cohorted_students:
cohort.users.add(student)
CourseEnrollment.enroll(student, course.id)
CourseEnrollment.enroll(non_cohorted_student, course.id)
instructor = InstructorFactory(course_key=course.id)
self.client.login(username=instructor.username, password='test')
query_features = ('username', 'cohort')
# There should be a constant of 2 SQL queries when calling
# enrolled_students_features. The first query comes from the call to
# User.objects.filter(...), and the second comes from
# prefetch_related('course_groups').
with self.assertNumQueries(2):
userreports = enrolled_students_features(course.id, query_features)
self.assertEqual(len([r for r in userreports if r['username'] in cohorted_usernames]), len(cohorted_students))
self.assertEqual(len([r for r in userreports if r['username'] == non_cohorted_student.username]), 1)
for report in userreports:
self.assertEqual(set(report.keys()), set(query_features))
if report['username'] in cohorted_usernames:
self.assertEqual(report['cohort'], cohort.name)
else:
self.assertEqual(report['cohort'], '[unassigned]')
def test_available_features(self):
self.assertEqual(len(AVAILABLE_FEATURES), len(STUDENT_FEATURES + PROFILE_FEATURES))
self.assertEqual(set(AVAILABLE_FEATURES), set(STUDENT_FEATURES + PROFILE_FEATURES))
class TestCourseSaleRecordsAnalyticsBasic(ModuleStoreTestCase):
""" Test basic course sale records analytics functions. """
def setUp(self):
"""
Fixtures.
"""
super(TestCourseSaleRecordsAnalyticsBasic, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_course_sale_features(self):
query_features = [
'company_name', 'company_contact_name', 'company_contact_email', 'total_codes', 'total_used_codes',
'total_amount', 'created_at', 'customer_reference_number', 'recipient_name', 'recipient_email',
'created_by', 'internal_reference', 'invoice_number', 'codes', 'course_id'
]
#create invoice
sale_invoice = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
company_contact_email='[email protected]', recipient_name='Testw_1', recipient_email='[email protected]',
customer_reference_number='2Fwe23S', internal_reference="ABC", course_id=self.course.id
)
for i in range(5):
course_code = CourseRegistrationCode(
code="test_code{}".format(i), course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor, invoice=sale_invoice
)
course_code.save()
course_sale_records_list = sale_record_features(self.course.id, query_features)
for sale_record in course_sale_records_list:
self.assertEqual(sale_record['total_amount'], sale_invoice.total_amount)
self.assertEqual(sale_record['recipient_email'], sale_invoice.recipient_email)
self.assertEqual(sale_record['recipient_name'], sale_invoice.recipient_name)
self.assertEqual(sale_record['company_name'], sale_invoice.company_name)
self.assertEqual(sale_record['company_contact_name'], sale_invoice.company_contact_name)
self.assertEqual(sale_record['company_contact_email'], sale_invoice.company_contact_email)
self.assertEqual(sale_record['internal_reference'], sale_invoice.internal_reference)
self.assertEqual(sale_record['customer_reference_number'], sale_invoice.customer_reference_number)
self.assertEqual(sale_record['invoice_number'], sale_invoice.id)
self.assertEqual(sale_record['created_by'], self.instructor)
self.assertEqual(sale_record['total_used_codes'], 0)
self.assertEqual(sale_record['total_codes'], 5)
class TestCourseRegistrationCodeAnalyticsBasic(ModuleStoreTestCase):
""" Test basic course registration codes analytics functions. """
def setUp(self):
"""
Fixtures.
"""
super(TestCourseRegistrationCodeAnalyticsBasic, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'sale_price': 122.45,
'company_contact_name': 'TestName', 'company_contact_email': '[email protected]', 'recipient_name': 'Test123',
'recipient_email': '[email protected]', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
def test_course_registration_features(self):
query_features = [
'code', 'course_id', 'company_name', 'created_by',
'redeemed_by', 'invoice_id', 'purchaser', 'customer_reference_number', 'internal_reference'
]
order = Order(user=self.instructor, status='purchased')
order.save()
registration_code_redemption = RegistrationCodeRedemption(
order=order, registration_code_id=1, redeemed_by=self.instructor
)
registration_code_redemption.save()
registration_codes = CourseRegistrationCode.objects.all()
course_registration_list = course_registration_features(query_features, registration_codes, csv_type='download')
self.assertEqual(len(course_registration_list), len(registration_codes))
for course_registration in course_registration_list:
self.assertEqual(set(course_registration.keys()), set(query_features))
self.assertIn(course_registration['code'], [registration_code.code for registration_code in registration_codes])
self.assertIn(
course_registration['course_id'],
[registration_code.course_id.to_deprecated_string() for registration_code in registration_codes]
)
self.assertIn(
course_registration['company_name'],
[getattr(registration_code.invoice, 'company_name') for registration_code in registration_codes]
)
self.assertIn(
course_registration['invoice_id'],
[registration_code.invoice_id for registration_code in registration_codes]
)
def test_coupon_codes_features(self):
query_features = [
'course_id', 'percentage_discount', 'code_redeemed_count', 'description'
]
for i in range(10):
coupon = Coupon(
code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
)
coupon.save()
active_coupons = Coupon.objects.filter(course_id=self.course.id, is_active=True)
active_coupons_list = coupon_codes_features(query_features, active_coupons)
self.assertEqual(len(active_coupons_list), len(active_coupons))
for active_coupon in active_coupons_list:
self.assertEqual(set(active_coupon.keys()), set(query_features))
self.assertIn(active_coupon['percentage_discount'], [coupon.percentage_discount for coupon in active_coupons])
self.assertIn(active_coupon['description'], [coupon.description for coupon in active_coupons])
self.assertIn(
active_coupon['course_id'],
[coupon.course_id.to_deprecated_string() for coupon in active_coupons]
)
| agpl-3.0 | -4,815,205,632,357,112,000 | 52.261468 | 124 | 0.666609 | false |
ubvu/orcid-monitor | orcid-usage/analyze.py | 1 | 6196 | import codecs
import os
import sys
from multiprocessing import Process, Queue
from lxml import etree
import tablib
COLNAME_MODIFIED_DATE = 'last modified date'
COLNAME_CREATION_DATE = 'creation date'
COLNAME_KEYWORDS = 'keywords'
COLNAME_EMAIL = 'email'
COLNAME_WORKS = 'works'
COLNAME_FUNDING = 'funding'
COLNAME_AFFILIATIONS = 'affiliations'
COLNAME_OTHER_NAMES = 'other-names'
COLNAME_CREDIT_NAME = 'credit-name'
COLNAME_FAMILY_NAME = 'family-name'
COLNAME_ORCID = 'orcid'
COLNAME_GIVEN_NAMES = 'given-names'
COLUMN_INTERNAL = 'Internal (by disam. source id)'
nsmap = {
'x': 'http://www.orcid.org/ns/orcid'
}
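# All elements of the ORCID message XML live in this namespace; the 'x:'
# prefix in every XPath expression below resolves through this map.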
def save_to_file(persons, dest):
column_names = [
COLNAME_ORCID,
COLNAME_GIVEN_NAMES,
COLNAME_FAMILY_NAME,
COLNAME_CREDIT_NAME,
COLNAME_OTHER_NAMES,
COLNAME_AFFILIATIONS,
COLUMN_INTERNAL,
COLNAME_FUNDING,
COLNAME_WORKS,
COLNAME_EMAIL,
COLNAME_KEYWORDS,
COLNAME_CREATION_DATE,
COLNAME_MODIFIED_DATE,
]
# Add column names for (initially unknown) external identifiers
all_col_names = {x for person in persons for x in person.keys()}
ext_id_col_names = {x for x in all_col_names if x not in column_names}
column_names.extend(ext_id_col_names)
dataset = tablib.Dataset(column_names, title='ORCID analyse')
for person in persons:
person_data = map(lambda x: person.get(x, ''), column_names)
dataset.append(person_data)
file_path = os.path.join(os.getcwd(), 'data', dest + '.csv')
with open(file_path, 'wb') as f:
f.write(dataset.csv)
with open('organization_ids.txt') as f:
internal_org_ids = {tuple(line.rstrip('\r\n').split(',')) for line in f}
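# organization_ids.txt is assumed to hold one '<source>,<identifier>' pair per
# line (e.g. a hypothetical 'RINGGOLD,1190'); the tuples are matched against
# disambiguated-organization elements of current affiliations in parse_person.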
def parse_person(filehandle):
person = {}
root = etree.parse(filehandle).getroot()
person[COLNAME_ORCID] = root.xpath('//x:orcid-identifier/x:path/text()', namespaces=nsmap)[0]
print person[COLNAME_ORCID]
#print sys.getsizeof(root)
person[COLNAME_FUNDING] = len(root.xpath('//x:funding', namespaces=nsmap))
person[COLNAME_WORKS] = len(root.xpath('//x:orcid-works/x:orcid-work', namespaces=nsmap))
given_name_elems = root.xpath('//x:personal-details/x:given-names/text()', namespaces=nsmap)
if len(given_name_elems) > 0:
person[COLNAME_GIVEN_NAMES] = given_name_elems[0]
person[COLNAME_OTHER_NAMES] = len(root.xpath('//x:personal-details/x:other-names/x:other-name', namespaces=nsmap))
family_name_elems = root.xpath('//x:personal-details/x:family-name/text()', namespaces=nsmap)
if len(family_name_elems) > 0:
person[COLNAME_FAMILY_NAME] = family_name_elems[0]
credit_name_elems = root.xpath('//x:personal-details/x:credit-name/text()', namespaces=nsmap)
if len(credit_name_elems) > 0:
person[COLNAME_CREDIT_NAME] = credit_name_elems[0]
email_elems = root.xpath('//x:contact-details/x:email/text()', namespaces=nsmap)
if len(email_elems) > 0:
person[COLNAME_EMAIL] = email_elems[0]
keywords_elems = root.xpath('//x:keywords/x:keyword', namespaces=nsmap)
person[COLNAME_KEYWORDS] = 'No' if len(keywords_elems) == 0 else 'Yes'
person[COLNAME_CREATION_DATE] = root.xpath('//x:submission-date/text()', namespaces=nsmap)[0][:10]
person[COLNAME_MODIFIED_DATE] = root.xpath('//x:last-modified-date/text()', namespaces=nsmap)[0][:10]
for ext_id_node in root.xpath('//x:external-identifier', namespaces=nsmap):
source = ext_id_node.find('x:external-id-common-name', nsmap).text
reference = ext_id_node.find('x:external-id-reference', nsmap).text
person[source] = reference
employment_affiliations = root.xpath('//x:affiliation[x:type[text()=\'employment\']]', namespaces=nsmap)
person[COLNAME_AFFILIATIONS] = len(employment_affiliations)
person[COLUMN_INTERNAL] = 'N'
# find the source without an enddate
curr_affls = 0
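    # Collect the names of current (no end-date) VU/Amsterdam affiliations
    # into numbered 'affl<N>' columns; save_to_file() picks these up as
    # extra CSV columns.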
for affiliation in employment_affiliations:
disam_org_identifier = affiliation.xpath(
'.//x:disambiguated-organization/x:disambiguated-organization-identifier', namespaces=nsmap)
disam_org_source = affiliation.xpath('.//x:disambiguated-organization/x:disambiguation-source',
namespaces=nsmap)
org_name = affiliation.xpath('.//x:organization/x:name/text()', namespaces=nsmap)[0]
org_name = org_name.lower()
end_date = affiliation.xpath('.//x:end-date', namespaces=nsmap)
end_year = affiliation.xpath('.//x:end-date/x:year/text()', namespaces=nsmap)
if len(end_date) == 0:
colname = 'affl' + str(curr_affls)
if org_name.find('amsterdam') > -1 or org_name.find('vu') > -1 or org_name.find('free') > -1 or org_name.find('vrije') > -1:
person[colname] = org_name
curr_affls = curr_affls + 1
# check for RINNGOLD ID and strings VU University or Vrije Universiteit
if len(end_date) == 0: # current employer
print org_name
if disam_org_identifier and disam_org_source:
if (disam_org_source[0].text, disam_org_identifier[0].text) in internal_org_ids:
person[COLUMN_INTERNAL] = 'Y'
if (org_name.find('vu university') > -1 and org_name.find('vu university medical center')==-1) or org_name.find('vrije universiteit amsterdam') > -1 or org_name.find('free university amsterdam') > -1:
print '****YES****'
person[COLUMN_INTERNAL] = 'Y'
return person
if __name__ == '__main__':
try:
path = sys.argv[1]
    except IndexError:
path = '0217'
source = os.path.join(os.getcwd(), 'data', 'downloads', path)
persons = []
for fn in os.listdir(source):
f = codecs.open(os.path.join(source, fn), 'r', 'utf-8')
# with open(os.path.join(source, fn), 'r') as f:
# result = executor.submit(persons.append(parse_person(f)), *args, **kwargs).result()
persons.append(parse_person(f))
        f.close()
save_to_file(persons, path)
| mit | 147,770,138,063,816,770 | 40.583893 | 212 | 0.643157 | false |
nuxeh/keystone | keystone/tests/unit/test_v3_federation.py | 1 | 154332 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import subprocess
from testtools import matchers
import uuid
from lxml import etree
import mock
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslotest import mockpatch
import saml2
from saml2 import saml
from saml2 import sigver
from six.moves import urllib
import xmldsig
from keystone.auth import controllers as auth_controllers
from keystone.auth.plugins import mapped
from keystone.contrib import federation
from keystone.contrib.federation import controllers as federation_controllers
from keystone.contrib.federation import idp as keystone_idp
from keystone.contrib.federation import utils as mapping_utils
from keystone import exception
from keystone import notifications
from keystone.tests.unit import core
from keystone.tests.unit import federation_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_v3
from keystone.token.providers import common as token_common
CONF = cfg.CONF
LOG = log.getLogger(__name__)
ROOTDIR = os.path.dirname(os.path.abspath(__file__))
XMLDIR = os.path.join(ROOTDIR, 'saml2/')
def dummy_validator(*args, **kwargs):
pass
class FederationTests(test_v3.RestfulTestCase):
EXTENSION_NAME = 'federation'
EXTENSION_TO_ADD = 'federation_extension'
class FederatedSetupMixin(object):
ACTION = 'authenticate'
IDP = 'ORG_IDP'
PROTOCOL = 'saml2'
AUTH_METHOD = 'saml2'
USER = 'user@ORGANIZATION'
ASSERTION_PREFIX = 'PREFIX_'
IDP_WITH_REMOTE = 'ORG_IDP_REMOTE'
REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2']
REMOTE_ID_ATTR = uuid.uuid4().hex
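    # Body of an unscoped federated auth request: the saml2 method names only
    # the IdP and protocol; the assertion itself is injected into the request
    # environment (see _inject_assertion below).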
UNSCOPED_V3_SAML2_REQ = {
"identity": {
"methods": [AUTH_METHOD],
AUTH_METHOD: {
"identity_provider": IDP,
"protocol": PROTOCOL
}
}
}
def _check_domains_are_valid(self, token):
self.assertEqual('Federated', token['user']['domain']['id'])
self.assertEqual('Federated', token['user']['domain']['name'])
def _project(self, project):
return (project['id'], project['name'])
def _roles(self, roles):
return set([(r['id'], r['name']) for r in roles])
def _check_projects_and_roles(self, token, roles, projects):
"""Check whether the projects and the roles match."""
token_roles = token.get('roles')
if token_roles is None:
raise AssertionError('Roles not found in the token')
token_roles = self._roles(token_roles)
roles_ref = self._roles(roles)
self.assertEqual(token_roles, roles_ref)
token_projects = token.get('project')
if token_projects is None:
raise AssertionError('Projects not found in the token')
token_projects = self._project(token_projects)
projects_ref = self._project(projects)
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
def xor_project_domain(iterable):
return sum(('project' in iterable, 'domain' in iterable)) % 2
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
# Check for either project or domain
if not xor_project_domain(token.keys()):
raise AssertionError("You must specify either"
"project or domain.")
self.assertIn('OS-FEDERATION', token['user'])
os_federation = token['user']['OS-FEDERATION']
self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])
def _issue_unscoped_token(self,
idp=None,
assertion='EMPLOYEE_ASSERTION',
environment=None):
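        """Build a fake federated context and return the unscoped auth response."""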
api = federation_controllers.Auth()
context = {'environment': environment or {}}
self._inject_assertion(context, assertion)
if idp is None:
idp = self.IDP
r = api.federated_authentication(context, idp, self.PROTOCOL)
return r
def idp_ref(self, id=None):
idp = {
'id': id or uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex
}
return idp
def proto_ref(self, mapping_id=None):
proto = {
'id': uuid.uuid4().hex,
'mapping_id': mapping_id or uuid.uuid4().hex
}
return proto
def mapping_ref(self, rules=None):
return {
'id': uuid.uuid4().hex,
'rules': rules or self.rules['rules']
}
def _scope_request(self, unscoped_token_id, scope, scope_id):
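        """Build an auth request body that rescopes ``unscoped_token_id``.
        ``scope`` is either 'project' or 'domain'; ``scope_id`` identifies
        the target of that type.
        """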
return {
'auth': {
'identity': {
'methods': [
self.AUTH_METHOD
],
self.AUTH_METHOD: {
'id': unscoped_token_id
}
},
'scope': {
scope: {
'id': scope_id
}
}
}
}
def _inject_assertion(self, context, variant, query_string=None):
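        """Copy the named assertion fixture into the request environment."""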
assertion = getattr(mapping_fixtures, variant)
context['environment'].update(assertion)
context['query_string'] = query_string or []
def load_federation_sample_data(self):
"""Inject additional data."""
# Create and add domains
self.domainA = self.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'],
self.domainA)
self.domainB = self.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'],
self.domainB)
self.domainC = self.new_domain_ref()
self.resource_api.create_domain(self.domainC['id'],
self.domainC)
self.domainD = self.new_domain_ref()
self.resource_api.create_domain(self.domainD['id'],
self.domainD)
# Create and add projects
self.proj_employees = self.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_employees['id'],
self.proj_employees)
self.proj_customers = self.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_customers['id'],
self.proj_customers)
self.project_all = self.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.project_all['id'],
self.project_all)
self.project_inherited = self.new_project_ref(
domain_id=self.domainD['id'])
self.resource_api.create_project(self.project_inherited['id'],
self.project_inherited)
# Create and add groups
self.group_employees = self.new_group_ref(
domain_id=self.domainA['id'])
self.group_employees = (
self.identity_api.create_group(self.group_employees))
self.group_customers = self.new_group_ref(
domain_id=self.domainA['id'])
self.group_customers = (
self.identity_api.create_group(self.group_customers))
self.group_admins = self.new_group_ref(
domain_id=self.domainA['id'])
self.group_admins = self.identity_api.create_group(self.group_admins)
# Create and add roles
self.role_employee = self.new_role_ref()
self.role_api.create_role(self.role_employee['id'], self.role_employee)
self.role_customer = self.new_role_ref()
self.role_api.create_role(self.role_customer['id'], self.role_customer)
self.role_admin = self.new_role_ref()
self.role_api.create_role(self.role_admin['id'], self.role_admin)
# Employees can access
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_all['id'])
# Customers can access
# * proj_customers
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
project_id=self.proj_customers['id'])
# Admins can access:
# * proj_customers
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_customers['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.project_all['id'])
# Customers can access:
# * domain A
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access projects via inheritance:
# * domain D
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainD['id'],
inherited_to_projects=True)
# Employees can access:
# * domain A
# * domain B
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainB['id'])
# Admins can access:
# * domain A
# * domain B
# * domain C
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainB['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainC['id'])
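        # The mapping rules below key on assertion attributes: orgPersonType
        # routes employees/customers/admins to their groups, while the later
        # rules exercise prefixed attributes, group names scoped by domain,
        # nonexistent groups and direct mapping onto a local user.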
self.rules = {
'rules': [
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Employee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': self.ASSERTION_PREFIX + 'UserName'
},
{
'type': self.ASSERTION_PREFIX + 'orgPersonType',
'any_one_of': [
'SuperEmployee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Customer'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_admins['id']
}
},
{
'group': {
'id': self.group_employees['id']
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Admin',
'Chief'
]
}
]
},
{
'local': [
{
'group': {
'id': uuid.uuid4().hex
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'FirstName',
'any_one_of': [
'Jill'
]
},
{
'type': 'LastName',
'any_one_of': [
'Smith'
]
}
]
},
{
'local': [
{
'group': {
'id': 'this_group_no_longer_exists'
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
'any_one_of': [
'[email protected]'
]
},
{
'type': 'orgPersonType',
'any_one_of': [
'Tester'
]
}
]
},
# rules with local group names
{
"local": [
{
'user': {
'name': '{0}'
}
},
{
"group": {
"name": self.group_customers['name'],
"domain": {
"name": self.domainA['name']
}
}
}
],
"remote": [
{
'type': 'UserName',
},
{
"type": "orgPersonType",
"any_one_of": [
"CEO",
"CTO"
],
}
]
},
{
"local": [
{
'user': {
'name': '{0}'
}
},
{
"group": {
"name": self.group_admins['name'],
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "orgPersonType",
"any_one_of": [
"Managers"
]
}
]
},
{
"local": [
{
"user": {
"name": "{0}"
}
},
{
"group": {
"name": "NON_EXISTING",
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "UserName",
"any_one_of": [
"IamTester"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": self.user['domain_id']
}
}
},
{
"group": {
"id": self.group_customers['id']
}
}
],
"remote": [
{
"type": "UserType",
"any_one_of": [
"random"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": uuid.uuid4().hex
}
}
}
],
"remote": [
{
"type": "Position",
"any_one_of": [
"DirectorGeneral"
]
}
]
}
]
}
# Add IDP
self.idp = self.idp_ref(id=self.IDP)
self.federation_api.create_idp(self.idp['id'],
self.idp)
# Add IDP with remote
self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE)
self.idp_with_remote['remote_ids'] = self.REMOTE_IDS
self.federation_api.create_idp(self.idp_with_remote['id'],
self.idp_with_remote)
# Add a mapping
self.mapping = self.mapping_ref()
self.federation_api.create_mapping(self.mapping['id'],
self.mapping)
# Add protocols
self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
self.proto_saml['id'] = self.PROTOCOL
self.federation_api.create_protocol(self.idp['id'],
self.proto_saml['id'],
self.proto_saml)
# Add protocols IDP with remote
self.federation_api.create_protocol(self.idp_with_remote['id'],
self.proto_saml['id'],
self.proto_saml)
# Generate fake tokens
context = {'environment': {}}
self.tokens = {}
VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
'ADMIN_ASSERTION')
api = auth_controllers.Auth()
for variant in VARIANTS:
self._inject_assertion(context, variant)
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.tokens[variant] = r.headers.get('X-Subject-Token')
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
uuid.uuid4().hex, 'project', self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
self.tokens['EMPLOYEE_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.project_inherited['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain',
self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainD['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain',
self.domainC['id'])
class FederatedIdentityProviderTests(FederationTests):
"""A test class for Identity Providers."""
idp_keys = ['description', 'enabled']
default_body = {'description': None, 'enabled': True}
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/identity_providers/' + str(suffix)
return '/OS-FEDERATION/identity_providers'
def _fetch_attribute_from_response(self, resp, parameter,
assert_is_not_none=True):
"""Fetch single attribute from TestResponse object."""
result = resp.result.get(parameter)
if assert_is_not_none:
self.assertIsNotNone(result)
return result
def _create_and_decapsulate_response(self, body=None):
"""Create IdP and fetch it's random id along with entity."""
default_resp = self._create_default_idp(body=body)
idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
self.assertIsNotNone(idp)
idp_id = idp.get('id')
return (idp_id, idp)
def _get_idp(self, idp_id):
"""Fetch IdP entity based on its id."""
url = self.base_url(suffix=idp_id)
resp = self.get(url)
return resp
def _create_default_idp(self, body=None):
"""Create default IdP."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self._http_idp_input()
resp = self.put(url, body={'identity_provider': body},
expected_status=201)
return resp
def _http_idp_input(self, **kwargs):
"""Create default input for IdP data."""
body = None
if 'body' not in kwargs:
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
else:
body = kwargs['body']
return body
def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
mapping_id=None, validate=True, **kwargs):
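        """PUT a protocol under an IdP, generating any argument not given."""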
if url is None:
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
if idp_id is None:
idp_id, _ = self._create_and_decapsulate_response()
if proto is None:
proto = uuid.uuid4().hex
if mapping_id is None:
mapping_id = uuid.uuid4().hex
body = {'mapping_id': mapping_id}
url = url % {'idp_id': idp_id, 'protocol_id': proto}
resp = self.put(url, body={'protocol': body}, **kwargs)
if validate:
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': mapping_id})
return (resp, idp_id, proto)
def _get_protocol(self, idp_id, protocol_id):
url = "%s/protocols/%s" % (idp_id, protocol_id)
url = self.base_url(suffix=url)
r = self.get(url)
return r
def test_create_idp(self):
"""Creates the IdentityProvider entity associated to remote_ids."""
keys_to_check = list(self.idp_keys)
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote(self):
"""Creates the IdentityProvider entity associated to remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex]
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote_repeated(self):
"""Creates two IdentityProvider entities with some remote_ids
A remote_id is the same for both so the second IdP is not
created because of the uniqueness of the remote_ids
Expect HTTP 409 code for the latter call.
"""
body = self.default_body.copy()
repeated_remote_id = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
repeated_remote_id]
self._create_default_idp(body=body)
url = self.base_url(suffix=uuid.uuid4().hex)
body['remote_ids'] = [uuid.uuid4().hex,
repeated_remote_id]
self.put(url, body={'identity_provider': body},
expected_status=409)
def test_create_idp_remote_empty(self):
"""Creates an IdP with empty remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = []
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote_none(self):
"""Creates an IdP with a None remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = None
resp = self._create_default_idp(body=body)
expected = body.copy()
expected['remote_ids'] = []
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=expected)
def test_update_idp_remote_ids(self):
"""Update IdP's remote_ids parameter."""
body = self.default_body.copy()
body['remote_ids'] = [uuid.uuid4().hex]
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex]
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
self.assertEqual(sorted(body['remote_ids']),
sorted(updated_idp.get('remote_ids')))
resp = self.get(url)
returned_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
self.assertEqual(sorted(body['remote_ids']),
sorted(returned_idp.get('remote_ids')))
def test_update_idp_clean_remote_ids(self):
"""Update IdP's remote_ids parameter with an empty list."""
body = self.default_body.copy()
body['remote_ids'] = [uuid.uuid4().hex]
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
body['remote_ids'] = []
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
self.assertEqual(sorted(body['remote_ids']),
sorted(updated_idp.get('remote_ids')))
resp = self.get(url)
returned_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
self.assertEqual(sorted(body['remote_ids']),
sorted(returned_idp.get('remote_ids')))
def test_list_idps(self, iterations=5):
"""Lists all available IdentityProviders.
This test collects ids of created IdPs and
intersects it with the list of all available IdPs.
List of all IdPs can be a superset of IdPs created in this test,
because other tests also create IdPs.
"""
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
ids = []
for _ in range(iterations):
id = get_id(self._create_default_idp())
ids.append(id)
ids = set(ids)
keys_to_check = self.idp_keys
url = self.base_url()
resp = self.get(url)
self.assertValidListResponse(resp, 'identity_providers',
dummy_validator,
keys_to_check=keys_to_check)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = set([e['id'] for e in entities])
ids_intersection = entities_ids.intersection(ids)
self.assertEqual(ids_intersection, ids)
def test_check_idp_uniqueness(self):
"""Add same IdP twice.
Expect HTTP 409 code for the latter call.
"""
url = self.base_url(suffix=uuid.uuid4().hex)
body = self._http_idp_input()
self.put(url, body={'identity_provider': body},
expected_status=201)
self.put(url, body={'identity_provider': body},
expected_status=409)
def test_get_idp(self):
"""Create and later fetch IdP."""
body = self._http_idp_input()
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
resp = self.get(url)
self.assertValidResponse(resp, 'identity_provider',
dummy_validator, keys_to_check=body.keys(),
ref=body)
def test_get_nonexisting_idp(self):
"""Fetch nonexisting IdP entity.
Expected HTTP 404 status code.
"""
idp_id = uuid.uuid4().hex
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.get(url, expected_status=404)
def test_delete_existing_idp(self):
"""Create and later delete IdP.
Expect HTTP 404 for the GET IdP call.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.delete(url)
self.get(url, expected_status=404)
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
Expect HTTP 404 for the GET IdP call.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
self.delete(url, expected_status=404)
def test_update_idp_mutable_attributes(self):
"""Update IdP's mutable parameters."""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
_enabled = not default_idp.get('enabled')
body = {'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex],
'description': uuid.uuid4().hex,
'enabled': _enabled}
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
for key in body.keys():
if isinstance(body[key], list):
self.assertEqual(sorted(body[key]),
sorted(updated_idp.get(key)))
else:
self.assertEqual(body[key], updated_idp.get(key))
resp = self.get(url)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
for key in body.keys():
if isinstance(body[key], list):
self.assertEqual(sorted(body[key]),
sorted(updated_idp.get(key)))
else:
self.assertEqual(body[key], updated_idp.get(key))
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
Expect HTTP 403 code.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
body = self._http_idp_input()
body['id'] = uuid.uuid4().hex
body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
url = self.base_url(suffix=idp_id)
self.patch(url, body={'identity_provider': body}, expected_status=403)
def test_update_nonexistent_idp(self):
"""Update nonexistent IdP
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
body = self._http_idp_input()
body['enabled'] = False
body = {'identity_provider': body}
self.patch(url, body=body, expected_status=404)
def test_assign_protocol_to_idp(self):
"""Assign a protocol to existing IdP."""
self._assign_protocol_to_idp(expected_status=201)
def test_protocol_composite_pk(self):
"""Test whether Keystone let's add two entities with identical
names, however attached to different IdPs.
1. Add IdP and assign it protocol with predefined name
2. Add another IdP and assign it a protocol with same name.
Expect HTTP 201 code
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
def test_protocol_idp_pk_uniqueness(self):
"""Test whether Keystone checks for unique idp/protocol values.
        Add the same protocol twice; expect Keystone to reject the latter
        call and return HTTP 409 code.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
kwargs = {'expected_status': 409}
resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
proto='saml2',
validate=False,
url=url, **kwargs)
def test_assign_protocol_to_nonexistent_idp(self):
"""Assign protocol to IdP that doesn't exist.
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
kwargs = {'expected_status': 404}
self._assign_protocol_to_idp(proto='saml2',
idp_id=idp_id,
validate=False,
**kwargs)
def test_get_protocol(self):
"""Create and later fetch protocol tied to IdP."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
url = "%s/protocols/%s" % (idp_id, proto_id)
url = self.base_url(suffix=url)
resp = self.get(url)
reference = {'id': proto_id}
self.assertValidResponse(resp, 'protocol',
dummy_validator,
keys_to_check=reference.keys(),
ref=reference)
def test_list_protocols(self):
"""Create set of protocols and later list them.
Compare input and output id sets.
"""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
iterations = random.randint(0, 16)
protocol_ids = []
for _ in range(iterations):
resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
proto_id = proto_id['id']
protocol_ids.append(proto_id)
url = "%s/protocols" % idp_id
url = self.base_url(suffix=url)
resp = self.get(url)
self.assertValidListResponse(resp, 'protocols',
dummy_validator,
keys_to_check=['id'])
entities = self._fetch_attribute_from_response(resp, 'protocols')
entities = set([entity['id'] for entity in entities])
protocols_intersection = entities.intersection(protocol_ids)
self.assertEqual(protocols_intersection, set(protocol_ids))
def test_update_protocols_attribute(self):
"""Update protocol's attribute."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
new_mapping_id = uuid.uuid4().hex
url = "%s/protocols/%s" % (idp_id, proto)
url = self.base_url(suffix=url)
body = {'mapping_id': new_mapping_id}
resp = self.patch(url, body={'protocol': body})
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': new_mapping_id}
)
def test_delete_protocol(self):
"""Delete protocol.
Expect HTTP 404 code for the GET call after the protocol is deleted.
"""
url = self.base_url(suffix='/%(idp_id)s/'
'protocols/%(protocol_id)s')
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
url = url % {'idp_id': idp_id,
'protocol_id': proto}
self.delete(url)
self.get(url, expected_status=404)
class MappingCRUDTests(FederationTests):
"""A class for testing CRUD operations for Mappings."""
MAPPING_URL = '/OS-FEDERATION/mappings/'
def assertValidMappingListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'mappings',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMappingResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'mapping',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMapping(self, entity, ref=None):
self.assertIsNotNone(entity.get('id'))
self.assertIsNotNone(entity.get('rules'))
if ref:
self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
return entity
def _create_default_mapping_entry(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(url,
body={'mapping': mapping_fixtures.MAPPING_LARGE},
expected_status=201)
return resp
def _get_id_from_response(self, resp):
r = resp.result.get('mapping')
return r.get('id')
def test_mapping_create(self):
resp = self._create_default_mapping_entry()
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_list(self):
url = self.MAPPING_URL
self._create_default_mapping_entry()
resp = self.get(url)
entities = resp.result.get('mappings')
self.assertIsNotNone(entities)
self.assertResponseStatus(resp, 200)
self.assertValidListLinks(resp.result.get('links'))
self.assertEqual(1, len(entities))
def test_mapping_delete(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': str(mapping_id)}
resp = self.delete(url)
self.assertResponseStatus(resp, 204)
self.get(url, expected_status=404)
def test_mapping_get(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_update(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.patch(url,
body={'mapping': mapping_fixtures.MAPPING_SMALL})
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
def test_delete_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.delete(url, expected_status=404)
def test_get_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.get(url, expected_status=404)
def test_create_mapping_bad_requirements(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
def test_create_mapping_no_rules(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
def test_create_mapping_no_remote_objects(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
def test_create_mapping_bad_value(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
def test_create_mapping_missing_local(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
def test_create_mapping_missing_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
def test_create_mapping_wrong_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
def test_create_mapping_extra_remote_properties_not_any_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_any_one_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_just_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_empty_map(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': {}})
def test_create_mapping_extra_rules_properties(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
def test_create_mapping_with_blacklist_and_whitelist(self):
"""Test for adding whitelist and blacklist in the rule
Server should respond with HTTP 400 error upon discovering both
``whitelist`` and ``blacklist`` keywords in the same rule.
"""
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_AND_BLACKLIST
self.put(url, expected_status=400, body={'mapping': mapping})
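    # For reference, a remote rule that triggers the rejection above would
    # combine both filters in one matching rule, e.g. (illustrative, not the
    # exact fixture):
    #
    #     {"type": "orgPersonType",
    #      "whitelist": ["Contractor"],
    #      "blacklist": ["Manager"]}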
class MappingRuleEngineTests(FederationTests):
"""A class for testing the mapping rule engine."""
def assertValidMappedUserObject(self, mapped_properties,
user_type='ephemeral',
domain_id=None):
"""Check whether mapped properties object has 'user' within.
According to today's rules, RuleProcessor does not have to issue user's
id or name. What's actually required is user's type and for ephemeral
users that would be service domain named 'Federated'.
"""
self.assertIn('user', mapped_properties,
message='Missing user object in mapped properties')
user = mapped_properties['user']
self.assertIn('type', user)
self.assertEqual(user_type, user['type'])
self.assertIn('domain', user)
domain = user['domain']
domain_name_or_id = domain.get('id') or domain.get('name')
domain_ref = domain_id or federation.FEDERATED_DOMAIN_KEYWORD
self.assertEqual(domain_ref, domain_name_or_id)
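    # For reference, a minimal ``mapped_properties`` object accepted by the
    # helper above looks roughly like this (values are illustrative):
    #
    #     {'user': {'type': 'ephemeral',
    #               'domain': {'id': 'Federated'}},
    #      'group_ids': [],
    #      'group_names': []}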
def test_rule_engine_any_one_of_and_direct_mapping(self):
"""Should return user's name and group id EMPLOYEE_GROUP_ID.
The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
        This will test the case where `any_one_of` is valid, and there is
        a direct mapping for the user's name.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.ADMIN_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
fn = assertion.get('FirstName')
ln = assertion.get('LastName')
full_name = '%s %s' % (fn, ln)
group_ids = values.get('group_ids')
user_name = values.get('user', {}).get('name')
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
self.assertEqual(full_name, user_name)
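    # A direct-mapping local rule of the kind exercised above interpolates
    # remote values into the user name, roughly (illustrative, not
    # necessarily the exact fixture):
    #
    #     "local": [{"user": {"name": "{0} {1}"}}],
    #     "remote": [{"type": "FirstName"}, {"type": "LastName"}]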
def test_rule_engine_no_regex_match(self):
"""Should deny authorization, the email of the tester won't match.
This will not match since the email in the assertion will fail
the regex test. It is set to match any @example.com address.
But the incoming value is set to [email protected].
        RuleProcessor should return an empty list of group_ids.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.BAD_TESTER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertValidMappedUserObject(mapped_properties)
self.assertIsNone(mapped_properties['user'].get('name'))
self.assertListEqual(list(), mapped_properties['group_ids'])
def test_rule_engine_regex_many_groups(self):
"""Should return group CONTRACTOR_GROUP_ID.
The TESTER_ASSERTION should successfully have a match in
MAPPING_TESTER_REGEX. This will test the case where many groups
are in the assertion, and a regex value is used to try and find
a match.
"""
mapping = mapping_fixtures.MAPPING_TESTER_REGEX
assertion = mapping_fixtures.TESTER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
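    # A regex-based remote rule of the kind matched above ties a pattern to
    # ``any_one_of`` with ``regex`` enabled, e.g. (illustrative, not the
    # exact fixture):
    #
    #     "remote": [{"type": "orgPersonType",
    #                 "any_one_of": [".*Tester.*"],
    #                 "regex": True}]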
def test_rule_engine_any_one_of_many_rules(self):
"""Should return group CONTRACTOR_GROUP_ID.
The CONTRACTOR_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many rules
must be matched, including an `any_one_of`, and a direct
mapping.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_and_direct_mapping(self):
"""Should return user's name and email.
The CUSTOMER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test the case where a requirement
        has `not_any_of`, and a direct mapping to a username with no group.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.CUSTOMER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
        self.assertEqual([], group_ids)
def test_rule_engine_not_any_of_many_rules(self):
"""Should return group EMPLOYEE_GROUP_ID.
The EMPLOYEE_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many remote
rules must be matched, including a `not_any_of`.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_regex_verify_pass(self):
"""Should return group DEVELOPER_GROUP_ID.
The DEVELOPER_ASSERTION should successfully have a match in
MAPPING_DEVELOPER_REGEX. This will test the case where many
remote rules must be matched, including a `not_any_of`, with
regex set to True.
"""
mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
assertion = mapping_fixtures.DEVELOPER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_regex_verify_fail(self):
"""Should deny authorization.
The email in the assertion will fail the regex test.
It is set to reject any @example.org address, but the
incoming value is set to [email protected].
RuleProcessor should return list of empty group_ids.
"""
mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertValidMappedUserObject(mapped_properties)
self.assertIsNone(mapped_properties['user'].get('name'))
self.assertListEqual(list(), mapped_properties['group_ids'])
def _rule_engine_regex_match_and_many_groups(self, assertion):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
A helper function injecting assertion passed as an argument.
Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
"""
mapping = mapping_fixtures.MAPPING_LARGE
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertValidMappedUserObject(values)
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
def test_rule_engine_regex_match_and_many_groups(self):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
The TESTER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test a successful regex match
for an `any_one_of` evaluation type, and will have many
groups returned.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.TESTER_ASSERTION)
def test_rule_engine_discards_nonstring_objects(self):
"""Check whether RuleProcessor discards non string objects.
Despite the fact that assertion is malformed and contains
non string objects, RuleProcessor should correctly discard them and
successfully have a match in MAPPING_LARGE.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.MALFORMED_TESTER_ASSERTION)
def test_rule_engine_fails_after_discarding_nonstring(self):
"""Check whether RuleProcessor discards non string objects.
Expect RuleProcessor to discard non string object, which
is required for a correct rule match. RuleProcessor will result with
empty list of groups.
"""
mapping = mapping_fixtures.MAPPING_SMALL
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
mapped_properties = rp.process(assertion)
self.assertValidMappedUserObject(mapped_properties)
self.assertIsNone(mapped_properties['user'].get('name'))
self.assertListEqual(list(), mapped_properties['group_ids'])
def test_rule_engine_returns_group_names(self):
"""Check whether RuleProcessor returns group names with their domains.
RuleProcessor should return 'group_names' entry with a list of
dictionaries with two entries 'name' and 'domain' identifying group by
its name and domain.
"""
mapping = mapping_fixtures.MAPPING_GROUP_NAMES
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
reference = {
mapping_fixtures.DEVELOPER_GROUP_NAME:
{
"name": mapping_fixtures.DEVELOPER_GROUP_NAME,
"domain": {
"name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME
}
},
mapping_fixtures.TESTER_GROUP_NAME:
{
"name": mapping_fixtures.TESTER_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
def test_rule_engine_whitelist_and_direct_groups_mapping(self):
"""Should return user's groups Developer and Contractor.
The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist'
correctly filters out Manager and only allows Developer and Contractor.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
reference = {
mapping_fixtures.DEVELOPER_GROUP_NAME:
{
"name": mapping_fixtures.DEVELOPER_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
},
mapping_fixtures.CONTRACTOR_GROUP_NAME:
{
"name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
self.assertEqual('tbo', mapped_properties['user']['name'])
self.assertEqual([], mapped_properties['group_ids'])
def test_rule_engine_blacklist_and_direct_groups_mapping(self):
"""Should return user's group Developer.
The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist'
correctly filters out Manager and Developer and only allows Contractor.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
reference = {
mapping_fixtures.CONTRACTOR_GROUP_NAME:
{
"name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
self.assertEqual('tbo', mapped_properties['user']['name'])
self.assertEqual([], mapped_properties['group_ids'])
def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self):
"""Tests matching multiple values before the blacklist.
Verifies that the local indexes are correct when matching multiple
remote values for a field when the field occurs before the blacklist
entry in the remote rules.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
reference = {
mapping_fixtures.CONTRACTOR_GROUP_NAME:
{
"name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
self.assertEqual('tbo', mapped_properties['user']['name'])
self.assertEqual([], mapped_properties['group_ids'])
def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self):
"""Test if the local rule is rejected upon missing domain value
This is a variation with a ``whitelist`` filter.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(mapping['rules'])
self.assertRaises(exception.ValidationError, rp.process, assertion)
def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self):
"""Test if the local rule is rejected upon missing domain value
This is a variation with a ``blacklist`` filter.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(mapping['rules'])
self.assertRaises(exception.ValidationError, rp.process, assertion)
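    # The rejected local rule in the two tests above maps groups without the
    # mandatory ``domain`` entry, e.g. (illustrative):
    #
    #     "local": [{"groups": "{0}"}]    # no "domain" -> ValidationError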
def test_rule_engine_no_groups_allowed(self):
"""Should return user mapped to no groups.
The EMPLOYEE_ASSERTION should successfully have a match
in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out
the group values from the assertion and thus map to no groups.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertListEqual(mapped_properties['group_names'], [])
self.assertListEqual(mapped_properties['group_ids'], [])
self.assertEqual('tbo', mapped_properties['user']['name'])
def test_mapping_federated_domain_specified(self):
"""Test mapping engine when domain 'ephemeral' is explicitely set.
For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion
EMPLOYEE_ASSERTION
"""
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
def test_create_user_object_with_bad_mapping(self):
"""Test if user object is created even with bad mapping.
        The mapping engine always creates a user object as long as there is a
        corresponding local rule. This test shows that even with an assertion
        where no group names or ids are matched, a 'blind' rule for mapping
        the user is enough for such an object to be created.
        In this test MAPPING_EPHEMERAL_USER expects UserName set to jsmith
        whereas the value from the assertion is 'tbo'.
"""
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
self.assertNotIn('id', mapped_properties['user'])
self.assertNotIn('name', mapped_properties['user'])
def test_set_ephemeral_domain_to_ephemeral_users(self):
"""Test auto assigning service domain to ephemeral users.
        Test that ephemeral users will always become members of the federated
        service domain. The check depends on the ``type`` value, which must be
        set to ``ephemeral`` for ephemeral users.
"""
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
def test_local_user_local_domain(self):
"""Test that local users can have non-service domains assigned."""
mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(
mapped_properties, user_type='local',
domain_id=mapping_fixtures.LOCAL_DOMAIN)
def test_user_identifications_name(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
        - Check if the user has proper type set ('ephemeral')
- Check if user's name is properly mapped from the assertion
- Check if user's id is properly set and equal to name, as it was not
        explicitly specified in the mapping.
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
mapped.setup_username({}, mapped_properties)
self.assertEqual('jsmith', mapped_properties['user']['id'])
self.assertEqual('jsmith', mapped_properties['user']['name'])
def test_user_identifications_name_and_federated_domain(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
        - Check if the user has proper type set ('ephemeral')
- Check if user's name is properly mapped from the assertion
- Check if user's id is properly set and equal to name, as it was not
        explicitly specified in the mapping.
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
mapped.setup_username({}, mapped_properties)
self.assertEqual('tbo', mapped_properties['user']['name'])
self.assertEqual('tbo', mapped_properties['user']['id'])
def test_user_identification_id(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
        - Check if the user has proper type set ('ephemeral')
- Check if user's id is properly mapped from the assertion
- Check if user's name is properly set and equal to id, as it was not
        explicitly specified in the mapping.
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.ADMIN_ASSERTION
mapped_properties = rp.process(assertion)
context = {'environment': {}}
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
mapped.setup_username(context, mapped_properties)
self.assertEqual('bob', mapped_properties['user']['name'])
self.assertEqual('bob', mapped_properties['user']['id'])
def test_user_identification_id_and_name(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
- Check if the user has proper type set ('ephemeral')
- Check if user's name is properly mapped from the assertion
        - Check if user's id is properly set and equal to the value hardcoded
in the mapping
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CUSTOMER_ASSERTION
mapped_properties = rp.process(assertion)
context = {'environment': {}}
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
mapped.setup_username(context, mapped_properties)
self.assertEqual('bwilliams', mapped_properties['user']['name'])
self.assertEqual('abc123', mapped_properties['user']['id'])
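    # The local rule exercised above hardcodes the user id while taking the
    # name from the assertion, roughly (illustrative, not necessarily the
    # exact fixture):
    #
    #     "local": [{"user": {"id": "abc123", "name": "{0}"}}]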
class FederatedTokenTests(FederationTests, FederatedSetupMixin):
def auth_plugin_config_override(self):
methods = ['saml2']
method_classes = {'saml2': 'keystone.auth.plugins.saml2.Saml2'}
super(FederatedTokenTests, self).auth_plugin_config_override(
methods, **method_classes)
def setUp(self):
super(FederatedTokenTests, self).setUp()
self._notifications = []
def fake_saml_notify(action, context, user_id, group_ids,
identity_provider, protocol, token_id, outcome):
note = {
'action': action,
'user_id': user_id,
'identity_provider': identity_provider,
'protocol': protocol,
'send_notification_called': True}
self._notifications.append(note)
self.useFixture(mockpatch.PatchObject(
notifications,
'send_saml_audit_notification',
fake_saml_notify))
def _assert_last_notify(self, action, identity_provider, protocol,
user_id=None):
self.assertTrue(self._notifications)
note = self._notifications[-1]
if user_id:
self.assertEqual(note['user_id'], user_id)
self.assertEqual(note['action'], action)
self.assertEqual(note['identity_provider'], identity_provider)
self.assertEqual(note['protocol'], protocol)
self.assertTrue(note['send_notification_called'])
def load_fixtures(self, fixtures):
super(FederationTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def test_issue_unscoped_token_notify(self):
self._issue_unscoped_token()
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL)
def test_issue_unscoped_token(self):
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_disabled_idp(self):
"""Checks if authentication works with disabled identity providers.
Test plan:
1) Disable default IdP
2) Try issuing unscoped token for that IdP
3) Expect server to forbid authentication
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token)
def test_issue_unscoped_token_group_names_in_mapping(self):
r = self._issue_unscoped_token(assertion='ANOTHER_CUSTOMER_ASSERTION')
ref_groups = set([self.group_customers['id'], self.group_admins['id']])
token_resp = r.json_body
token_groups = token_resp['token']['user']['OS-FEDERATION']['groups']
token_groups = set([group['id'] for group in token_groups])
self.assertEqual(ref_groups, token_groups)
def test_issue_unscoped_tokens_nonexisting_group(self):
self.assertRaises(exception.MissingGroups,
self._issue_unscoped_token,
assertion='ANOTHER_TESTER_ASSERTION')
def test_issue_unscoped_token_with_remote_no_attribute(self):
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_saml2_remote(self):
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote_different(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR: uuid.uuid4().hex
})
def test_issue_unscoped_token_with_remote_default_overwritten(self):
"""Test that protocol remote_id_attribute has higher priority.
        Make sure the parameter stored under the ``protocol`` section takes
        priority over the parameter from the default ``federation``
        configuration section.
"""
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.config_fixture.config(group='federation',
remote_id_attribute=uuid.uuid4().hex)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
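    # The precedence exercised above, in keystone.conf terms (illustrative):
    #
    #     [federation]
    #     remote_id_attribute = <default for all protocols>
    #
    #     [saml2]
    #     remote_id_attribute = <takes precedence for the saml2 protocol>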
def test_issue_unscoped_token_with_remote_unavailable(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.ValidationError,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
uuid.uuid4().hex: uuid.uuid4().hex
})
def test_issue_unscoped_token_with_remote_user_as_empty_string(self):
# make sure that REMOTE_USER set as the empty string won't interfere
r = self._issue_unscoped_token(environment={'REMOTE_USER': ''})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_no_groups(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='BAD_TESTER_ASSERTION')
def test_issue_unscoped_token_malformed_environment(self):
"""Test whether non string objects are filtered out.
Put non string objects into the environment, inject
correct assertion and try to get an unscoped token.
Expect server not to fail on using split() method on
non string objects and return token id in the HTTP header.
"""
api = auth_controllers.Auth()
context = {
'environment': {
'malformed_object': object(),
'another_bad_idea': tuple(xrange(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
range(32)))
}
}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_scope_to_project_once_notify(self):
r = self.v3_authenticate_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
user_id = r.json['token']['user']['id']
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id)
def test_scope_to_project_once(self):
r = self.v3_authenticate_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, self.proj_employees['id'])
self._check_scoped_token_attributes(token_resp)
roles_ref = [self.role_employee]
projects_ref = self.proj_employees
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
def test_scope_token_with_idp_disabled(self):
"""Scope token issued by disabled IdP.
Try scoping the token issued by an IdP which is disabled now. Expect
server to refuse scoping operation.
        This test confirms correct behaviour when the IdP was enabled while
        the unscoped token was issued, but was disabled before the user tried
        to scope the token.
Here we assume the unscoped token was already issued and start from
the moment where IdP is being disabled and unscoped token is being
used.
Test plan:
1) Disable IdP
2) Try scoping unscoped token
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.v3_authenticate_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=403)
def test_scope_to_bad_project(self):
"""Scope unscoped token with a project we don't have access to."""
self.v3_authenticate_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=401)
def test_scope_to_project_multiple_times(self):
"""Try to scope the unscoped token multiple times.
The new tokens should be scoped to:
* Customers' project
* Employees' project
"""
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
r = self.v3_authenticate_token(body)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, project_id_ref)
self._check_scoped_token_attributes(token_resp)
def test_scope_to_project_with_only_inherited_roles(self):
"""Try to scope token whose only roles are inherited."""
self.config_fixture.config(group='os_inherit', enabled=True)
r = self.v3_authenticate_token(
self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, self.project_inherited['id'])
self._check_scoped_token_attributes(token_resp)
roles_ref = [self.role_customer]
projects_ref = self.project_inherited
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
self.v3_authenticate_token(
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=404)
def test_issue_token_from_rules_without_user(self):
api = auth_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
self.assertRaises(exception.Unauthorized,
api.authenticate_for_token,
context, self.UNSCOPED_V3_SAML2_REQ)
def test_issue_token_with_nonexistent_group(self):
"""Inject assertion that matches rule issuing bad group id.
Expect server to find out that some groups are missing in the
backend and raise exception.MappedGroupNotFound exception.
"""
self.assertRaises(exception.MappedGroupNotFound,
self._issue_unscoped_token,
assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
r = self.v3_authenticate_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(self.domainA['id'], domain_id)
self._check_scoped_token_attributes(token_resp)
def test_scope_to_domain_multiple_tokens(self):
"""Issue multiple tokens scoping to different domains.
The new tokens should be scoped to:
* domainA
* domainB
* domainC
"""
bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
domain_ids = (self.domainA['id'],
self.domainB['id'],
self.domainC['id'])
for body, domain_id_ref in zip(bodies, domain_ids):
r = self.v3_authenticate_token(body)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(domain_id_ref, domain_id)
self._check_scoped_token_attributes(token_resp)
def test_scope_to_domain_with_only_inherited_roles_fails(self):
"""Try to scope to a domain that has no direct roles."""
self.v3_authenticate_token(
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER,
expected_status=401)
def test_list_projects(self):
urls = ('/OS-FEDERATION/projects', '/auth/projects')
token = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
self.config_fixture.config(group='os_inherit', enabled=True)
projects_refs = (set([self.proj_customers['id'],
self.project_inherited['id']]),
set([self.proj_employees['id'],
self.project_all['id']]),
set([self.proj_employees['id'],
self.project_all['id'],
self.proj_customers['id'],
self.project_inherited['id']]))
for token, projects_ref in zip(token, projects_refs):
for url in urls:
r = self.get(url, token=token)
projects_resp = r.result['projects']
projects = set(p['id'] for p in projects_resp)
self.assertEqual(projects_ref, projects,
'match failed for url %s' % url)
# TODO(samueldmq): Create another test class for role inheritance tests.
# The advantage would be to reduce the complexity of this test class and
    # have tests specific to this functionality grouped, easing readability
    # and maintainability.
def test_list_projects_for_inherited_project_assignment(self):
# Enable os_inherit extension
self.config_fixture.config(group='os_inherit', enabled=True)
# Create a subproject
subproject_inherited = self.new_project_ref(
domain_id=self.domainD['id'],
parent_id=self.project_inherited['id'])
self.resource_api.create_project(subproject_inherited['id'],
subproject_inherited)
# Create an inherited role assignment
self.assignment_api.create_grant(
role_id=self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_inherited['id'],
inherited_to_projects=True)
# Define expected projects from employee assertion, which contain
# the created subproject
expected_project_ids = [self.project_all['id'],
self.proj_employees['id'],
subproject_inherited['id']]
# Assert expected projects for both available URLs
for url in ('/OS-FEDERATION/projects', '/auth/projects'):
r = self.get(url, token=self.tokens['EMPLOYEE_ASSERTION'])
project_ids = [project['id'] for project in r.result['projects']]
self.assertEqual(len(expected_project_ids), len(project_ids))
for expected_project_id in expected_project_ids:
self.assertIn(expected_project_id, project_ids,
'Projects match failed for url %s' % url)
def test_list_domains(self):
urls = ('/OS-FEDERATION/domains', '/auth/domains')
tokens = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
# NOTE(henry-nash): domain D does not appear in the expected results
# since it only had inherited roles (which only apply to projects
# within the domain)
domain_refs = (set([self.domainA['id']]),
set([self.domainA['id'],
self.domainB['id']]),
set([self.domainA['id'],
self.domainB['id'],
self.domainC['id']]))
for token, domains_ref in zip(tokens, domain_refs):
for url in urls:
r = self.get(url, token=token)
domains_resp = r.result['domains']
domains = set(p['id'] for p in domains_resp)
self.assertEqual(domains_ref, domains,
'match failed for url %s' % url)
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
r = self._issue_unscoped_token()
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/OS-FEDERATION/projects',
token=employee_unscoped_token_id)
projects = r.result['projects']
        # Pick one of the available projects uniformly at random (randint
        # minus one double-counted the last element).
        project = random.choice(projects)
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.v3_authenticate_token(v3_scope_request)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project['id'], project_id)
self._check_scoped_token_attributes(token_resp)
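    # The scoping request built by _scope_request() above is a regular v3
    # auth body, roughly (illustrative; the auth method may be 'saml2' or
    # 'token' depending on the test class):
    #
    #     {"auth": {"identity": {"methods": ["saml2"],
    #                            "saml2": {"id": "<unscoped token id>"}},
    #               "scope": {"project": {"id": "<project id>"}}}}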
def test_workflow_with_groups_deletion(self):
"""Test full workflow with groups deletion before token scoping.
The test scenario is as follows:
- Create group ``group``
- Create and assign roles to ``group`` and ``project_all``
- Patch mapping rules for existing IdP so it issues group id
- Issue unscoped token with ``group``'s id
- Delete group ``group``
- Scope token to ``project_all``
- Expect HTTP 500 response
"""
# create group and role
group = self.new_group_ref(
domain_id=self.domainA['id'])
group = self.identity_api.create_group(group)
role = self.new_role_ref()
self.role_api.create_role(role['id'], role)
# assign role to group and project_admins
self.assignment_api.create_grant(role['id'],
group_id=group['id'],
project_id=self.project_all['id'])
rules = {
'rules': [
{
'local': [
{
'group': {
'id': group['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'LastName',
'any_one_of': [
'Account'
]
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
token_id = r.headers.get('X-Subject-Token')
# delete group
self.identity_api.delete_group(group['id'])
# scope token to project_all, expect HTTP 500
scoped_token = self._scope_request(
token_id, 'project',
self.project_all['id'])
self.v3_authenticate_token(scoped_token, expected_status=500)
def test_lists_with_missing_group_in_backend(self):
"""Test a mapping that points to a group that does not exist
For explicit mappings, we expect the group to exist in the backend,
but for lists, specifically blacklists, a missing group is expected
as many groups will be specified by the IdP that are not Keystone
groups.
The test scenario is as follows:
- Create group ``EXISTS``
        - Set mapping rules for the existing IdP that pass
        REMOTE_USER_GROUPS through as a group list
        - Issue unscoped token with group ``EXISTS`` id in it
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
group = self.new_group_ref(domain_id=domain_id)
group['name'] = 'EXISTS'
group = self.identity_api.create_group(group)
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
        self.federation_api.update_mapping(self.mapping['id'], rules)
        # The final step from the test plan above; assertion name and the
        # expected single-group outcome follow the sibling tests (a sketch,
        # since the original body stopped at update_mapping()).
        r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
        assigned_groups = r.json['token']['user']['OS-FEDERATION']['groups']
        self.assertEqual(1, len(assigned_groups))
        self.assertEqual(group['id'], assigned_groups[0]['id'])
def test_empty_blacklist_passess_all_values(self):
"""Test a mapping with empty blacklist specified
Not adding a ``blacklist`` keyword to the mapping rules has the same
effect as adding an empty ``blacklist``.
In both cases, the mapping engine will not discard any groups that are
associated with apache environment variables.
        This test checks the scenario where an empty blacklist is specified.
        The expected result is that all values are allowed.
The test scenario is as follows:
- Create group ``EXISTS``
- Create group ``NO_EXISTS``
        - Set mapping rules for the existing IdP with an empty blacklist
        that passes REMOTE_USER_GROUPS through
- Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
assigned
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = self.new_group_ref(domain_id=domain_id)
group_exists['name'] = 'EXISTS'
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = self.new_group_ref(domain_id=domain_id)
group_no_exists['name'] = 'NO_EXISTS'
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
"blacklist": []
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_not_adding_blacklist_passess_all_values(self):
"""Test a mapping without blacklist specified.
Not adding a ``blacklist`` keyword to the mapping rules has the same
effect as adding an empty ``blacklist``. In both cases all values will
be accepted and passed.
        This test checks the scenario where a blacklist is not specified.
        The expected result is that all values are allowed.
The test scenario is as follows:
- Create group ``EXISTS``
- Create group ``NO_EXISTS``
        - Set mapping rules for the existing IdP without a blacklist,
        passing REMOTE_USER_GROUPS through
        - Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
assigned
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = self.new_group_ref(domain_id=domain_id)
group_exists['name'] = 'EXISTS'
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = self.new_group_ref(domain_id=domain_id)
group_no_exists['name'] = 'NO_EXISTS'
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_empty_whitelist_discards_all_values(self):
"""Test that empty whitelist blocks all the values
Not adding a ``whitelist`` keyword to the mapping value is different
than adding empty whitelist. The former case will simply pass all the
values, whereas the latter would discard all the values.
This test checks scenario where an empty whitelist was specified.
The expected result is that no groups are matched.
The test scenario is as follows:
- Create group ``EXISTS``
- Set mapping rules for existing IdP with an empty whitelist
        that would discard any values from the assertion
- Try issuing unscoped token, expect server to raise
``exception.MissingGroups`` as no groups were matched and ephemeral
user does not have any group assigned.
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
group = self.new_group_ref(domain_id=domain_id)
group['name'] = 'EXISTS'
group = self.identity_api.create_group(group)
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
"whitelist": []
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
self.assertRaises(exception.MissingGroups,
self._issue_unscoped_token,
assertion='UNMATCHED_GROUP_ASSERTION')
def test_not_setting_whitelist_accepts_all_values(self):
"""Test that not setting whitelist passes
Not adding a ``whitelist`` keyword to the mapping value is different
than adding empty whitelist. The former case will simply pass all the
values, whereas the latter would discard all the values.
This test checks a scenario where a ``whitelist`` was not specified.
Expected result is that no groups are ignored.
The test scenario is as follows:
- Create group ``EXISTS``
        - Set mapping rules for the existing IdP without a whitelist,
        so no values from the assertion are discarded
- Issue an unscoped token and make sure ephemeral user is a member of
two groups.
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = self.new_group_ref(domain_id=domain_id)
group_exists['name'] = 'EXISTS'
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = self.new_group_ref(domain_id=domain_id)
group_no_exists['name'] = 'NO_EXISTS'
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
        With ``assertion_prefix`` set to a fixed, non-default value,
issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
Expect server to return unscoped token.
"""
self.config_fixture.config(group='federation',
assertion_prefix=self.ASSERTION_PREFIX)
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
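    # Illustrative keystone.conf counterpart of the override above:
    #
    #     [federation]
    #     assertion_prefix = PREFIX_
    #
    # Only environment variables carrying the prefix are then considered by
    # the mapping engine.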
def test_assertion_prefix_parameter_expect_fail(self):
"""Test parameters filtering based on the prefix.
        With the ``assertion_prefix`` left at its default value (an empty
        string), issue an unscoped token from assertion EMPLOYEE_ASSERTION.
        Next, configure ``assertion_prefix`` to the value ``UserName``.
        Try issuing an unscoped token with EMPLOYEE_ASSERTION.
        Expect the server to raise an exception.Unauthorized exception.
"""
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.config_fixture.config(group='federation',
assertion_prefix='UserName')
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token)
def test_v2_auth_with_federation_token_fails(self):
"""Test that using a federation token with v2 auth fails.
If an admin sets up a federated Keystone environment, and a user
incorrectly configures a service (like Nova) to only use v2 auth, the
returned message should be informative.
"""
r = self._issue_unscoped_token()
token_id = r.headers.get('X-Subject-Token')
self.assertRaises(exception.Unauthorized,
self.token_provider_api.validate_v2_token,
token_id=token_id)
def test_unscoped_token_has_user_domain(self):
r = self._issue_unscoped_token()
self._check_domains_are_valid(r.json_body['token'])
def test_scoped_token_has_user_domain(self):
r = self.v3_authenticate_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
self._check_domains_are_valid(r.result['token'])
def test_issue_unscoped_token_for_local_user(self):
r = self._issue_unscoped_token(assertion='LOCAL_USER_ASSERTION')
token_resp = r.json_body['token']
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertEqual(self.user['id'], token_resp['user']['id'])
self.assertEqual(self.user['name'], token_resp['user']['name'])
self.assertEqual(self.domain['id'], token_resp['user']['domain']['id'])
# Make sure the token is not scoped
self.assertNotIn('project', token_resp)
self.assertNotIn('domain', token_resp)
def test_issue_token_for_local_user_user_not_found(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='ANOTHER_LOCAL_USER_ASSERTION')
class FernetFederatedTokenTests(FederationTests, FederatedSetupMixin):
AUTH_METHOD = 'token'
def load_fixtures(self, fixtures):
super(FernetFederatedTokenTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def auth_plugin_config_override(self):
methods = ['saml2', 'token', 'password']
method_classes = dict(
password='keystone.auth.plugins.password.Password',
token='keystone.auth.plugins.token.Token',
saml2='keystone.auth.plugins.saml2.Saml2')
super(FernetFederatedTokenTests,
self).auth_plugin_config_override(methods, **method_classes)
self.config_fixture.config(
group='token',
provider='keystone.token.providers.fernet.Provider')
self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
def test_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
self.assertEqual(186, len(resp.headers['X-Subject-Token']))
def test_federated_unscoped_token_with_multiple_groups(self):
assertion = 'ANOTHER_CUSTOMER_ASSERTION'
resp = self._issue_unscoped_token(assertion=assertion)
self.assertEqual(204, len(resp.headers['X-Subject-Token']))
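    # Fernet tokens are self-contained: the federated payload embeds group
    # ids, so tokens for users with more groups are longer, as the two
    # length assertions above illustrate.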
def test_validate_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
unscoped_token = resp.headers.get('X-Subject-Token')
# assert that the token we received is valid
self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token})
def test_fernet_full_workflow(self):
"""Test 'standard' workflow for granting Fernet access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
resp = self._issue_unscoped_token()
unscoped_token = resp.headers.get('X-Subject-Token')
resp = self.get('/OS-FEDERATION/projects',
token=unscoped_token)
projects = resp.result['projects']
        # Pick one of the available projects uniformly at random (randint
        # minus one double-counted the last element).
        project = random.choice(projects)
v3_scope_request = self._scope_request(unscoped_token,
'project', project['id'])
resp = self.v3_authenticate_token(v3_scope_request)
token_resp = resp.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project['id'], project_id)
self._check_scoped_token_attributes(token_resp)
class FederatedTokenTestsMethodToken(FederatedTokenTests):
"""Test federation operation with unified scoping auth method.
Test all the operations with auth method set to ``token`` as a new, unified
way for scoping all the tokens.
"""
AUTH_METHOD = 'token'
def auth_plugin_config_override(self):
methods = ['saml2', 'token']
method_classes = dict(
token='keystone.auth.plugins.token.Token',
saml2='keystone.auth.plugins.saml2.Saml2')
super(FederatedTokenTests,
self).auth_plugin_config_override(methods, **method_classes)
class JsonHomeTests(FederationTests, test_v3.JsonHomeTestMixin):
JSON_HOME_DATA = {
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/'
'1.0/rel/identity_provider': {
'href-template': '/OS-FEDERATION/identity_providers/{idp_id}',
'href-vars': {
'idp_id': 'http://docs.openstack.org/api/openstack-identity/3/'
'ext/OS-FEDERATION/1.0/param/idp_id'
},
},
}
def _is_xmlsec1_installed():
p = subprocess.Popen(
['which', 'xmlsec1'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# invert the return code
return not bool(p.wait())
def _load_xml(filename):
with open(os.path.join(XMLDIR, filename), 'r') as xml:
return xml.read()
class SAMLGenerationTests(FederationTests):
SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers'
'/BETA/protocols/saml2/auth')
ISSUER = 'https://acme.com/FIM/sps/openstack/saml20'
RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST'
SUBJECT = 'test_user'
ROLES = ['admin', 'member']
PROJECT = 'development'
SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2'
ECP_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2/ecp'
ASSERTION_VERSION = "2.0"
SERVICE_PROVDIER_ID = 'ACME'
def sp_ref(self):
ref = {
'auth_url': self.SP_AUTH_URL,
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': self.RECIPIENT,
'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
def setUp(self):
super(SAMLGenerationTests, self).setUp()
self.signed_assertion = saml2.create_class_from_xml_string(
saml.Assertion, _load_xml('signed_saml2_assertion.xml'))
self.sp = self.sp_ref()
url = '/OS-FEDERATION/service_providers/' + self.SERVICE_PROVDIER_ID
self.put(url, body={'service_provider': self.sp},
expected_status=201)
def test_samlize_token_values(self):
"""Test the SAML generator produces a SAML object.
Test the SAML generator directly by passing known arguments, the result
should be a SAML object that consistently includes attributes based on
the known arguments that were passed in.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT, self.ROLES,
self.PROJECT)
assertion = response.assertion
self.assertIsNotNone(assertion)
self.assertIsInstance(assertion, saml.Assertion)
issuer = response.issuer
self.assertEqual(self.RECIPIENT, response.destination)
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion.attribute_statement[0].attribute[0]
self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text)
role_attribute = assertion.attribute_statement[0].attribute[1]
for attribute_value in role_attribute.attribute_value:
self.assertIn(attribute_value.text, self.ROLES)
project_attribute = assertion.attribute_statement[0].attribute[2]
self.assertEqual(self.PROJECT,
project_attribute.attribute_value[0].text)
def test_verify_assertion_object(self):
"""Test that the Assertion object is built properly.
The Assertion doesn't need to be signed in this test, so
        the _sign_assertion method is patched and does not alter the
        assertion.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
side_effect=lambda x: x):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT, self.ROLES,
self.PROJECT)
assertion = response.assertion
self.assertEqual(self.ASSERTION_VERSION, assertion.version)
def test_valid_saml_xml(self):
"""Test the generated SAML object can become valid XML.
        Test the generator directly by passing known arguments; the result
        should be a SAML object that consistently includes attributes based
        on the known arguments that were passed in.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT, self.ROLES,
self.PROJECT)
saml_str = response.to_string()
response = etree.fromstring(saml_str)
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion[4][0]
self.assertEqual(self.SUBJECT, user_attribute[0].text)
role_attribute = assertion[4][1]
for attribute_value in role_attribute:
self.assertIn(attribute_value.text, self.ROLES)
project_attribute = assertion[4][2]
self.assertEqual(self.PROJECT, project_attribute[0].text)
def test_assertion_using_explicit_namespace_prefixes(self):
def mocked_subprocess_check_output(*popenargs, **kwargs):
# the last option is the assertion file to be signed
filename = popenargs[0][-1]
with open(filename, 'r') as f:
assertion_content = f.read()
# since we are not testing the signature itself, we can return
# the assertion as is without signing it
return assertion_content
with mock.patch('subprocess.check_output',
side_effect=mocked_subprocess_check_output):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT, self.ROLES,
self.PROJECT)
assertion_xml = response.assertion.to_string()
# make sure we have the proper tag and prefix for the assertion
# namespace
self.assertIn('<saml:Assertion', assertion_xml)
self.assertIn('xmlns:saml="' + saml2.NAMESPACE + '"',
assertion_xml)
self.assertIn('xmlns:xmldsig="' + xmldsig.NAMESPACE + '"',
assertion_xml)
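    # _sign_assertion shells out to the xmlsec1 binary; the mocked
    # subprocess.check_output above stands in for a command along the lines
    # of (illustrative):
    #
    #     xmlsec1 --sign --privkey-pem <certfile> --id-attr:ID Assertion <file>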
def test_saml_signing(self):
"""Test that the SAML generator produces a SAML object.
Test the SAML generator directly by passing known arguments, the result
should be a SAML object that consistently includes attributes based on
the known arguments that were passed in.
"""
if not _is_xmlsec1_installed():
self.skip('xmlsec1 is not installed')
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT, self.ROLES,
self.PROJECT)
signature = response.assertion.signature
self.assertIsNotNone(signature)
self.assertIsInstance(signature, xmldsig.Signature)
idp_public_key = sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
cert_text = signature.key_info.x509_data[0].x509_certificate.text
# NOTE(stevemar): Rather than one line of text, the certificate is
# printed with newlines for readability, we remove these so we can
# match it with the key that we used.
cert_text = cert_text.replace(os.linesep, '')
self.assertEqual(idp_public_key, cert_text)
def _create_generate_saml_request(self, token_id, sp_id):
return {
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": token_id
}
},
"scope": {
"service_provider": {
"id": sp_id
}
}
}
}
def _fetch_valid_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
resp = self.v3_authenticate_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
def _fetch_domain_scoped_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
user_domain_id=self.domain['id'])
resp = self.v3_authenticate_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
def test_not_project_scoped_token(self):
"""Ensure SAML generation fails when passing domain-scoped tokens.
The server should return a 403 Forbidden Action.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_domain_scoped_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=403)
def test_generate_saml_route(self):
"""Test that the SAML generation endpoint produces XML.
The SAML endpoint /v3/auth/OS-FEDERATION/saml2 should take as input,
a scoped token ID, and a Service Provider ID.
The controller should fetch details about the user from the token,
and details about the service provider from its ID.
This should be enough information to invoke the SAML generator and
provide a valid SAML (XML) document back.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
http_response = self.post(self.SAML_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
expected_status=200)
response = etree.fromstring(http_response.result)
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
# NOTE(stevemar): We should test this against expected values,
# but the self.xyz attribute names are uuids, and we mock out
# the result. Ideally we should update the mocked result with
# some known data, and create the roles/project/user before
# these tests run.
user_attribute = assertion[4][0]
self.assertIsInstance(user_attribute[0].text, str)
role_attribute = assertion[4][1]
self.assertIsInstance(role_attribute[0].text, str)
project_attribute = assertion[4][2]
self.assertIsInstance(project_attribute[0].text, str)
def test_invalid_scope_body(self):
"""Test that missing the scope in request body raises an exception.
Raises exception.SchemaValidationError() - error code 400
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
del body['auth']['scope']
self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=400)
def test_invalid_token_body(self):
"""Test that missing the token in request body raises an exception.
Raises exception.SchemaValidationError() - error code 400
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
del body['auth']['identity']['token']
self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=400)
def test_sp_not_found(self):
"""Test SAML generation with an invalid service provider ID.
Raises exception.ServiceProviderNotFound() - error code 404
"""
sp_id = uuid.uuid4().hex
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id, sp_id)
self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=404)
def test_sp_disabled(self):
"""Try generating assertion for disabled Service Provider."""
# Disable Service Provider
sp_ref = {'enabled': False}
self.federation_api.update_sp(self.SERVICE_PROVDIER_ID, sp_ref)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=403)
def test_token_not_found(self):
"""Test that an invalid token in the request body raises an exception.
Raises exception.TokenNotFound() - error code 404
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=404)
def test_generate_ecp_route(self):
"""Test that the ECP generation endpoint produces XML.
The ECP endpoint /v3/auth/OS-FEDERATION/saml2/ecp should take the same
input as the SAML generation endpoint (scoped token ID + Service
Provider ID).
The controller should return a SAML assertion that is wrapped in a
SOAP envelope.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
http_response = self.post(self.ECP_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
expected_status=200)
env_response = etree.fromstring(http_response.result)
header = env_response[0]
# Verify the relay state starts with 'ss:mem'
prefix = CONF.saml.relay_state_prefix
self.assertThat(header[0].text, matchers.StartsWith(prefix))
# Verify that the content in the body matches the expected assertion
body = env_response[1]
response = body[0]
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion[4][0]
self.assertIsInstance(user_attribute[0].text, str)
role_attribute = assertion[4][1]
self.assertIsInstance(role_attribute[0].text, str)
project_attribute = assertion[4][2]
self.assertIsInstance(project_attribute[0].text, str)


class IdPMetadataGenerationTests(FederationTests):
    """A class for testing Identity Provider Metadata generation."""

    METADATA_URL = '/OS-FEDERATION/saml2/metadata'

    def setUp(self):
        super(IdPMetadataGenerationTests, self).setUp()
        self.generator = keystone_idp.MetadataGenerator()

    def config_overrides(self):
        super(IdPMetadataGenerationTests, self).config_overrides()
        self.config_fixture.config(
            group='saml',
            idp_entity_id=federation_fixtures.IDP_ENTITY_ID,
            idp_sso_endpoint=federation_fixtures.IDP_SSO_ENDPOINT,
            idp_organization_name=federation_fixtures.IDP_ORGANIZATION_NAME,
            idp_organization_display_name=(
                federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME),
            idp_organization_url=federation_fixtures.IDP_ORGANIZATION_URL,
            idp_contact_company=federation_fixtures.IDP_CONTACT_COMPANY,
            idp_contact_name=federation_fixtures.IDP_CONTACT_GIVEN_NAME,
            idp_contact_surname=federation_fixtures.IDP_CONTACT_SURNAME,
            idp_contact_email=federation_fixtures.IDP_CONTACT_EMAIL,
            idp_contact_telephone=(
                federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER),
            idp_contact_type=federation_fixtures.IDP_CONTACT_TYPE)

    def test_check_entity_id(self):
        metadata = self.generator.generate_metadata()
        self.assertEqual(federation_fixtures.IDP_ENTITY_ID, metadata.entity_id)

    def test_metadata_validity(self):
        """Call md.EntityDescriptor method that does internal verification."""
        self.generator.generate_metadata().verify()

    def test_serialize_metadata_object(self):
        """Check whether serialization doesn't raise any exceptions."""
        self.generator.generate_metadata().to_string()
        # TODO(marek-denis): Check values here

    def test_check_idp_sso(self):
        metadata = self.generator.generate_metadata()
        idpsso_descriptor = metadata.idpsso_descriptor
        self.assertIsNotNone(metadata.idpsso_descriptor)
        self.assertEqual(federation_fixtures.IDP_SSO_ENDPOINT,
                         idpsso_descriptor.single_sign_on_service.location)

        self.assertIsNotNone(idpsso_descriptor.organization)
        organization = idpsso_descriptor.organization
        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME,
                         organization.organization_display_name.text)
        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_NAME,
                         organization.organization_name.text)
        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_URL,
                         organization.organization_url.text)

        self.assertIsNotNone(idpsso_descriptor.contact_person)
        contact_person = idpsso_descriptor.contact_person
        self.assertEqual(federation_fixtures.IDP_CONTACT_GIVEN_NAME,
                         contact_person.given_name.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_SURNAME,
                         contact_person.sur_name.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_EMAIL,
                         contact_person.email_address.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER,
                         contact_person.telephone_number.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_TYPE,
                         contact_person.contact_type)

    def test_metadata_no_organization(self):
        self.config_fixture.config(
            group='saml',
            idp_organization_display_name=None,
            idp_organization_url=None,
            idp_organization_name=None)
        metadata = self.generator.generate_metadata()
        idpsso_descriptor = metadata.idpsso_descriptor
        self.assertIsNotNone(metadata.idpsso_descriptor)
        self.assertIsNone(idpsso_descriptor.organization)
        self.assertIsNotNone(idpsso_descriptor.contact_person)

    def test_metadata_no_contact_person(self):
        self.config_fixture.config(
            group='saml',
            idp_contact_name=None,
            idp_contact_surname=None,
            idp_contact_email=None,
            idp_contact_telephone=None)
        metadata = self.generator.generate_metadata()
        idpsso_descriptor = metadata.idpsso_descriptor
        self.assertIsNotNone(metadata.idpsso_descriptor)
        self.assertIsNotNone(idpsso_descriptor.organization)
        self.assertEqual([], idpsso_descriptor.contact_person)

    def test_metadata_invalid_contact_type(self):
        self.config_fixture.config(
            group='saml',
            idp_contact_type="invalid")
        self.assertRaises(exception.ValidationError,
                          self.generator.generate_metadata)

    def test_metadata_invalid_idp_sso_endpoint(self):
        self.config_fixture.config(
            group='saml',
            idp_sso_endpoint=None)
        self.assertRaises(exception.ValidationError,
                          self.generator.generate_metadata)

    def test_metadata_invalid_idp_entity_id(self):
        self.config_fixture.config(
            group='saml',
            idp_entity_id=None)
        self.assertRaises(exception.ValidationError,
                          self.generator.generate_metadata)

    def test_get_metadata_with_no_metadata_file_configured(self):
        self.get(self.METADATA_URL, expected_status=500)

    def test_get_metadata(self):
        self.config_fixture.config(
            group='saml', idp_metadata_path=XMLDIR + '/idp_saml2_metadata.xml')
        r = self.get(self.METADATA_URL, response_content_type='text/xml',
                     expected_status=200)
        self.assertEqual('text/xml', r.headers.get('Content-Type'))

        reference_file = _load_xml('idp_saml2_metadata.xml')
        self.assertEqual(reference_file, r.result)


class ServiceProviderTests(FederationTests):
    """A test class for Service Providers."""

    MEMBER_NAME = 'service_provider'
    COLLECTION_NAME = 'service_providers'
    SERVICE_PROVIDER_ID = 'ACME'
    SP_KEYS = ['auth_url', 'id', 'enabled', 'description',
               'relay_state_prefix', 'sp_url']

    def setUp(self):
        super(FederationTests, self).setUp()
        # Add a Service Provider
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.SP_REF = self.sp_ref()
        self.SERVICE_PROVIDER = self.put(
            url, body={'service_provider': self.SP_REF},
            expected_status=201).result

    def sp_ref(self):
        ref = {
            'auth_url': 'https://' + uuid.uuid4().hex + '.com',
            'enabled': True,
            'description': uuid.uuid4().hex,
            'sp_url': 'https://' + uuid.uuid4().hex + '.com',
            'relay_state_prefix': CONF.saml.relay_state_prefix
        }
        return ref

    def base_url(self, suffix=None):
        if suffix is not None:
            return '/OS-FEDERATION/service_providers/' + str(suffix)
        return '/OS-FEDERATION/service_providers'
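
    # Illustrative example (added; suffix value hypothetical):
    #   self.base_url(suffix='ACME') -> '/OS-FEDERATION/service_providers/ACME'
    #   self.base_url()              -> '/OS-FEDERATION/service_providers'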

    def test_get_service_provider(self):
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        resp = self.get(url, expected_status=200)
        self.assertValidEntity(resp.result['service_provider'],
                               keys_to_check=self.SP_KEYS)

    def test_get_service_provider_fail(self):
        url = self.base_url(suffix=uuid.uuid4().hex)
        self.get(url, expected_status=404)

    def test_create_service_provider(self):
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        resp = self.put(url, body={'service_provider': sp},
                        expected_status=201)
        self.assertValidEntity(resp.result['service_provider'],
                               keys_to_check=self.SP_KEYS)

    def test_create_sp_relay_state_default(self):
        """Create an SP without relay state, should default to `ss:mem`."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        del sp['relay_state_prefix']
        resp = self.put(url, body={'service_provider': sp},
                        expected_status=201)
        sp_result = resp.result['service_provider']
        self.assertEqual(CONF.saml.relay_state_prefix,
                         sp_result['relay_state_prefix'])

    def test_create_sp_relay_state_non_default(self):
        """Create an SP with custom relay state."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        non_default_prefix = uuid.uuid4().hex
        sp['relay_state_prefix'] = non_default_prefix
        resp = self.put(url, body={'service_provider': sp},
                        expected_status=201)
        sp_result = resp.result['service_provider']
        self.assertEqual(non_default_prefix,
                         sp_result['relay_state_prefix'])

    def test_create_service_provider_fail(self):
        """Try adding SP object with unallowed attribute."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        sp[uuid.uuid4().hex] = uuid.uuid4().hex
        self.put(url, body={'service_provider': sp},
                 expected_status=400)

    def test_list_service_providers(self):
        """Test listing of service provider objects.

        Add two new service providers. List all available service providers.
        Expect to get a list of three service providers (one created by
        setUp()). Test if attributes match.

        """
        ref_service_providers = {
            uuid.uuid4().hex: self.sp_ref(),
            uuid.uuid4().hex: self.sp_ref(),
        }
        for id, sp in ref_service_providers.items():
            url = self.base_url(suffix=id)
            self.put(url, body={'service_provider': sp}, expected_status=201)

        # Insert ids into service provider object, we will compare it with
        # responses from server and those include 'id' attribute.
        ref_service_providers[self.SERVICE_PROVIDER_ID] = self.SP_REF
        for id, sp in ref_service_providers.items():
            sp['id'] = id

        url = self.base_url()
        resp = self.get(url)
        service_providers = resp.result
        for service_provider in service_providers['service_providers']:
            id = service_provider['id']
            self.assertValidEntity(
                service_provider, ref=ref_service_providers[id],
                keys_to_check=self.SP_KEYS)

    def test_update_service_provider(self):
        """Update existing service provider.

        Update default existing service provider and make sure it has been
        properly changed.

        """
        new_sp_ref = self.sp_ref()
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        resp = self.patch(url, body={'service_provider': new_sp_ref},
                          expected_status=200)
        patch_result = resp.result
        new_sp_ref['id'] = self.SERVICE_PROVIDER_ID
        self.assertValidEntity(patch_result['service_provider'],
                               ref=new_sp_ref,
                               keys_to_check=self.SP_KEYS)

        resp = self.get(url, expected_status=200)
        get_result = resp.result
        self.assertDictEqual(patch_result['service_provider'],
                             get_result['service_provider'])

    def test_update_service_provider_immutable_parameters(self):
        """Update immutable attributes in service provider.

        In this particular case the test will try to change ``id`` attribute.
        The server should return an HTTP 400 error code.

        """
        new_sp_ref = {'id': uuid.uuid4().hex}
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.patch(url, body={'service_provider': new_sp_ref},
                   expected_status=400)

    def test_update_service_provider_unknown_parameter(self):
        new_sp_ref = self.sp_ref()
        new_sp_ref[uuid.uuid4().hex] = uuid.uuid4().hex
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.patch(url, body={'service_provider': new_sp_ref},
                   expected_status=400)

    def test_update_service_provider_404(self):
        new_sp_ref = self.sp_ref()
        new_sp_ref['description'] = uuid.uuid4().hex
        url = self.base_url(suffix=uuid.uuid4().hex)
        self.patch(url, body={'service_provider': new_sp_ref},
                   expected_status=404)

    def test_update_sp_relay_state(self):
        """Update an SP with custom relay state."""
        new_sp_ref = self.sp_ref()
        non_default_prefix = uuid.uuid4().hex
        new_sp_ref['relay_state_prefix'] = non_default_prefix
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        resp = self.patch(url, body={'service_provider': new_sp_ref},
                          expected_status=200)
        sp_result = resp.result['service_provider']
        self.assertEqual(non_default_prefix,
                         sp_result['relay_state_prefix'])

    def test_delete_service_provider(self):
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.delete(url, expected_status=204)

    def test_delete_service_provider_404(self):
        url = self.base_url(suffix=uuid.uuid4().hex)
        self.delete(url, expected_status=404)


class WebSSOTests(FederatedTokenTests):
    """A class for testing Web SSO."""

    SSO_URL = '/auth/OS-FEDERATION/websso/'
    SSO_TEMPLATE_NAME = 'sso_callback_template.html'
    SSO_TEMPLATE_PATH = os.path.join(core.dirs.etc(), SSO_TEMPLATE_NAME)
    TRUSTED_DASHBOARD = 'http://horizon.com'
    ORIGIN = urllib.parse.quote_plus(TRUSTED_DASHBOARD)

    def setUp(self):
        super(WebSSOTests, self).setUp()
        self.api = federation_controllers.Auth()

    def config_overrides(self):
        super(WebSSOTests, self).config_overrides()
        self.config_fixture.config(
            group='federation',
            trusted_dashboard=[self.TRUSTED_DASHBOARD],
            sso_callback_template=self.SSO_TEMPLATE_PATH,
            remote_id_attribute=self.REMOTE_ID_ATTR)

    def test_render_callback_template(self):
        token_id = uuid.uuid4().hex
        resp = self.api.render_html_response(self.TRUSTED_DASHBOARD, token_id)
        self.assertIn(token_id, resp.body)
        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)

    def test_federated_sso_auth(self):
        environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        resp = self.api.federated_sso_auth(context, self.PROTOCOL)
        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)

    def test_federated_sso_auth_bad_remote_id(self):
        environment = {self.REMOTE_ID_ATTR: self.IDP}
        context = {'environment': environment}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.IdentityProviderNotFound,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_missing_query(self):
        environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
        self.assertRaises(exception.ValidationError,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_missing_query_bad_remote_id(self):
        environment = {self.REMOTE_ID_ATTR: self.IDP}
        context = {'environment': environment}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
        self.assertRaises(exception.ValidationError,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_untrusted_dashboard(self):
        environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        query_string = {'origin': uuid.uuid4().hex}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.Unauthorized,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_untrusted_dashboard_bad_remote_id(self):
        environment = {self.REMOTE_ID_ATTR: self.IDP}
        context = {'environment': environment}
        query_string = {'origin': uuid.uuid4().hex}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.Unauthorized,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_missing_remote_id(self):
        context = {'environment': {}}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.Unauthorized,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)


class K2KServiceCatalogTests(FederationTests):
    SP1 = 'SP1'
    SP2 = 'SP2'
    SP3 = 'SP3'

    def setUp(self):
        super(K2KServiceCatalogTests, self).setUp()

        sp = self.sp_ref()
        self.federation_api.create_sp(self.SP1, sp)
        self.sp_alpha = {self.SP1: sp}

        sp = self.sp_ref()
        self.federation_api.create_sp(self.SP2, sp)
        self.sp_beta = {self.SP2: sp}

        sp = self.sp_ref()
        self.federation_api.create_sp(self.SP3, sp)
        self.sp_gamma = {self.SP3: sp}

        self.token_v3_helper = token_common.V3TokenDataHelper()

    def sp_response(self, id, ref):
        ref.pop('enabled')
        ref.pop('description')
        ref.pop('relay_state_prefix')
        ref['id'] = id
        return ref
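
    # Illustrative example (added; values hypothetical) -- note that the
    # pop() calls above mutate the passed-in ref dict in place:
    #   sp_response('SP1', {'auth_url': 'a', 'enabled': True,
    #                       'description': 'd', 'sp_url': 's',
    #                       'relay_state_prefix': 'ss:mem'})
    #   -> {'auth_url': 'a', 'sp_url': 's', 'id': 'SP1'}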

    def sp_ref(self):
        ref = {
            'auth_url': uuid.uuid4().hex,
            'enabled': True,
            'description': uuid.uuid4().hex,
            'sp_url': uuid.uuid4().hex,
            'relay_state_prefix': CONF.saml.relay_state_prefix,
        }
        return ref

    def _validate_service_providers(self, token, ref):
        token_data = token['token']
        self.assertIn('service_providers', token_data)
        self.assertIsNotNone(token_data['service_providers'])
        service_providers = token_data.get('service_providers')
        self.assertEqual(len(ref), len(service_providers))
        for entity in service_providers:
            id = entity.get('id')
            ref_entity = self.sp_response(id, ref.get(id))
            self.assertDictEqual(ref_entity, entity)
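
    # Illustrative token fragment validated above (added; values hypothetical):
    #   {'token': {'service_providers': [
    #       {'id': 'SP1', 'auth_url': '<url>', 'sp_url': '<url>'},
    #       ...]}}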

    def test_service_providers_in_token(self):
        """Check if service providers are listed in service catalog."""
        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
        ref = {}
        for r in (self.sp_alpha, self.sp_beta, self.sp_gamma):
            ref.update(r)
        self._validate_service_providers(token, ref)

    def test_service_providers_in_token_disabled_sp(self):
        """Test behaviour with disabled service providers.

        Disabled service providers should not be listed in the service
        catalog.

        """
        # disable service provider ALPHA
        sp_ref = {'enabled': False}
        self.federation_api.update_sp(self.SP1, sp_ref)

        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
        ref = {}
        for r in (self.sp_beta, self.sp_gamma):
            ref.update(r)
        self._validate_service_providers(token, ref)

    def test_no_service_providers_in_token(self):
        """Test service catalog with disabled service providers.

        There should be no entry ``service_providers`` in the catalog.
        The test passes provided no exception is raised.

        """
        sp_ref = {'enabled': False}
        for sp in (self.SP1, self.SP2, self.SP3):
            self.federation_api.update_sp(sp, sp_ref)
        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
        self.assertNotIn('service_providers', token['token'],
                         message=('Expected Service Catalog not to have '
                                  'service_providers'))
| apache-2.0 | -7,007,309,145,518,074,000 | 39.075824 | 79 | 0.5457 | false |
wangjeaf/CSSCheckStyle | tests/unit/entity/RuleSet.py | 1 | 1922 | from helper import *


def doTest():
    _ruleSet()


def _ruleSet():
    ruleSet = RuleSet(' .test ', ' width:100px;height:100px; ', '/* aa */ ', None)
    ok(not ruleSet.extra, 'ruleset is not extra')
    equal(ruleSet.selector, '.test', 'selector is ok')
    equal(ruleSet.roughSelector, ' .test ', 'roughSelector is ok')
    equal(ruleSet.roughValue, ' width:100px;height:100px; ', 'roughValue is ok')
    equal(ruleSet.roughComment, '/* aa */ ', 'rough comment is ok')
    equal(ruleSet.values, 'width:100px;height:100px;', 'values is ok')
    ok(ruleSet.singleLineFlag, 'it is single line')
    ok(ruleSet.getSingleLineFlag(), 'it is single line')
    equal(ruleSet.getStyleSheet(), None, 'no stylesheet')
    equal(len(ruleSet.getRules()), 0, 'no rules')
    equal(ruleSet.indexOf('width'), -1, 'no _width')
    equal(ruleSet.existNames('width'), False, 'no width again')
    equal(ruleSet.existNames(' _width '), False, 'no rough _width')
    equal(ruleSet.getRuleByName('width'), None, 'can not find width')
    equal(ruleSet.getRuleByRoughName(' _width '), None, 'can not find _width')

    ruleSet.addRuleByStr(' .aaa', ' _width ', ' 100px; ')
    equal(len(ruleSet.getRules()), 1, 'one rule')
    equal(ruleSet.indexOf('_width'), 0, 'found width')
    equal(ruleSet.existNames('width'), True, 'found width again')
    equal(ruleSet.existRoughNames(' _width '), True, 'found rough width')
    equal(ruleSet.getRuleByName('width').value, '100px', 'find width')
    equal(ruleSet.getRuleByRoughName(' _width ').value, '100px', 'find width by rough name')
    equal(ruleSet.getRuleByStrippedName('_width').value, '100px', 'find width by stripped name')

    ruleSet.addRuleByStr(' .aaa', 'height', '100px; ')
    equal(len(ruleSet.getRules()), 2, 'two rules')
    equal(ruleSet.getRules()[0].name, 'width', 'width is first')
    equal(ruleSet.getRules()[1].name, 'height', 'height is second')
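
# Note added for clarity: the API distinction exercised above is between the
# rough* accessors, which keep the author's original whitespace, and the plain
# or stripped accessors, which normalize it. For example (hypothetical rule):
#   ruleSet.getRuleByRoughName(' _width ')    # matches the raw ' _width '
#   ruleSet.getRuleByStrippedName('_width')   # matches the stripped '_width'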
| bsd-3-clause | 5,608,172,689,793,437,000 | 49.578947 | 96 | 0.652966 | false |
szendrei/django-unleashed | codesnippets/snippets/models.py | 1 | 1482 | from django.db import models
from pygments.lexers import get_all_lexers
from django.core.urlresolvers import reverse

LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])


class Snippet(models.Model):
    create_date = models.DateTimeField('created date', auto_now_add=True)
    title = models.CharField(max_length=63, blank=True, default='')
    code = models.TextField()
    language = models.CharField(choices=LANGUAGE_CHOICES, default='python',
                                max_length=100)
    author = models.CharField(max_length=32, blank=True, default='Anonymous')
    slug = models.CharField(max_length=63, unique=True, blank=True, default='')

    class Meta:
        ordering = ['-create_date']

    def __str__(self):
        if len(self.title):
            return self.title
        else:
            return "{} code created at {}".format(self.language,
                                                  self.create_date)

    def get_absolute_url(self):
        return reverse('snippets_snippet_detail', kwargs={'slug': self.slug})

    def get_category_url(self):
        return reverse('snippets_snippet_category_list',
                       kwargs={'language': self.language})

    def get_update_url(self):
        return reverse('snippets_snippet_update', kwargs={'slug': self.slug})

    def get_delete_url(self):
        return reverse('snippets_snippet_delete', kwargs={'slug': self.slug})
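
# Minimal usage sketch (added; assumes a configured Django project with the
# URL names above registered -- not part of the original module):
#
#   snippet = Snippet.objects.create(code="print('hi')", language='python',
#                                    slug='hello-world')
#   snippet.get_absolute_url()  # -> reverse('snippets_snippet_detail',
#                               #            kwargs={'slug': 'hello-world'})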
| gpl-3.0 | -2,873,390,856,656,632,000 | 37 | 79 | 0.62888 | false |
enen92/script.sportscenter | resources/lib/calendar.py | 1 | 14046 | import xbmc,xbmcgui,xbmcaddon,xbmcplugin
import urllib
import thesportsdb
import datetime
import os
import re
import threading
from random import randint
from centerutils.common_variables import *
from centerutils.datemanipulation import *
import competlist as competlist
import teamview as teamview
import contextmenubuilder
import tweetbuild


def start(data_list):
    window = dialog_calendar('DialogCalendar.xml', addonpath, 'Default',
                             str(data_list))
    window.doModal()


class dialog_calendar(xbmcgui.WindowXML):

    def __init__(self, *args, **kwargs):
        xbmcgui.WindowXML.__init__(self)
        self.date_string = eval(args[3])

    def onInit(self):
        self.getControl(911).setImage(addon_fanart)
        self.getControl(333).setLabel('Calendar View')
        self.ignored_leagues = os.listdir(ignoredleaguesfolder)
        self.rmleaguescalendar = os.listdir(ignoreleaguecalendar)
        # Change background if custom is defined
        if settings.getSetting('calendar-background-type') == '1' and settings.getSetting('calendar-background-custom') != '':
            self.getControl(912).setImage(settings.getSetting('calendar-background-custom'))
        # Populate week days
        menu = []
        # grab datetime now and transform into a timezone object based on user timezone
        date_now = datetime.datetime.now()
        date_now_mytz = pytz.timezone(str(pytz.timezone(str(my_location)))).localize(date_now)
        # convert datetime timezone object to the timezone of the database
        date_now_tsdb = date_now_mytz.astimezone(my_location)
        menu.append(('Today, %s' % (date_now_mytz.day),
                     '%s-%s-%s' % (str(date_now_tsdb.year), str(date_now_tsdb.month), str(date_now_tsdb.day))))
        for i in range(7):
            date_now_mytz += datetime.timedelta(days=1)
            date_now_tsdb += datetime.timedelta(days=1)
            if i == 0:
                day_string = '%s, %s' % ('Tomorrow', date_now_mytz.day)
            else:
                day_string = '%s, %s' % (get_weekday(date_now_mytz.weekday()), date_now_mytz.day)
            date_string = '%s-%s-%s' % (str(date_now_tsdb.year), str(date_now_tsdb.month), str(date_now_tsdb.day))
            menu.append((day_string, date_string))
        self.getControl(983).reset()
        for data_string, date in menu:
            menu_entry = xbmcgui.ListItem(data_string)
            menu_entry.setProperty('menu_entry', data_string)
            menu_entry.setProperty('entry_date', date)
            self.getControl(983).addItem(menu_entry)
        # use this to direct navigation to a given date! -TODO
        threading.Thread(name='watcher', target=self.watcher).start()
        if not self.date_string:
            self.setFocusId(983)
            self.getControl(983).selectItem(0)
            self.date_string = menu[0][1]
        self.fill_calendar(self.date_string)
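
        # Example of the menu built above (added; dates illustrative only):
        #   [('Today, 14', '2015-6-14'), ('Tomorrow, 15', '2015-6-15'),
        #    ('Tuesday, 16', '2015-6-16'), ...]
        # The first tuple element is the label shown in control 983; the
        # second is the date string later handed to fill_calendar().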

    def fill_calendar(self, datestring):
        self.getControl(93).setVisible(False)
        items_to_add = []
        self.getControl(94).setPercent(0)
        self.getControl(92).setImage(os.path.join(addonpath, art, 'busy.png'))
        xbmc.executebuiltin("SetProperty(loading,1,home)")
        self.getControl(987).reset()
        # next matches stuff
        event_next_list = thesportsdb.Schedules(tsdbkey).eventsday(datestring, None, None)["events"]
        j = 0
        if event_next_list:
            total_events = len(event_next_list)
            for event in event_next_list:
                event_sport = thesportsdb.Events().get_sport(event)
                event_id = thesportsdb.Events().get_eventid(event)
                # check if event belongs to a blocked sport (strSport)
                if event_sport == 'Soccer' and settings.getSetting('enable-football') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                elif event_sport == 'Basketball' and settings.getSetting('enable-basketball') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                elif event_sport == 'Ice Hockey' and settings.getSetting('enable-icehockey') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                elif event_sport == 'Baseball' and settings.getSetting('enable-baseball') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                elif event_sport == 'Motorsport' and settings.getSetting('enable-motorsport') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                elif event_sport == 'Rugby' and settings.getSetting('enable-rugby') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                elif event_sport == 'Golf' and settings.getSetting('enable-golf') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                elif event_sport == 'American Football' and settings.getSetting('enable-amfootball') == 'false' and settings.getSetting('calendar-disabledsports') == 'true': pass
                else:
                    # get league id and check that the league is not ignored
                    league_id = thesportsdb.Events().get_leagueid(event)
                    if ((league_id + '.txt') in self.ignored_leagues and settings.getSetting('calendar-disabledleagues') == 'true') or ((league_id + '.txt') in self.rmleaguescalendar): pass
                    else:
                        event_fullname = thesportsdb.Events().get_eventtitle(event)
                        event_race = thesportsdb.Events().get_racelocation(event)
                        event_league = thesportsdb.Events().get_league(event)
                        event_sport = thesportsdb.Events().get_sport(event)
                        if event_sport == 'Soccer': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'soccer.png')
                        elif event_sport == 'Basketball': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'basketball.png')
                        elif event_sport == 'Ice Hockey': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'ice%20hockey.png')
                        elif event_sport == 'Baseball': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'baseball.png')
                        elif event_sport == 'Motorsport': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'motorsport.png')
                        elif event_sport == 'Rugby': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'rugby.png')
                        elif event_sport == 'Golf': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'golf.png')
                        elif event_sport == 'American Football': sport_logo = os.path.join(addonpath, art, 'loadingsports', 'american%20football.png')
                        fmt = "%y-%m-%d"
                        fmt_time = "%H:%M"
                        event_datetime = thesportsdb.Events().get_datetime_object(event)
                        if event_datetime:
                            # datetime object conversion goes here
                            db_time = pytz.timezone(str(pytz.timezone(tsdbtimezone))).localize(event_datetime)
                            event_datetime = db_time.astimezone(my_location)
                            event_strtime = thesportsdb.Events().get_time(event)
                            if event_strtime and event_strtime != 'null' and event_strtime != 'None':
                                event_time = event_datetime.strftime(fmt_time)
                                # zero-pad single-digit minutes ('5' -> '05') so
                                # that e.g. 9:05 yields 905 and sorts below 9:50
                                if len(str(event_datetime.minute)) == 1: event_minute = '0' + str(event_datetime.minute)
                                else: event_minute = str(event_datetime.minute)
                                event_order = int(str(event_datetime.hour) + str(event_minute))
                            else:
                                event_time = 'N/A'
                                event_order = 30000
                        else:
                            event_time = 'N/A'
                            event_order = 30000
                        if event_race:
                            home_team_logo = os.path.join(addonpath, art, 'raceflag.png')
                            event_name = thesportsdb.Events().get_eventtitle(event)
                            event_round = ''
                        else:
                            home_team_id = thesportsdb.Events().get_hometeamid(event)
                            home_team_dict = thesportsdb.Lookups(tsdbkey).lookupteam(home_team_id)["teams"][0]
                            if settings.getSetting('team-naming') == '0': home_team_name = thesportsdb.Teams().get_name(home_team_dict)
                            else: home_team_name = thesportsdb.Teams().get_alternativefirst(home_team_dict)
                            home_team_logo = thesportsdb.Teams().get_badge(home_team_dict)
                            stadium_fanart = thesportsdb.Teams().get_stadium_thumb(home_team_dict)
                            away_team_id = thesportsdb.Events().get_awayteamid(event)
                            away_team_dict = thesportsdb.Lookups(tsdbkey).lookupteam(away_team_id)["teams"][0]
                            if settings.getSetting('team-naming') == '0': away_team_name = thesportsdb.Teams().get_name(away_team_dict)
                            else: away_team_name = thesportsdb.Teams().get_alternativefirst(away_team_dict)
                            away_team_logo = thesportsdb.Teams().get_badge(away_team_dict)
                            event_round = thesportsdb.Events().get_round(event)
                            if event_round and event_round != '0':
                                round_label = ' - Round ' + str(event_round)
                                event_league = event_league + round_label
                        game = xbmcgui.ListItem(event_fullname)
                        game.setProperty('HomeTeamLogo', home_team_logo)
                        game.setProperty('league', event_league)
                        game.setProperty('sport_logo', sport_logo)
                        game.setProperty('sport', event_sport)
                        game.setProperty('event_time', event_time)
                        game.setProperty('event_order', str(event_order))
                        game.setProperty('event_id', event_id)
                        if not event_race:
                            if ' ' in home_team_name:
                                if len(home_team_name) > 12: game.setProperty('HomeTeamLong', home_team_name)
                                else: game.setProperty('HomeTeamShort', home_team_name)
                            else: game.setProperty('HomeTeamShort', home_team_name)
                            game.setProperty('AwayTeamLogo', away_team_logo)
                            if ' ' in away_team_name:
                                if len(away_team_name) > 12: game.setProperty('AwayTeamLong', away_team_name)
                                else: game.setProperty('AwayTeamShort', away_team_name)
                            else: game.setProperty('AwayTeamShort', away_team_name)
                            game.setProperty('StadiumThumb', stadium_fanart)
                            game.setProperty('vs', 'VS')
                        try: game.setProperty('date', event_datetime.strftime(fmt))
                        except: pass
                        if event_race:
                            game.setProperty('EventName', event_name)
                        try:
                            date_now_mytz = pytz.timezone(str(pytz.timezone(str(my_location)))).localize(datetime.datetime.now())
                            if event_datetime > date_now_mytz:
                                hour_diff = (event_datetime - date_now_mytz).seconds / 3600
                            else:
                                hour_diff = ((date_now_mytz - event_datetime).seconds / 3600) * (-1)
                            # hide events whose start time lies more than the
                            # configured number of hours in the past
                            if settings.getSetting('calendar-disabledpassed') == 'true' and hour_diff < -int(settings.getSetting('calendar-disabledpassed-delay')): pass
                            else: items_to_add.append(game)
                        except: items_to_add.append(game)
                # progress bar: advance once per event, covering both the
                # events presented and the events filtered out above
                j += 1
                self.getControl(94).setPercent(int(float(j) / total_events * 100))
            # order the items here by start time
            time_array = []
            items_to_add_processed = []
            for item in items_to_add:
                time_array.append(int(item.getProperty('event_order')))
            for timestmp in sorted(time_array):
                for item in items_to_add:
                    itemorder = int(item.getProperty('event_order'))
                    if itemorder == timestmp:
                        items_to_add_processed.append(item)
                        items_to_add.remove(item)
            if items_to_add_processed:
                self.getControl(987).addItems(items_to_add_processed)
            else:
                self.getControl(93).setVisible(True)
                self.getControl(93).setLabel('No events available!')
        else:
            self.getControl(93).setVisible(True)
            self.getControl(93).setLabel('No events available!')
        xbmc.executebuiltin("ClearProperty(loading,Home)")
        xbmc.executebuiltin("ClearProperty(lastmatchview,Home)")
        xbmc.executebuiltin("ClearProperty(plotview,Home)")
        xbmc.executebuiltin("ClearProperty(bannerview,Home)")
        xbmc.executebuiltin("ClearProperty(nextview,Home)")
        xbmc.executebuiltin("ClearProperty(videosview,Home)")
        xbmc.executebuiltin("ClearProperty(jerseyview,Home)")
        xbmc.executebuiltin("ClearProperty(badgeview,Home)")
        xbmc.executebuiltin("ClearProperty(newsview,Home)")
        xbmc.executebuiltin("SetProperty(nextmatchview,1,home)")
        settings.setSetting("view_type_league", 'nextmatchview')
        self.getControl(2).setLabel("League: NextMatchView")
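
        # Worked example of the ordering above (added; times hypothetical):
        # events at 09:05, 21:30 and one without a start time get event_order
        # values 905, 2130 and 30000, so timed events are listed first in
        # kick-off order and events with an unknown time sink to the bottom.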

    def watcher(self):
        while not xbmc.abortRequested:
            rmleaguescalendar = os.listdir(ignoreleaguecalendar)
            if self.rmleaguescalendar != rmleaguescalendar:
                self.rmleaguescalendar = rmleaguescalendar
                self.fill_calendar(self.date_string)
            xbmc.sleep(200)
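
    # Design note (added): the watcher thread polls the ignore-folder every
    # 200 ms and redraws the calendar when its contents change; the loop ends
    # once Kodi signals shutdown via xbmc.abortRequested.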

    def onAction(self, action):
        if action.getId() == 92 or action.getId() == 10:
            self.close()
        elif action.getId() == 117:  # contextmenu
            if xbmc.getCondVisibility("Control.HasFocus(987)"):
                container = 987
                self.specific_id = self.getControl(container).getSelectedItem().getProperty('event_id')
                contextmenubuilder.start(['calendaritem', self.specific_id])

    def onClick(self, controlId):
        if controlId == 983:
            listControl = self.getControl(controlId)
            selected_date = listControl.getSelectedItem().getProperty('entry_date')
            self.date_string = selected_date
            self.fill_calendar(selected_date)
        elif controlId == 980 or controlId == 984 or controlId == 985 or controlId == 981:
            self.team = self.getControl(controlId).getSelectedItem().getProperty('team_id')
            teamview.start([self.team, self.sport, '', 'plotview'])
        elif controlId == 2:
            active_view_type = self.getControl(controlId).getLabel()
            if active_view_type == "League: PlotView":
                self.setvideosview()
            elif active_view_type == "League: VideosView":
                self.setbannerview()
            elif active_view_type == "League: BannerView":
                self.setbadgeview()
            elif active_view_type == "League: BadgeView":
                self.setjerseyview()
            elif active_view_type == "League: JerseyView":
                self.setnewsview()
            elif active_view_type == "League: NewsView":
                self.setnextmatchview()
            elif active_view_type == "League: NextMatchView":
                self.setlastmatchview()
            elif active_view_type == "League: LastMatchView":
                self.setplotview()
        elif controlId == 989:
            youtube_id = self.getControl(989).getSelectedItem().getProperty('video_id')
            xbmc.executebuiltin('PlayMedia(plugin://plugin.video.youtube/play/?video_id=' + youtube_id + ')')
        elif controlId == 986:
            news_content = self.getControl(986).getSelectedItem().getProperty('content')
            news_title = self.getControl(986).getSelectedItem().getProperty('title')
            news_image = self.getControl(986).getSelectedItem().getProperty('news_img')
            self.getControl(939).setImage(news_image)
            self.getControl(937).setText(news_content)
            self.getControl(938).setLabel(news_title)
| gpl-2.0 | -670,272,421,011,220,700 | 45.664452 | 174 | 0.698206 | false |