Column statistics: repo_name (string, 5-92 chars), path (string, 4-232 chars), copies (string, 19 classes), size (string, 4-7 chars), content (string, 721-1.04M chars), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51-99.9), line_max (int64, 15-997), alpha_frac (float64, 0.25-0.97), autogenerated (bool, 1 class)

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
rdmorganiser/rdmo | rdmo/questions/admin.py | 1 | 3411 |
from django import forms
from django.contrib import admin
from django.db import models
from rdmo.core.utils import get_language_fields
from .models import Catalog, Question, QuestionSet, Section
from .validators import (CatalogLockedValidator, CatalogUniqueURIValidator,
QuestionLockedValidator, QuestionSetLockedValidator,
QuestionSetUniqueURIValidator,
QuestionUniqueURIValidator, SectionLockedValidator,
SectionUniqueURIValidator)
class CatalogAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = Catalog
fields = '__all__'
def clean(self):
CatalogUniqueURIValidator(self.instance)(self.cleaned_data)
CatalogLockedValidator(self.instance)(self.cleaned_data)
class SectionAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = Section
fields = '__all__'
def clean(self):
SectionUniqueURIValidator(self.instance)(self.cleaned_data)
SectionLockedValidator(self.instance)(self.cleaned_data)
class QuestionSetAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = QuestionSet
fields = '__all__'
def clean(self):
QuestionSetUniqueURIValidator(self.instance)(self.cleaned_data)
QuestionSetLockedValidator(self.instance)(self.cleaned_data)
class QuestionAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = Question
fields = '__all__'
def clean(self):
QuestionUniqueURIValidator(self.instance)(self.cleaned_data)
QuestionLockedValidator(self.instance)(self.cleaned_data)
class CatalogAdmin(admin.ModelAdmin):
form = CatalogAdminForm
search_fields = ['uri'] + get_language_fields('title')
list_display = ('uri', 'title', 'projects_count', 'available')
readonly_fields = ('uri', )
list_filter = ('available', )
def get_queryset(self, request):
return super().get_queryset(request) \
.annotate(projects_count=models.Count('projects'))
def projects_count(self, obj):
return obj.projects_count
class SectionAdmin(admin.ModelAdmin):
form = SectionAdminForm
search_fields = ['uri'] + get_language_fields('title')
list_display = ('uri', 'title')
readonly_fields = ('uri', 'path')
list_filter = ('catalog', )
class QuestionSetAdmin(admin.ModelAdmin):
form = QuestionSetAdminForm
search_fields = ['uri'] + get_language_fields('title') + get_language_fields('help')
list_display = ('uri', 'attribute', 'is_collection')
readonly_fields = ('uri', 'path')
list_filter = ('section__catalog', 'section', 'is_collection')
class QuestionItemAdmin(admin.ModelAdmin):
form = QuestionAdminForm
search_fields = ['uri'] + get_language_fields('help') + get_language_fields('text')
list_display = ('uri', 'attribute', 'text', 'is_collection')
readonly_fields = ('uri', 'path')
list_filter = ('questionset__section__catalog', 'questionset__section', 'is_collection', 'widget_type', 'value_type')
admin.site.register(Catalog, CatalogAdmin)
admin.site.register(Section, SectionAdmin)
admin.site.register(QuestionSet, QuestionSetAdmin)
admin.site.register(Question, QuestionItemAdmin)
| apache-2.0 | -897,750,510,695,204,200 | 30.293578 | 121 | 0.6781 | false |
Neetuj/softlayer-python | SoftLayer/CLI/block/snapshot_list.py | 1 | 1781 |
"""List block storage snapshots."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import columns as column_helper
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
COLUMNS = [
column_helper.Column(
'id',
('snapshots', 'id',),
mask='snapshots.id'),
column_helper.Column('name', ('snapshots', 'notes',),
mask='snapshots.notes'),
column_helper.Column('created',
('snapshots', 'snapshotCreationTimestamp',),
mask='snapshots.snapshotCreationTimestamp'),
column_helper.Column('size_bytes', ('snapshots', 'snapshotSizeBytes',),
mask='snapshots.snapshotSizeBytes'),
]
DEFAULT_COLUMNS = [
'id',
'name',
'created',
'size_bytes'
]
@click.command()
@click.argument('volume_id')
@click.option('--sortby', help='Column to sort by',
default='created')
@click.option('--columns',
callback=column_helper.get_formatter(COLUMNS),
help='Columns to display. Options: {0}'.format(
', '.join(column.name for column in COLUMNS)),
default=','.join(DEFAULT_COLUMNS))
@environment.pass_env
def cli(env, sortby, columns, volume_id):
"""List block storage snapshots."""
block_manager = SoftLayer.BlockStorageManager(env.client)
snapshots = block_manager.get_block_volume_snapshot_list(
volume_id=volume_id,
mask=columns.mask(),
)
table = formatting.Table(columns.columns)
table.sortby = sortby
for snapshot in snapshots:
table.add_row([value or formatting.blank()
for value in columns.row(snapshot)])
env.fout(table)
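# Editor's note: illustrative usage only, not part of the original module. Wired up
# as a `block snapshot-list` subcommand of the SoftLayer CLI, a hypothetical
# invocation (the volume id is made up) would look like:
#
#     slcli block snapshot-list 12345678 --sortby id --columns id,name,created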
| mit | 8,083,339,656,087,134,000 | 29.706897 | 75 | 0.613139 | false |
AndriesSHP/Gellish | CommunicatorSource/GellishDict.py | 1 | 5979 |
class GellishDict(dict):
''' A dictionary for names in a context that refer to the denoted concepts.
The roles of the names are indicated by alias relation_type_UIDs, such as for <is a code for>:
key = name_in_context(tuple) = (languageUID, communityUID, name).
value = value_triple = (UID, naming_relation_type_UID, description)
'''
def __init__(self, name):
self.name = name
def add_name_in_context(self, name_in_context, value_triple):
if name_in_context not in self:
            #self.key = name_in_context(tuple) = (languageUID, communityUID, name).
#self.value = value_triple = (UID, naming_relation_type_UID, description)
self[name_in_context] = value_triple
print('add: ',name_in_context, self[name_in_context])
else:
value_triple2 = self.find_anything(name_in_context)
print('Error: Name in context: %s, %s is already known by uid (%s)' % (name_in_context, value_triple, value_triple2))
def find_anything(self, q_name_in_context):
if q_name_in_context in self:
print('Found: ', q_name_in_context, self[q_name_in_context])
return(self[q_name_in_context])
else:
print('Not found: ',q_name_in_context)
return(None)
def filter_on_key(self, q_string, string_commonality):
"""Search for q-string in the third part of the key of the dictionary,
where key = term_in_context = (language_uid, community_uid, name).
Returns a list of items (key, value_triple) that contain q_string as the third part of the key.
        Example item: term_in_context, value_triple = ((910036, 193259, "anything"), (730000, 5117, 'descr'))
"""
# a list of tuples of [(key0, val0]), (key1, val1), ...]
items = self.items()
result_list = []
# create a filter function that returns true if
# 0) q_string is equal to the third position of the first(key) field of an item:
# case sensitive identical
# 1) q_string is in that field:
# case sensitive partially identical
# 2) q_string is in that field and starts with that string
# case sensitive front end identical
# 3), 4), 5) idem, but case insensitive
string_commonalities = ['csi', 'cspi', 'csfi', 'cii', 'cipi', 'cifi']
if string_commonality == string_commonalities[0]:
filt = lambda item: q_string == item[0][2]
elif string_commonality == string_commonalities[1]:
filt = lambda item: q_string in item[0][2]
elif string_commonality == string_commonalities[2]:
filt = lambda item: item[0][2].startswith(q_string)
elif string_commonality == string_commonalities[3]:
filt = lambda item: q_string.lower() == item[0][2].lower()
elif string_commonality == string_commonalities[4]:
filt = lambda item: q_string.lower() in item[0][2].lower()
elif string_commonality == string_commonalities[5]:
filt = lambda item: item[0][2].lower().startswith(q_string.lower())
else:
print('Error: string commonality %s unknown' % (string_commonality))
filt = ''
# use the filter to create a *list* of items that match the filter
result_list = filter(filt, items)
# convert the list to a Gellish dictionary
#result = GellishDict(result_list)
# and return the resulting list of filtered items
return(result_list)
class Preferences(dict):
'''A dictionary for preferences and defaults for the owner of the table of preferences'''
def __init__(self, dict_name):
self.name = dict_name
#----------------------------------------------------------------------------
if __name__ == "__main__":
d = GellishDict('Dictionary')
d[1, 4, "anything"] = (730000, 5117, 'what can be thought of')
d[1, 4, "THING"] = (2,1, 'thing')
d[1, 5, "pump"] = (4,1, 'that is intended to ...')
d[2, 5, "pomp"] = (4,1, 'die bedoeld is om ...')
d[3, 5, "Pumpe"] = (4,2, 'der is geeignet zu ...')
d[1, 5, "Pump"] = (4,1, 'synonym of pump')
print('Dictionary-0: ',d)
n = (2, 5, "iets")
v = (730000, 5117, 'waar aan gedacht kan worden.')
d.add_name_in_context(n,v)
print('Dictionary-1: ',d)
n2 = (2, 5, "iets")
v2 = (1, 1, 'verkeerde UID')
d.add_name_in_context(n2,v2)
print('Dictionary-2: ',d)
# print all items that have "pump" as the third field in the key:
candidates = d.filter_on_key("pump",'csi')
for candidate in candidates:
print ("case sensitive identical (pump): ",candidate)
# print all items that contain "Pu" at the front end of the third field of the key:
candidates = d.filter_on_key("Pu",'csfi')
for candidate in candidates:
print ("case sensitive front end identical (Pu): ",candidate)
# print all items that contain "ump" as a string somewhere in the third field of the key:
candidates = d.filter_on_key("ump",'cspi')
for candidate in candidates:
print ("case sensitive partially identical (ump): ",candidate)
# print all items that have "pump" as the third field in the key:
candidates = d.filter_on_key("pump",'cii')
for candidate in candidates:
print ("case insensitive identical (pump): ",candidate)
# print all items that contain "pu" at the front end of the third field of the key:
candidates = d.filter_on_key("pu",'cifi')
for candidate in candidates:
print ("case insensitive front end identical (pu): ",candidate)
# print all items that contain "i" as a string somewhere in the third field of the key:
candidates = d.filter_on_key("i",'cipi')
for candidate in candidates:
print ("case insensitive partially identical (i): ",candidate)
| gpl-3.0 | 2,889,238,609,246,712,000 | 46.07874 | 129 | 0.597592 | false |
jslootbeek/roundware-server | roundware/urls.py | 1 | 1451 |
# Roundware Server is released under the GNU Affero General Public License v3.
# See COPYRIGHT.txt, AUTHORS.txt, and LICENSE.txt in the project root directory.
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, url
# Loading static files for debug mode
from django.conf.urls.static import static
from django.conf.urls import include
from django.contrib import admin
from adminplus.sites import AdminSitePlus
from roundware.rw import urls as rw_urls
admin.site = AdminSitePlus()
admin.sites.site = admin.site
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^tools/asset-map$', 'rw.views.asset_map'),
url(r'^tools/listen-map$', 'rw.views.listen_map'),
url(r'^dashboard/$', 'rw.views.chart_views'),
# V1 DRF API
url(r'^api/1/', include('roundware.api1.urls')),
# V2 RESTful DRF API
url(r'^api/2/', include('roundware.api2.urls')),
# Use Django Admin login as overall login
url(r'^accounts/login/$', 'django.contrib.auth.views.login',
{'template_name': 'admin/login.html'}),
url(r'^admin/', include(admin.site.urls)),
url(r'^rw/', include(rw_urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
admin.site.site_header = 'Roundware Administration'
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| agpl-3.0 | 6,197,231,107,093,475,000 | 28.02 | 80 | 0.694693 | false |
ingenieroariel/geonode | geonode/base/models.py | 1 | 49348 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import datetime
import math
import os
import re
import logging
import traceback
import uuid
import urllib
import urllib2
import cookielib
from geonode.decorators import on_ogc_backend
from pyproj import transform, Proj
from urlparse import urljoin, urlsplit
from django.db import models
from django.core import serializers
from django.db.models import Q, signals
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
from django.conf import settings
from django.contrib.staticfiles.templatetags import staticfiles
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.core.files.storage import default_storage as storage
from django.core.files.base import ContentFile
from django.contrib.gis.geos import GEOSGeometry
from mptt.models import MPTTModel, TreeForeignKey
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
from agon_ratings.models import OverallRating
from geonode import geoserver
from geonode.base.enumerations import ALL_LANGUAGES, \
HIERARCHY_LEVELS, UPDATE_FREQUENCIES, \
DEFAULT_SUPPLEMENTAL_INFORMATION, LINK_TYPES
from geonode.utils import bbox_to_wkt
from geonode.utils import forward_mercator
from geonode.security.models import PermissionLevelMixin
from taggit.managers import TaggableManager, _TaggableManager
from taggit.models import TagBase, ItemBase
from treebeard.mp_tree import MP_Node
from geonode.people.enumerations import ROLE_VALUES
from oauthlib.common import generate_token
from oauth2_provider.models import AccessToken, get_application_model
logger = logging.getLogger(__name__)
class ContactRole(models.Model):
"""
ContactRole is an intermediate model to bind Profiles as Contacts to Resources and apply roles.
"""
resource = models.ForeignKey('ResourceBase', blank=True, null=True)
contact = models.ForeignKey(settings.AUTH_USER_MODEL)
role = models.CharField(
choices=ROLE_VALUES,
max_length=255,
help_text=_(
'function performed by the responsible '
'party'))
def clean(self):
"""
Make sure there is only one poc and author per resource
"""
if (self.role == self.resource.poc_role) or (
self.role == self.resource.metadata_author_role):
contacts = self.resource.contacts.filter(
contactrole__role=self.role)
if contacts.count() == 1:
# only allow this if we are updating the same contact
if self.contact != contacts.get():
raise ValidationError(
'There can be only one %s for a given resource' %
self.role)
if self.contact.user is None:
# verify that any unbound contact is only associated to one
# resource
bounds = ContactRole.objects.filter(contact=self.contact).count()
if bounds > 1:
                raise ValidationError(
                    'There can be one and only one resource linked to an unbound contact')
elif bounds == 1:
# verify that if there was one already, it corresponds to this
# instance
if ContactRole.objects.filter(
contact=self.contact).get().id != self.id:
                    raise ValidationError(
                        'There can be one and only one resource linked to an unbound contact')
class Meta:
unique_together = (("contact", "resource", "role"),)
class TopicCategory(models.Model):
"""
Metadata about high-level geographic data thematic classification.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_MD_TopicCategoryCode">
"""
identifier = models.CharField(max_length=255, default='location')
description = models.TextField(default='')
gn_description = models.TextField(
'GeoNode description', default='', null=True)
is_choice = models.BooleanField(default=True)
fa_class = models.CharField(max_length=64, default='fa-times')
def __unicode__(self):
return u"{0}".format(self.gn_description)
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Topic Categories'
class SpatialRepresentationType(models.Model):
"""
Metadata information about the spatial representation type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_SpatialRepresentationTypeCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.CharField(max_length=255, editable=False)
gn_description = models.CharField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Spatial Representation Types'
class RegionManager(models.Manager):
def get_by_natural_key(self, code):
return self.get(code=code)
class Region(MPTTModel):
# objects = RegionManager()
code = models.CharField(max_length=50, unique=True)
name = models.CharField(max_length=255)
parent = TreeForeignKey(
'self',
null=True,
blank=True,
related_name='children')
# Save bbox values in the database.
# This is useful for spatial searches and for generating thumbnail images
# and metadata records.
bbox_x0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_x1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
srid = models.CharField(max_length=255, default='EPSG:4326')
def __unicode__(self):
return self.name
@property
def bbox(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return [
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
self.srid]
@property
def bbox_string(self):
"""BBOX is in the format: [x0,y0,x1,y1]."""
return ",".join([str(self.bbox_x0), str(self.bbox_y0),
str(self.bbox_x1), str(self.bbox_y1)])
@property
def geographic_bounding_box(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return bbox_to_wkt(
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
srid=self.srid)
class Meta:
ordering = ("name",)
verbose_name_plural = 'Metadata Regions'
class MPTTMeta:
order_insertion_by = ['name']
class RestrictionCodeType(models.Model):
"""
Metadata information about the spatial representation type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_RestrictionCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.TextField(max_length=255, editable=False)
gn_description = models.TextField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Restriction Code Types'
class Backup(models.Model):
identifier = models.CharField(max_length=255, editable=False)
name = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True, blank=True)
description = models.TextField(null=True, blank=True)
base_folder = models.CharField(max_length=100)
location = models.TextField(null=True, blank=True)
class Meta:
ordering = ("date", )
verbose_name_plural = 'Backups'
class License(models.Model):
identifier = models.CharField(max_length=255, editable=False)
name = models.CharField(max_length=100)
abbreviation = models.CharField(max_length=20, null=True, blank=True)
description = models.TextField(null=True, blank=True)
url = models.URLField(max_length=2000, null=True, blank=True)
license_text = models.TextField(null=True, blank=True)
def __unicode__(self):
return self.name
@property
def name_long(self):
if self.abbreviation is None or len(self.abbreviation) == 0:
return self.name
else:
return self.name + " (" + self.abbreviation + ")"
@property
def description_bullets(self):
if self.description is None or len(self.description) == 0:
return ""
else:
bullets = []
lines = self.description.split("\n")
for line in lines:
bullets.append("+ " + line)
return bullets
class Meta:
ordering = ("name", )
verbose_name_plural = 'Licenses'
class HierarchicalKeyword(TagBase, MP_Node):
node_order_by = ['name']
@classmethod
def dump_bulk_tree(cls, parent=None, keep_ids=True):
"""Dumps a tree branch to a python data structure."""
qset = cls._get_serializable_model().get_tree(parent)
ret, lnk = [], {}
for pyobj in qset:
serobj = serializers.serialize('python', [pyobj])[0]
# django's serializer stores the attributes in 'fields'
fields = serobj['fields']
depth = fields['depth']
fields['text'] = fields['name']
fields['href'] = fields['slug']
del fields['name']
del fields['slug']
del fields['path']
del fields['numchild']
del fields['depth']
if 'id' in fields:
# this happens immediately after a load_bulk
del fields['id']
newobj = {}
for field in fields:
newobj[field] = fields[field]
if keep_ids:
newobj['id'] = serobj['pk']
if (not parent and depth == 1) or\
(parent and depth == parent.depth):
ret.append(newobj)
else:
parentobj = pyobj.get_parent()
parentser = lnk[parentobj.pk]
if 'nodes' not in parentser:
parentser['nodes'] = []
parentser['nodes'].append(newobj)
lnk[pyobj.pk] = newobj
return ret
class TaggedContentItem(ItemBase):
content_object = models.ForeignKey('ResourceBase')
tag = models.ForeignKey('HierarchicalKeyword', related_name='keywords')
# see https://github.com/alex/django-taggit/issues/101
@classmethod
def tags_for(cls, model, instance=None):
if instance is not None:
return cls.tag_model().objects.filter(**{
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**{
'%s__content_object__isnull' % cls.tag_relname(): False
}).distinct()
class _HierarchicalTagManager(_TaggableManager):
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# If str_tags has 0 elements Django actually optimizes that to not do a
# query. Malcolm is very smart.
existing = self.through.tag_model().objects.filter(
name__in=str_tags
)
tag_objs.update(existing)
for new_tag in str_tags - set(t.name for t in existing):
tag_objs.add(HierarchicalKeyword.add_root(name=new_tag))
for tag in tag_objs:
try:
self.through.objects.get_or_create(
tag=tag, **self._lookup_kwargs())
except Exception as e:
logger.exception(e)
class Thesaurus(models.Model):
"""
Loadable thesaurus containing keywords in different languages
"""
identifier = models.CharField(
max_length=255,
null=False,
blank=False,
unique=True)
# read from the RDF file
title = models.CharField(max_length=255, null=False, blank=False)
# read from the RDF file
date = models.CharField(max_length=20, default='')
# read from the RDF file
description = models.TextField(max_length=255, default='')
slug = models.CharField(max_length=64, default='')
def __unicode__(self):
return u"{0}".format(self.identifier)
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Thesauri'
class ThesaurusKeyword(models.Model):
"""
Loadable thesaurus containing keywords in different languages
"""
# read from the RDF file
about = models.CharField(max_length=255, null=True, blank=True)
# read from the RDF file
alt_label = models.CharField(
max_length=255,
default='',
null=True,
blank=True)
thesaurus = models.ForeignKey('Thesaurus', related_name='thesaurus')
def __unicode__(self):
return u"{0}".format(self.alt_label)
class Meta:
ordering = ("alt_label",)
verbose_name_plural = 'Thesaurus Keywords'
unique_together = (("thesaurus", "alt_label"),)
class ThesaurusKeywordLabel(models.Model):
"""
Loadable thesaurus containing keywords in different languages
"""
# read from the RDF file
lang = models.CharField(max_length=3)
# read from the RDF file
label = models.CharField(max_length=255)
# note = models.CharField(max_length=511)
keyword = models.ForeignKey('ThesaurusKeyword', related_name='keyword')
def __unicode__(self):
return u"{0}".format(self.label)
class Meta:
ordering = ("keyword", "lang")
verbose_name_plural = 'Labels'
unique_together = (("keyword", "lang"),)
class ResourceBaseManager(PolymorphicManager):
def admin_contact(self):
# this assumes there is at least one superuser
superusers = get_user_model().objects.filter(is_superuser=True).order_by('id')
if superusers.count() == 0:
raise RuntimeError(
'GeoNode needs at least one admin/superuser set')
return superusers[0]
def get_queryset(self):
return super(
ResourceBaseManager,
self).get_queryset().non_polymorphic()
def polymorphic_queryset(self):
return super(ResourceBaseManager, self).get_queryset()
class ResourceBase(PolymorphicModel, PermissionLevelMixin, ItemBase):
"""
Base Resource Object loosely based on ISO 19115:2003
"""
VALID_DATE_TYPES = [(x.lower(), _(x))
for x in ['Creation', 'Publication', 'Revision']]
date_help_text = _('reference date for the cited resource')
date_type_help_text = _('identification of when a given event occurred')
edition_help_text = _('version of the cited resource')
abstract_help_text = _(
'brief narrative summary of the content of the resource(s)')
purpose_help_text = _(
'summary of the intentions with which the resource(s) was developed')
maintenance_frequency_help_text = _(
'frequency with which modifications and deletions are made to the data after '
'it is first produced')
keywords_help_text = _(
'commonly used word(s) or formalised word(s) or phrase(s) used to describe the subject '
        '(space or comma-separated)')
tkeywords_help_text = _(
'formalised word(s) or phrase(s) from a fixed thesaurus used to describe the subject '
        '(space or comma-separated)')
regions_help_text = _('keyword identifies a location')
restriction_code_type_help_text = _(
'limitation(s) placed upon the access or use of the data.')
constraints_other_help_text = _(
'other restrictions and legal prerequisites for accessing and using the resource or'
' metadata')
license_help_text = _('license of the dataset')
language_help_text = _('language used within the dataset')
category_help_text = _(
'high-level geographic data thematic classification to assist in the grouping and search of '
'available geographic data sets.')
spatial_representation_type_help_text = _(
'method used to represent geographic information in the dataset.')
temporal_extent_start_help_text = _(
'time period covered by the content of the dataset (start)')
temporal_extent_end_help_text = _(
'time period covered by the content of the dataset (end)')
data_quality_statement_help_text = _(
'general explanation of the data producer\'s knowledge about the lineage of a'
' dataset')
# internal fields
uuid = models.CharField(max_length=36)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name='owned_resource',
verbose_name=_("Owner"))
contacts = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='ContactRole')
title = models.CharField(_('title'), max_length=255, help_text=_(
'name by which the cited resource is known'))
alternate = models.CharField(max_length=128, null=True, blank=True)
date = models.DateTimeField(
_('date'),
default=datetime.datetime.now,
help_text=date_help_text)
date_type = models.CharField(
_('date type'),
max_length=255,
choices=VALID_DATE_TYPES,
default='publication',
help_text=date_type_help_text)
edition = models.CharField(
_('edition'),
max_length=255,
blank=True,
null=True,
help_text=edition_help_text)
abstract = models.TextField(
_('abstract'),
max_length=2000,
blank=True,
help_text=abstract_help_text)
purpose = models.TextField(
_('purpose'),
max_length=500,
null=True,
blank=True,
help_text=purpose_help_text)
maintenance_frequency = models.CharField(
_('maintenance frequency'),
max_length=255,
choices=UPDATE_FREQUENCIES,
blank=True,
null=True,
help_text=maintenance_frequency_help_text)
keywords = TaggableManager(
_('keywords'),
through=TaggedContentItem,
blank=True,
help_text=keywords_help_text,
manager=_HierarchicalTagManager)
tkeywords = models.ManyToManyField(
ThesaurusKeyword,
help_text=tkeywords_help_text,
blank=True)
regions = models.ManyToManyField(
Region,
verbose_name=_('keywords region'),
blank=True,
help_text=regions_help_text)
restriction_code_type = models.ForeignKey(
RestrictionCodeType,
verbose_name=_('restrictions'),
help_text=restriction_code_type_help_text,
null=True,
blank=True,
limit_choices_to=Q(
is_choice=True))
constraints_other = models.TextField(
_('restrictions other'),
blank=True,
null=True,
help_text=constraints_other_help_text)
license = models.ForeignKey(License, null=True, blank=True,
verbose_name=_("License"),
help_text=license_help_text)
language = models.CharField(
_('language'),
max_length=3,
choices=ALL_LANGUAGES,
default='eng',
help_text=language_help_text)
category = models.ForeignKey(
TopicCategory,
null=True,
blank=True,
limit_choices_to=Q(
is_choice=True),
help_text=category_help_text)
spatial_representation_type = models.ForeignKey(
SpatialRepresentationType,
null=True,
blank=True,
limit_choices_to=Q(
is_choice=True),
verbose_name=_("spatial representation type"),
help_text=spatial_representation_type_help_text)
# Section 5
temporal_extent_start = models.DateTimeField(
_('temporal extent start'),
blank=True,
null=True,
help_text=temporal_extent_start_help_text)
temporal_extent_end = models.DateTimeField(
_('temporal extent end'),
blank=True,
null=True,
help_text=temporal_extent_end_help_text)
supplemental_information = models.TextField(
_('supplemental information'),
max_length=2000,
default=DEFAULT_SUPPLEMENTAL_INFORMATION,
help_text=_('any other descriptive information about the dataset'))
# Section 8
data_quality_statement = models.TextField(
_('data quality statement'),
max_length=2000,
blank=True,
null=True,
help_text=data_quality_statement_help_text)
group = models.ForeignKey(Group, null=True, blank=True)
# Section 9
# see metadata_author property definition below
# Save bbox values in the database.
# This is useful for spatial searches and for generating thumbnail images
# and metadata records.
bbox_x0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_x1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y0 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
bbox_y1 = models.DecimalField(
max_digits=19,
decimal_places=10,
blank=True,
null=True)
srid = models.CharField(max_length=255, default='EPSG:4326')
# CSW specific fields
csw_typename = models.CharField(
_('CSW typename'),
max_length=32,
default='gmd:MD_Metadata',
null=False)
csw_schema = models.CharField(_('CSW schema'),
max_length=64,
default='http://www.isotc211.org/2005/gmd',
null=False)
csw_mdsource = models.CharField(
_('CSW source'),
max_length=256,
default='local',
null=False)
csw_insert_date = models.DateTimeField(
_('CSW insert date'), auto_now_add=True, null=True)
csw_type = models.CharField(
_('CSW type'),
max_length=32,
default='dataset',
null=False,
choices=HIERARCHY_LEVELS)
csw_anytext = models.TextField(_('CSW anytext'), null=True, blank=True)
csw_wkt_geometry = models.TextField(
_('CSW WKT geometry'),
null=False,
default='POLYGON((-180 -90,-180 90,180 90,180 -90,-180 -90))')
# metadata XML specific fields
metadata_uploaded = models.BooleanField(default=False)
metadata_uploaded_preserve = models.BooleanField(default=False)
metadata_xml = models.TextField(
null=True,
default='<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd"/>',
blank=True)
popular_count = models.IntegerField(default=0)
share_count = models.IntegerField(default=0)
featured = models.BooleanField(_("Featured"), default=False, help_text=_(
        'Should this resource be advertised on the home page?'))
is_published = models.BooleanField(
_("Is Published"),
default=True,
help_text=_('Should this resource be published and searchable?'))
is_approved = models.BooleanField(
_("Approved"),
default=False,
        help_text=_('Has this resource been validated by a publisher or editor?'))
# fields necessary for the apis
thumbnail_url = models.TextField(null=True, blank=True)
detail_url = models.CharField(max_length=255, null=True, blank=True)
rating = models.IntegerField(default=0, null=True, blank=True)
def __unicode__(self):
return self.title
@property
def group_name(self):
if self.group:
return str(self.group)
return None
@property
def bbox(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return [
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
self.srid]
@property
def bbox_string(self):
"""BBOX is in the format: [x0,y0,x1,y1]."""
return ",".join([str(self.bbox_x0), str(self.bbox_y0),
str(self.bbox_x1), str(self.bbox_y1)])
@property
def geographic_bounding_box(self):
"""BBOX is in the format: [x0,x1,y0,y1]."""
return bbox_to_wkt(
self.bbox_x0,
self.bbox_x1,
self.bbox_y0,
self.bbox_y1,
srid=self.srid)
@property
def license_light(self):
a = []
if not self.license:
return ''
if (not (self.license.name is None)) and (len(self.license.name) > 0):
a.append(self.license.name)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("(" + self.license.url + ")")
return " ".join(a)
@property
def license_verbose(self):
a = []
if (not (self.license.name_long is None)) and (
len(self.license.name_long) > 0):
a.append(self.license.name_long + ":")
if (not (self.license.description is None)) and (
len(self.license.description) > 0):
a.append(self.license.description)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("(" + self.license.url + ")")
return " ".join(a)
@property
def metadata_completeness(self):
required_fields = [
'abstract',
'category',
'data_quality_statement',
'date',
'date_type',
'language',
'license',
'regions',
'title']
if self.restriction_code_type == 'otherRestrictions':
required_fields.append('constraints_other')
filled_fields = []
for required_field in required_fields:
field = getattr(self, required_field, None)
if field:
                if required_field == 'license':
                    if field.name == 'Not Specified':
                        continue
                if required_field == 'regions':
                    if not field.all():
                        continue
                if required_field == 'category':
                    if not field.identifier:
                        continue
filled_fields.append(field)
return '{}%'.format(len(filled_fields) * 100 / len(required_fields))
def keyword_list(self):
return [kw.name for kw in self.keywords.all()]
def keyword_slug_list(self):
return [kw.slug for kw in self.keywords.all()]
def region_name_list(self):
return [region.name for region in self.regions.all()]
def spatial_representation_type_string(self):
if hasattr(self.spatial_representation_type, 'identifier'):
return self.spatial_representation_type.identifier
else:
if hasattr(self, 'storeType'):
if self.storeType == 'coverageStore':
return 'grid'
return 'vector'
else:
return None
@property
def keyword_csv(self):
keywords_qs = self.get_real_instance().keywords.all()
if keywords_qs:
return ','.join([kw.name for kw in keywords_qs])
else:
return ''
def set_latlon_bounds(self, box):
"""
Set the four bounds in lat lon projection
"""
self.bbox_x0 = box[0]
self.bbox_x1 = box[1]
self.bbox_y0 = box[2]
self.bbox_y1 = box[3]
def set_bounds_from_center_and_zoom(self, center_x, center_y, zoom):
"""
Calculate zoom level and center coordinates in mercator.
"""
self.center_x = center_x
self.center_y = center_y
self.zoom = zoom
deg_len_equator = 40075160 / 360
        # convert center to lat, lon
def get_lon_lat():
wgs84 = Proj(init='epsg:4326')
mercator = Proj(init='epsg:3857')
lon, lat = transform(mercator, wgs84, center_x, center_y)
return lon, lat
# calculate the degree length at this latitude
def deg_len():
lon, lat = get_lon_lat()
            return math.cos(math.radians(lat)) * deg_len_equator
lon, lat = get_lon_lat()
# taken from http://wiki.openstreetmap.org/wiki/Zoom_levels
# it might be not precise but enough for the purpose
        distance_per_pixel = 40075160 * math.cos(math.radians(lat)) / 2**(zoom + 8)
# calculate the distance from the center of the map in degrees
# we use the calculated degree length on the x axis and the
        # normal degree length on the y axis, assuming that it does not change
# Assuming a map of 1000 px of width and 700 px of height
distance_x_degrees = distance_per_pixel * 500 / deg_len()
distance_y_degrees = distance_per_pixel * 350 / deg_len_equator
self.bbox_x0 = lon - distance_x_degrees
self.bbox_x1 = lon + distance_x_degrees
self.bbox_y0 = lat - distance_y_degrees
self.bbox_y1 = lat + distance_y_degrees
def set_bounds_from_bbox(self, bbox):
"""
Calculate zoom level and center coordinates in mercator.
:param bbox: BBOX is in the format: [x0, x1, y0, y1], which is:
[min lon, max lon, min lat, max lat] or
[xmin, xmax, ymin, ymax]
:type bbox: list
"""
self.set_latlon_bounds(bbox)
minx, maxx, miny, maxy = [float(c) for c in bbox]
x = (minx + maxx) / 2
y = (miny + maxy) / 2
(center_x, center_y) = forward_mercator((x, y))
xdiff = maxx - minx
ydiff = maxy - miny
zoom = 0
if xdiff > 0 and ydiff > 0:
width_zoom = math.log(360 / xdiff, 2)
height_zoom = math.log(360 / ydiff, 2)
zoom = math.ceil(min(width_zoom, height_zoom))
self.zoom = zoom
self.center_x = center_x
self.center_y = center_y
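    # Editor's note (illustrative, not part of the original code): for a bbox of
    # [-10, 10, -5, 5] the spans are xdiff=20 and ydiff=10, so the zoom candidates
    # are log2(360/20) ~ 4.17 and log2(360/10) ~ 5.17; the method keeps
    # ceil(min(...)) == 5 and centres the map at lon=0, lat=0 (mercator 0, 0).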
def download_links(self):
"""assemble download links for pycsw"""
links = []
for url in self.link_set.all():
if url.link_type == 'metadata': # avoid recursion
continue
if url.link_type == 'html':
links.append(
(self.title,
'Web address (URL)',
'WWW:LINK-1.0-http--link',
url.url))
elif url.link_type in ('OGC:WMS', 'OGC:WFS', 'OGC:WCS'):
links.append((self.title, url.name, url.link_type, url.url))
else:
description = '%s (%s Format)' % (self.title, url.name)
links.append(
(self.title,
description,
'WWW:DOWNLOAD-1.0-http--download',
url.url))
return links
def get_tiles_url(self):
"""Return URL for Z/Y/X mapping clients or None if it does not exist.
"""
try:
tiles_link = self.link_set.get(name='Tiles')
except Link.DoesNotExist:
return None
else:
return tiles_link.url
def get_legend(self):
"""Return Link for legend or None if it does not exist.
"""
try:
legends_link = self.link_set.get(name='Legend')
except Link.DoesNotExist:
return None
except Link.MultipleObjectsReturned:
return None
else:
return legends_link
def get_legend_url(self):
"""Return URL for legend or None if it does not exist.
The legend can be either an image (for Geoserver's WMS)
or a JSON object for ArcGIS.
"""
legend = self.get_legend()
if legend is None:
return None
return legend.url
def get_ows_url(self):
"""Return URL for OGC WMS server None if it does not exist.
"""
try:
ows_link = self.link_set.get(name='OGC:WMS')
except Link.DoesNotExist:
return None
else:
return ows_link.url
def get_thumbnail_url(self):
"""Return a thumbnail url.
It could be a local one if it exists, a remote one (WMS GetImage) for example
or a 'Missing Thumbnail' one.
"""
local_thumbnails = self.link_set.filter(name='Thumbnail')
if local_thumbnails.count() > 0:
return local_thumbnails[0].url
remote_thumbnails = self.link_set.filter(name='Remote Thumbnail')
if remote_thumbnails.count() > 0:
return remote_thumbnails[0].url
return staticfiles.static(settings.MISSING_THUMBNAIL)
def has_thumbnail(self):
"""Determine if the thumbnail object exists and an image exists"""
return self.link_set.filter(name='Thumbnail').exists()
def save_thumbnail(self, filename, image):
upload_to = 'thumbs/'
upload_path = os.path.join('thumbs/', filename)
try:
if storage.exists(upload_path):
# Delete if exists otherwise the (FileSystemStorage) implementation
# will create a new file with a unique name
storage.delete(os.path.join(upload_path))
storage.save(upload_path, ContentFile(image))
url_path = os.path.join(
settings.MEDIA_URL,
upload_to,
filename).replace(
'\\',
'/')
url = urljoin(settings.SITEURL, url_path)
Link.objects.get_or_create(resource=self,
url=url,
defaults=dict(
name='Thumbnail',
extension='png',
mime='image/png',
link_type='image',
))
ResourceBase.objects.filter(id=self.id).update(
thumbnail_url=url
)
except Exception:
logger.error(
'Error when generating the thumbnail for resource %s.' %
self.id)
logger.error('Check permissions for file %s.' % upload_path)
def set_missing_info(self):
"""Set default permissions and point of contacts.
It is mandatory to call it from descendant classes
but hard to enforce technically via signals or save overriding.
"""
from guardian.models import UserObjectPermission
logger.debug('Checking for permissions.')
        # True if at least one custom (per-user) permission exists for this object.
        has_custom_permissions = UserObjectPermission.objects.filter(
            content_type=ContentType.objects.get_for_model(
                self.get_self_resource()), object_pk=str(
                self.pk)).exists()
        if not has_custom_permissions:
logger.debug(
'There are no permissions for this object, setting default perms.')
self.set_default_permissions()
user = None
if self.owner:
user = self.owner
else:
try:
user = ResourceBase.objects.admin_contact().user
except BaseException:
pass
if user:
if self.poc is None:
self.poc = user
if self.metadata_author is None:
self.metadata_author = user
def maintenance_frequency_title(self):
return [v for i, v in enumerate(
UPDATE_FREQUENCIES) if v[0] == self.maintenance_frequency][0][1].title()
def language_title(self):
return [v for i, v in enumerate(
ALL_LANGUAGES) if v[0] == self.language][0][1].title()
def _set_poc(self, poc):
# reset any poc assignation to this resource
ContactRole.objects.filter(
role='pointOfContact',
resource=self).delete()
# create the new assignation
ContactRole.objects.create(
role='pointOfContact',
resource=self,
contact=poc)
def _get_poc(self):
try:
the_poc = ContactRole.objects.get(
role='pointOfContact', resource=self).contact
except ContactRole.DoesNotExist:
the_poc = None
return the_poc
poc = property(_get_poc, _set_poc)
def _set_metadata_author(self, metadata_author):
# reset any metadata_author assignation to this resource
ContactRole.objects.filter(role='author', resource=self).delete()
# create the new assignation
ContactRole.objects.create(
role='author',
resource=self,
contact=metadata_author)
def _get_metadata_author(self):
try:
the_ma = ContactRole.objects.get(
role='author', resource=self).contact
except ContactRole.DoesNotExist:
the_ma = None
return the_ma
def handle_moderated_uploads(self):
if settings.ADMIN_MODERATE_UPLOADS:
self.is_published = False
metadata_author = property(_get_metadata_author, _set_metadata_author)
objects = ResourceBaseManager()
class Meta:
# custom permissions,
# add, change and delete are standard in django-guardian
permissions = (
('view_resourcebase', 'Can view resource'),
('change_resourcebase_permissions', 'Can change resource permissions'),
('download_resourcebase', 'Can download resource'),
('publish_resourcebase', 'Can publish resource'),
('change_resourcebase_metadata', 'Can change resource metadata'),
)
class LinkManager(models.Manager):
"""Helper class to access links grouped by type
"""
def data(self):
return self.get_queryset().filter(link_type='data')
def image(self):
return self.get_queryset().filter(link_type='image')
def download(self):
return self.get_queryset().filter(link_type__in=['image', 'data'])
def metadata(self):
return self.get_queryset().filter(link_type='metadata')
def original(self):
return self.get_queryset().filter(link_type='original')
def geogig(self):
return self.get_queryset().filter(name__icontains='geogig')
def ows(self):
return self.get_queryset().filter(
link_type__in=['OGC:WMS', 'OGC:WFS', 'OGC:WCS'])
class Link(models.Model):
"""Auxiliary model for storing links for resources.
This helps avoiding the need for runtime lookups
to the OWS server or the CSW Catalogue.
    There are several types of links:
* original: For uploaded files (Shapefiles or GeoTIFFs)
* data: For WFS and WCS links that allow access to raw data
* image: For WMS and TMS links
* metadata: For CSW links
* OGC:WMS: for WMS service links
* OGC:WFS: for WFS service links
* OGC:WCS: for WCS service links
"""
resource = models.ForeignKey(ResourceBase, blank=True, null=True)
extension = models.CharField(
max_length=255,
help_text=_('For example "kml"'))
link_type = models.CharField(
max_length=255, choices=[
(x, x) for x in LINK_TYPES])
name = models.CharField(max_length=255, help_text=_(
'For example "View in Google Earth"'))
mime = models.CharField(max_length=255,
help_text=_('For example "text/xml"'))
url = models.TextField(max_length=1000)
objects = LinkManager()
def __str__(self):
return '%s link' % self.link_type
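# Editor's note: a small illustrative sketch (not part of the original module) of how
# the LinkManager helpers above are typically queried; `resource` is a hypothetical
# ResourceBase instance:
#
#     Link.objects.filter(resource=resource)   # all links of one resource
#     Link.objects.ows()                        # OGC:WMS / OGC:WFS / OGC:WCS links
#     Link.objects.download()                   # 'image' and 'data' links
#     Link.objects.original()                   # links to the originally uploaded files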
def resourcebase_post_save(instance, *args, **kwargs):
"""
Used to fill any additional fields after the save.
Has to be called by the children
"""
# we need to remove stale links
for link in instance.link_set.all():
if link.name == "External Document":
if link.resource.doc_url != link.url:
link.delete()
else:
if urlsplit(settings.SITEURL).hostname not in link.url:
link.delete()
try:
ResourceBase.objects.filter(id=instance.id).update(
thumbnail_url=instance.get_thumbnail_url(),
detail_url=instance.get_absolute_url(),
csw_insert_date=datetime.datetime.now())
except BaseException:
pass
try:
instance.thumbnail_url = instance.get_thumbnail_url()
instance.detail_url = instance.get_absolute_url()
instance.csw_insert_date = datetime.datetime.now()
finally:
instance.set_missing_info()
try:
if instance.regions and instance.regions.all():
"""
try:
queryset = instance.regions.all().order_by('name')
for region in queryset:
print ("%s : %s" % (region.name, region.geographic_bounding_box))
except:
tb = traceback.format_exc()
else:
tb = None
finally:
if tb:
logger.debug(tb)
"""
pass
else:
srid1, wkt1 = instance.geographic_bounding_box.split(";")
srid1 = re.findall(r'\d+', srid1)
poly1 = GEOSGeometry(wkt1, srid=int(srid1[0]))
poly1.transform(4326)
queryset = Region.objects.all().order_by('name')
global_regions = []
regions_to_add = []
for region in queryset:
try:
srid2, wkt2 = region.geographic_bounding_box.split(";")
srid2 = re.findall(r'\d+', srid2)
poly2 = GEOSGeometry(wkt2, srid=int(srid2[0]))
poly2.transform(4326)
if poly2.intersection(poly1):
regions_to_add.append(region)
if region.level == 0 and region.parent is None:
global_regions.append(region)
except BaseException:
tb = traceback.format_exc()
if tb:
logger.debug(tb)
if regions_to_add or global_regions:
if regions_to_add and len(
regions_to_add) > 0 and len(regions_to_add) <= 30:
instance.regions.add(*regions_to_add)
else:
instance.regions.add(*global_regions)
except BaseException:
tb = traceback.format_exc()
if tb:
logger.debug(tb)
# set default License if no specified
if instance.license is None:
no_license = License.objects.filter(name="Not Specified")
if no_license and len(no_license) > 0:
instance.license = no_license[0]
def rating_post_save(instance, *args, **kwargs):
"""
Used to fill the average rating field on OverallRating change.
"""
ResourceBase.objects.filter(
id=instance.object_id).update(
rating=instance.rating)
signals.post_save.connect(rating_post_save, sender=OverallRating)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def do_login(sender, user, request, **kwargs):
"""
Take action on user login. Generate a new user access_token to be shared
with GeoServer, and store it into the request.session
"""
if user and user.is_authenticated():
token = None
try:
Application = get_application_model()
app = Application.objects.get(name="GeoServer")
# Lets create a new one
token = generate_token()
AccessToken.objects.get_or_create(
user=user,
application=app,
expires=datetime.datetime.now() +
datetime.timedelta(
days=1),
token=token)
except BaseException:
u = uuid.uuid1()
token = u.hex
# Do GeoServer Login
url = "%s%s?access_token=%s" % (settings.OGC_SERVER['default']['PUBLIC_LOCATION'],
'ows?service=wms&version=1.3.0&request=GetCapabilities',
token)
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
jsessionid = None
try:
opener.open(url)
for c in cj:
if c.name == "JSESSIONID":
jsessionid = c.value
except BaseException:
u = uuid.uuid1()
jsessionid = u.hex
request.session['access_token'] = token
request.session['JSESSIONID'] = jsessionid
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def do_logout(sender, user, request, **kwargs):
"""
Take action on user logout. Cleanup user access_token and send logout
request to GeoServer
"""
if 'access_token' in request.session:
try:
Application = get_application_model()
app = Application.objects.get(name="GeoServer")
# Lets delete the old one
try:
old = AccessToken.objects.get(user=user, application=app)
except BaseException:
pass
else:
old.delete()
except BaseException:
pass
# Do GeoServer Logout
if 'access_token' in request.session:
access_token = request.session['access_token']
else:
access_token = None
if access_token:
url = "%s%s?access_token=%s" % (settings.OGC_SERVER['default']['PUBLIC_LOCATION'],
settings.OGC_SERVER['default']['LOGOUT_ENDPOINT'],
access_token)
header_params = {
"Authorization": ("Bearer %s" % access_token)
}
else:
url = "%s%s" % (settings.OGC_SERVER['default']['PUBLIC_LOCATION'],
settings.OGC_SERVER['default']['LOGOUT_ENDPOINT'])
param = {}
data = urllib.urlencode(param)
cookies = None
for cook in request.COOKIES:
name = str(cook)
value = request.COOKIES.get(name)
if name == 'csrftoken':
header_params['X-CSRFToken'] = value
cook = "%s=%s" % (name, value)
if not cookies:
cookies = cook
else:
cookies = cookies + '; ' + cook
if cookies:
if 'JSESSIONID' in request.session and request.session['JSESSIONID']:
cookies = cookies + '; JSESSIONID=' + \
request.session['JSESSIONID']
header_params['Cookie'] = cookies
gs_request = urllib2.Request(url, data, header_params)
try:
urllib2.urlopen(gs_request)
except BaseException:
tb = traceback.format_exc()
if tb:
logger.debug(tb)
if 'access_token' in request.session:
del request.session['access_token']
request.session.modified = True
user_logged_in.connect(do_login)
user_logged_out.connect(do_logout)
| gpl-3.0 | -7,080,191,159,422,798,000 | 32.661664 | 101 | 0.582212 | false |
anish/buildbot | master/buildbot/steps/source/gitlab.py | 1 | 2149 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python import log
from buildbot.steps.source.git import Git
class GitLab(Git):
"""
Source step that knows how to handle merge requests from
the GitLab change source
"""
def startVC(self, branch, revision, patch):
# If this is a merge request:
if self.build.hasProperty("target_branch"):
target_repourl = self.build.getProperty("target_git_ssh_url", None)
if self.repourl != target_repourl:
log.msg("GitLab.startVC: note: GitLab step for merge requests"
" should probably have repourl='%s' instead of '%s'?" %
(target_repourl, self.repourl))
# This step is (probably) configured to fetch the target
# branch of a merge (because it is impractical for users to
# configure one builder for each of the infinite number of
# possible source branches for merge requests).
# Point instead to the source being proposed for merge.
branch = self.build.getProperty("source_branch", None)
# FIXME: layering violation, should not be modifying self here?
self.repourl = self.build.getProperty("source_git_ssh_url", None)
# The revision is unlikely to exist in the repo already,
# so tell Git to not check.
revision = None
super(GitLab, self).startVC(branch, revision, patch)
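# Editor's note: illustrative only, not part of the original module. This step is
# configured like the plain Git step in a master.cfg; the repourl below is
# hypothetical:
#
#     from buildbot.plugins import steps
#     factory.addStep(steps.GitLab(repourl='git@gitlab.example.com:group/project.git',
#                                  mode='incremental'))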
| gpl-2.0 | -870,936,740,008,805,600 | 43.770833 | 79 | 0.670079 | false |
parasgithub/PrairieLearn | elements/pl-symbolic-input/pl-symbolic-input.py | 1 | 17204 |
import prairielearn as pl
import lxml.html
from html import escape
import chevron
import sympy
import random
import math
import python_helper_sympy as phs
def get_variables_list(variables_string):
if variables_string is not None:
variables_list = [variable.strip() for variable in variables_string.split(',')]
return variables_list
else:
return []
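# Illustrative example (editor's addition): get_variables_list('x, y, z') returns
# ['x', 'y', 'z'], while a missing attribute (None) yields [].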
def prepare(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
required_attribs = ['answers-name']
optional_attribs = ['weight', 'correct-answer', 'variables', 'label', 'display', 'allow-complex', 'imaginary-unit-for-display']
pl.check_attribs(element, required_attribs, optional_attribs)
name = pl.get_string_attrib(element, 'answers-name')
correct_answer = pl.get_string_attrib(element, 'correct-answer', None)
if correct_answer is not None:
        if name in data['correct_answers']:
            raise Exception('duplicate correct-answers variable name: %s' % name)
        data['correct_answers'][name] = correct_answer
imaginary_unit = pl.get_string_attrib(element, 'imaginary-unit-for-display', 'i')
if not (imaginary_unit == 'i' or imaginary_unit == 'j'):
raise Exception('imaginary-unit-for-display must be either i or j')
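# Editor's note: an illustrative question-HTML usage of this element, based on the
# attributes validated in prepare() above (the names and answer are made up):
#
#     <pl-symbolic-input answers-name="ans" variables="x, y"
#                        correct-answer="x**2 + y" allow-complex="false">
#     </pl-symbolic-input>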
def render(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
label = pl.get_string_attrib(element, 'label', None)
variables_string = pl.get_string_attrib(element, 'variables', None)
variables = get_variables_list(variables_string)
display = pl.get_string_attrib(element, 'display', 'inline')
allow_complex = pl.get_boolean_attrib(element, 'allow-complex', False)
imaginary_unit = pl.get_string_attrib(element, 'imaginary-unit-for-display', 'i')
if data['panel'] == 'question':
editable = data['editable']
raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
operators = ', '.join(['cos', 'sin', 'tan', 'exp', 'log', 'sqrt', '( )', '+', '-', '*', '/', '^', '**'])
constants = ', '.join(['pi, e'])
info_params = {
'format': True,
'variables': variables_string,
'operators': operators,
'constants': constants,
'allow_complex': allow_complex,
}
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
info = chevron.render(f, info_params).strip()
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
info_params.pop('format', None)
info_params['shortformat'] = True
shortinfo = chevron.render(f, info_params).strip()
html_params = {
'question': True,
'name': name,
'label': label,
'editable': editable,
'info': info,
'shortinfo': shortinfo,
'uuid': pl.get_uuid(),
'allow_complex': allow_complex,
}
partial_score = data['partial_scores'].get(name, {'score': None})
score = partial_score.get('score', None)
if score is not None:
try:
score = float(score)
if score >= 1:
html_params['correct'] = True
elif score > 0:
html_params['partial'] = math.floor(score * 100)
else:
html_params['incorrect'] = True
except Exception:
                raise ValueError('invalid score: ' + str(score))
if display == 'inline':
html_params['inline'] = True
elif display == 'block':
html_params['block'] = True
else:
raise ValueError('method of display "%s" is not valid (must be "inline" or "block")' % display)
if raw_submitted_answer is not None:
html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
elif data['panel'] == 'submission':
parse_error = data['format_errors'].get(name, None)
html_params = {
'submission': True,
'label': label,
'parse_error': parse_error,
'uuid': pl.get_uuid()
}
if parse_error is None:
a_sub = data['submitted_answers'][name]
if isinstance(a_sub, str):
# this is for backward-compatibility
a_sub = phs.convert_string_to_sympy(a_sub, variables, allow_complex=allow_complex)
else:
a_sub = phs.json_to_sympy(a_sub, allow_complex=allow_complex)
a_sub = a_sub.subs(sympy.I, sympy.Symbol(imaginary_unit))
html_params['a_sub'] = sympy.latex(a_sub)
else:
raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
if raw_submitted_answer is not None:
html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
partial_score = data['partial_scores'].get(name, {'score': None})
score = partial_score.get('score', None)
if score is not None:
try:
score = float(score)
if score >= 1:
html_params['correct'] = True
elif score > 0:
html_params['partial'] = math.floor(score * 100)
else:
html_params['incorrect'] = True
except Exception:
                raise ValueError('invalid score: ' + str(score))
if display == 'inline':
html_params['inline'] = True
elif display == 'block':
html_params['block'] = True
else:
raise ValueError('method of display "%s" is not valid (must be "inline" or "block")' % display)
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
elif data['panel'] == 'answer':
a_tru = data['correct_answers'].get(name, None)
if a_tru is not None:
if isinstance(a_tru, str):
# this is so instructors can specify the true answer simply as a string
a_tru = phs.convert_string_to_sympy(a_tru, variables, allow_complex=allow_complex)
else:
a_tru = phs.json_to_sympy(a_tru, allow_complex=allow_complex)
a_tru = a_tru.subs(sympy.I, sympy.Symbol(imaginary_unit))
html_params = {
'answer': True,
'label': label,
'a_tru': sympy.latex(a_tru)
}
with open('pl-symbolic-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
else:
html = ''
else:
raise Exception('Invalid panel type: %s' % data['panel'])
return html
def parse(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
variables = get_variables_list(pl.get_string_attrib(element, 'variables', None))
allow_complex = pl.get_boolean_attrib(element, 'allow-complex', False)
imaginary_unit = pl.get_string_attrib(element, 'imaginary-unit-for-display', 'i')
# Get submitted answer or return parse_error if it does not exist
a_sub = data['submitted_answers'].get(name, None)
if not a_sub:
data['format_errors'][name] = 'No submitted answer.'
data['submitted_answers'][name] = None
return
# Parse the submitted answer and put the result in a string
try:
# Replace '^' with '**' wherever it appears. In MATLAB, either can be used
# for exponentiation. In python, only the latter can be used.
a_sub = a_sub.replace('^', '**')
# Strip whitespace
a_sub = a_sub.strip()
# Convert safely to sympy
a_sub_parsed = phs.convert_string_to_sympy(a_sub, variables, allow_complex=allow_complex)
# If complex numbers are not allowed, raise error if expression has the imaginary unit
if (not allow_complex) and (a_sub_parsed.has(sympy.I)):
a_sub_parsed = a_sub_parsed.subs(sympy.I, sympy.Symbol(imaginary_unit))
s = 'Your answer was simplified to this, which contains a complex number (denoted ${:s}$): $${:s}$$'.format(imaginary_unit, sympy.latex(a_sub_parsed))
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
# Store result as json.
a_sub_json = phs.sympy_to_json(a_sub_parsed, allow_complex=allow_complex)
except phs.HasFloatError as err:
s = 'Your answer contains the floating-point number ' + str(err.n) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasComplexError as err:
s = 'Your answer contains the complex number ' + str(err.n) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
if allow_complex:
s += 'To include a complex number in your expression, write it as the product of an integer with the imaginary unit <code>i</code> or <code>j</code>. '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasInvalidExpressionError as err:
s = 'Your answer has an invalid expression. '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasInvalidFunctionError as err:
s = 'Your answer calls an invalid function "' + err.text + '". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasInvalidVariableError as err:
s = 'Your answer refers to an invalid variable "' + err.text + '". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasParseError as err:
s = 'Your answer has a syntax error. '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasEscapeError as err:
s = 'Your answer must not contain the character "\\". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except phs.HasCommentError as err:
s = 'Your answer must not contain the character "#". '
s += '<br><br><pre>' + phs.point_to_error(a_sub, err.offset) + '</pre>'
data['format_errors'][name] = s
data['submitted_answers'][name] = None
return
except Exception:
data['format_errors'][name] = 'Invalid format.'
data['submitted_answers'][name] = None
return
# Make sure we can parse the json again
try:
# Convert safely to sympy
phs.json_to_sympy(a_sub_json, allow_complex=allow_complex)
# Finally, store the result
data['submitted_answers'][name] = a_sub_json
except Exception:
s = 'Your answer was simplified to this, which contains an invalid expression: $${:s}$$'.format(sympy.latex(a_sub_parsed))
data['format_errors'][name] = s
data['submitted_answers'][name] = None
def grade(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
variables = get_variables_list(pl.get_string_attrib(element, 'variables', None))
allow_complex = pl.get_boolean_attrib(element, 'allow-complex', False)
weight = pl.get_integer_attrib(element, 'weight', 1)
# Get true answer (if it does not exist, create no grade - leave it
# up to the question code)
a_tru = data['correct_answers'].get(name, None)
if a_tru is None:
return
# Get submitted answer (if it does not exist, score is zero)
a_sub = data['submitted_answers'].get(name, None)
if a_sub is None:
data['partial_scores'][name] = {'score': 0, 'weight': weight}
return
# Parse true answer
if isinstance(a_tru, str):
# this is so instructors can specify the true answer simply as a string
a_tru = phs.convert_string_to_sympy(a_tru, variables, allow_complex=allow_complex)
else:
a_tru = phs.json_to_sympy(a_tru, allow_complex=allow_complex)
# Parse submitted answer
if isinstance(a_sub, str):
# this is for backward-compatibility
a_sub = phs.convert_string_to_sympy(a_sub, variables, allow_complex=allow_complex)
else:
a_sub = phs.json_to_sympy(a_sub, allow_complex=allow_complex)
# Check equality
correct = a_tru.equals(a_sub)
if correct:
data['partial_scores'][name] = {'score': 1, 'weight': weight}
else:
data['partial_scores'][name] = {'score': 0, 'weight': weight}
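# Illustrative sketch (not part of the original element code): the grading
# above relies on sympy's symbolic equality, so algebraically equivalent
# submissions are accepted. The expressions and variable list are made up.
def example_symbolic_equality():
    a_tru = phs.convert_string_to_sympy('x**2 + x', ['x'], allow_complex=False)
    a_sub = phs.convert_string_to_sympy('x*(x + 1)', ['x'], allow_complex=False)
    return a_tru.equals(a_sub)  # True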
def test(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
weight = pl.get_integer_attrib(element, 'weight', 1)
result = random.choices(['correct', 'incorrect', 'invalid'], [5, 5, 1])[0]
if result == 'correct':
data['raw_submitted_answers'][name] = str(pl.from_json(data['correct_answers'][name]))
data['partial_scores'][name] = {'score': 1, 'weight': weight}
elif result == 'incorrect':
data['raw_submitted_answers'][name] = str(pl.from_json(data['correct_answers'][name])) + ' + {:d}'.format(random.randint(1, 100))
data['partial_scores'][name] = {'score': 0, 'weight': weight}
elif result == 'invalid':
invalid_type = random.choice(['float', 'complex', 'expression', 'function', 'variable', 'syntax', 'escape', 'comment'])
if invalid_type == 'float':
data['raw_submitted_answers'][name] = 'x + 1.234'
s = 'Your answer contains the floating-point number ' + str(1.234) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
s += '<br><br><pre>' + phs.point_to_error('x + 1.234', 4) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'complex':
data['raw_submitted_answers'][name] = 'x + (1+2j)'
s = 'Your answer contains the complex number ' + str(2j) + '. '
s += 'All numbers must be expressed as integers (or ratios of integers). '
s += '<br><br><pre>' + phs.point_to_error('x + (1+2j)', 7) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'expression':
data['raw_submitted_answers'][name] = '1 and 0'
s = 'Your answer has an invalid expression. '
s += '<br><br><pre>' + phs.point_to_error('1 and 0', 0) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'function':
data['raw_submitted_answers'][name] = 'atan(x)'
s = 'Your answer calls an invalid function "' + 'atan' + '". '
s += '<br><br><pre>' + phs.point_to_error('atan(x)', 0) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'variable':
data['raw_submitted_answers'][name] = 'x + y'
s = 'Your answer refers to an invalid variable "' + 'y' + '". '
s += '<br><br><pre>' + phs.point_to_error('x + y', 4) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'syntax':
data['raw_submitted_answers'][name] = 'x +* 1'
s = 'Your answer has a syntax error. '
s += '<br><br><pre>' + phs.point_to_error('x +* 1', 4) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'escape':
data['raw_submitted_answers'][name] = 'x + 1\\n'
s = 'Your answer must not contain the character "\\". '
s += '<br><br><pre>' + phs.point_to_error('x + 1\\n', 5) + '</pre>'
data['format_errors'][name] = s
elif invalid_type == 'comment':
data['raw_submitted_answers'][name] = 'x # some text'
s = 'Your answer must not contain the character "#". '
s += '<br><br><pre>' + phs.point_to_error('x # some text', 2) + '</pre>'
data['format_errors'][name] = s
else:
raise Exception('invalid invalid_type: %s' % invalid_type)
else:
raise Exception('invalid result: %s' % result)
| agpl-3.0 | 4,258,647,680,519,534,600 | 44.036649 | 163 | 0.575273 | false |
kubeflow/katib | test/suggestion/v1beta1/test_enas_service.py | 1 | 8377 | # Copyright 2021 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import grpc_testing
import unittest
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.suggestion.v1beta1.nas.enas.service import EnasService
class TestEnas(unittest.TestCase):
def setUp(self):
servicers = {
api_pb2.DESCRIPTOR.services_by_name['Suggestion']: EnasService(
)
}
self.test_server = grpc_testing.server_from_dictionary(
servicers, grpc_testing.strict_real_time())
def test_get_suggestion(self):
trials = [
api_pb2.Trial(
name="first-trial",
spec=api_pb2.TrialSpec(
objective=api_pb2.ObjectiveSpec(
type=api_pb2.MAXIMIZE,
objective_metric_name="Validation-Accuracy",
goal=0.99
),
parameter_assignments=api_pb2.TrialSpec.ParameterAssignments(
assignments=[
api_pb2.ParameterAssignment(
name="architecture",
value="[[3], [0, 1], [0, 0, 1], [2, 1, 0, 0]]",
),
api_pb2.ParameterAssignment(
name="nn_config",
value="{'num_layers': 4}",
),
]
)
),
status=api_pb2.TrialStatus(
observation=api_pb2.Observation(
metrics=[
api_pb2.Metric(
name="Validation-Accuracy",
value="0.88"
),
]
),
condition=api_pb2.TrialStatus.TrialConditionType.SUCCEEDED,
)
),
api_pb2.Trial(
name="second-trial",
spec=api_pb2.TrialSpec(
objective=api_pb2.ObjectiveSpec(
type=api_pb2.MAXIMIZE,
objective_metric_name="Validation-Accuracy",
goal=0.99
),
parameter_assignments=api_pb2.TrialSpec.ParameterAssignments(
assignments=[
api_pb2.ParameterAssignment(
name="architecture",
value="[[1], [0, 1], [2, 1, 1], [2, 1, 1, 0]]",
),
api_pb2.ParameterAssignment(
name="nn_config",
value="{'num_layers': 4}",
),
],
)
),
status=api_pb2.TrialStatus(
observation=api_pb2.Observation(
metrics=[
api_pb2.Metric(
name="Validation-Accuracy",
value="0.84"
),
]
),
condition=api_pb2.TrialStatus.TrialConditionType.SUCCEEDED,
)
)
]
experiment = api_pb2.Experiment(
name="enas-experiment",
spec=api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(
algorithm_name="enas",
),
objective=api_pb2.ObjectiveSpec(
type=api_pb2.MAXIMIZE,
goal=0.9,
objective_metric_name="Validation-Accuracy"
),
parallel_trial_count=2,
max_trial_count=10,
nas_config=api_pb2.NasConfig(
graph_config=api_pb2.GraphConfig(
num_layers=4,
input_sizes=[32, 32, 8],
output_sizes=[10]
),
operations=api_pb2.NasConfig.Operations(
operation=[
api_pb2.Operation(
operation_type="convolution",
parameter_specs=api_pb2.Operation.ParameterSpecs(
parameters=[
api_pb2.ParameterSpec(
name="filter_size",
parameter_type=api_pb2.CATEGORICAL,
feasible_space=api_pb2.FeasibleSpace(
max=None, min=None, list=["5"])
),
api_pb2.ParameterSpec(
name="num_filter",
parameter_type=api_pb2.CATEGORICAL,
feasible_space=api_pb2.FeasibleSpace(
max=None, min=None, list=["128"])
),
api_pb2.ParameterSpec(
name="stride",
parameter_type=api_pb2.CATEGORICAL,
feasible_space=api_pb2.FeasibleSpace(
max=None, min=None, list=["1", "2"])
),
]
)
),
api_pb2.Operation(
operation_type="reduction",
parameter_specs=api_pb2.Operation.ParameterSpecs(
parameters=[
api_pb2.ParameterSpec(
name="reduction_type",
parameter_type=api_pb2.CATEGORICAL,
feasible_space=api_pb2.FeasibleSpace(
max=None, min=None, list=["max_pooling"])
),
api_pb2.ParameterSpec(
name="pool_size",
parameter_type=api_pb2.INT,
feasible_space=api_pb2.FeasibleSpace(
min="2", max="3", step="1", list=[])
),
]
)
),
],
)
)
)
)
request = api_pb2.GetSuggestionsRequest(
experiment=experiment,
trials=trials,
request_number=2,
)
get_suggestion = self.test_server.invoke_unary_unary(
method_descriptor=(api_pb2.DESCRIPTOR
.services_by_name['Suggestion']
.methods_by_name['GetSuggestions']),
invocation_metadata={},
request=request, timeout=100)
response, metadata, code, details = get_suggestion.termination()
print(response.parameter_assignments)
self.assertEqual(code, grpc.StatusCode.OK)
self.assertEqual(2, len(response.parameter_assignments))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,538,063,537,198,039,000 | 41.958974 | 89 | 0.390951 | false |
kawamuray/ganeti | lib/utils/text.py | 1 | 18178 | #
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Utility functions for manipulating or working with text.
"""
import re
import os
import time
import collections
from ganeti import errors
from ganeti import compat
#: Unit checker regexp
_PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$")
#: Characters which don't need to be quoted for shell commands
_SHELL_UNQUOTED_RE = re.compile("^[-.,=:/_+@A-Za-z0-9]+$")
#: Shell param checker regexp
_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
#: ASCII equivalent of unicode character 'HORIZONTAL ELLIPSIS' (U+2026)
_ASCII_ELLIPSIS = "..."
#: MAC address octet
_MAC_ADDR_OCTET_RE = r"[0-9a-f]{2}"
def MatchNameComponent(key, name_list, case_sensitive=True):
"""Try to match a name against a list.
This function will try to match a name like test1 against a list
like C{['test1.example.com', 'test2.example.com', ...]}. Against
this list, I{'test1'} as well as I{'test1.example'} will match, but
not I{'test1.ex'}. A multiple match will be considered as no match
at all (e.g. I{'test1'} against C{['test1.example.com',
'test1.example.org']}), except when the key fully matches an entry
(e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
@type key: str
@param key: the name to be searched
@type name_list: list
@param name_list: the list of strings against which to search the key
@type case_sensitive: boolean
@param case_sensitive: whether to provide a case-sensitive match
@rtype: None or str
@return: None if there is no match I{or} if there are multiple matches,
otherwise the element from the list which matches
"""
if key in name_list:
return key
re_flags = 0
if not case_sensitive:
re_flags |= re.IGNORECASE
key = key.upper()
name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)
names_filtered = []
string_matches = []
for name in name_list:
if name_re.match(name) is not None:
names_filtered.append(name)
if not case_sensitive and key == name.upper():
string_matches.append(name)
if len(string_matches) == 1:
return string_matches[0]
if len(names_filtered) == 1:
return names_filtered[0]
return None
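def _ExampleMatchNameComponent():
  """Illustrative sketch of L{MatchNameComponent}, not part of the module API.

  The host names are the hypothetical ones used in the docstring above.

  """
  assert MatchNameComponent("test1",
                            ["test1.example.com", "test2.example.com"]) == \
      "test1.example.com"
  assert MatchNameComponent("test1",
                            ["test1.example.com", "test1.example.org"]) is None
  assert MatchNameComponent("test1", ["test1", "test1.example.com"]) == "test1"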
def _DnsNameGlobHelper(match):
"""Helper function for L{DnsNameGlobPattern}.
Returns regular expression pattern for parts of the pattern.
"""
text = match.group(0)
if text == "*":
return "[^.]*"
elif text == "?":
return "[^.]"
else:
return re.escape(text)
def DnsNameGlobPattern(pattern):
"""Generates regular expression from DNS name globbing pattern.
A DNS name globbing pattern (e.g. C{*.site}) is converted to a regular
expression. Escape sequences or ranges (e.g. [a-z]) are not supported.
Matching always starts at the leftmost part. An asterisk (*) matches all
characters except the dot (.) separating DNS name parts. A question mark (?)
matches a single character except the dot (.).
@type pattern: string
@param pattern: DNS name globbing pattern
@rtype: string
@return: Regular expression
"""
return r"^%s(\..*)?$" % re.sub(r"\*|\?|[^*?]*", _DnsNameGlobHelper, pattern)
def FormatUnit(value, units, roman=False):
"""Formats an incoming number of MiB with the appropriate unit.
@type value: int
@param value: integer representing the value in MiB (1048576)
@type units: char
@param units: the type of formatting we should do:
- 'h' for automatic scaling
- 'm' for MiBs
- 'g' for GiBs
- 't' for TiBs
@rtype: str
@return: the formatted value (with suffix)
"""
if units not in ("m", "g", "t", "h"):
raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
suffix = ""
if units == "m" or (units == "h" and value < 1024):
if units == "h":
suffix = "M"
return "%s%s" % (compat.RomanOrRounded(value, 0, roman), suffix)
elif units == "g" or (units == "h" and value < (1024 * 1024)):
if units == "h":
suffix = "G"
return "%s%s" % (compat.RomanOrRounded(float(value) / 1024, 1, roman),
suffix)
else:
if units == "h":
suffix = "T"
return "%s%s" % (compat.RomanOrRounded(float(value) / 1024 / 1024, 1,
roman), suffix)
def ParseUnit(input_string):
"""Tries to extract number and scale from the given string.
Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
[UNIT]}. If no unit is specified, it defaults to MiB. Return value
is always an int in MiB.
"""
m = _PARSEUNIT_REGEX.match(str(input_string))
if not m:
raise errors.UnitParseError("Invalid format")
value = float(m.groups()[0])
unit = m.groups()[1]
if unit:
lcunit = unit.lower()
else:
lcunit = "m"
if lcunit in ("m", "mb", "mib"):
# Value already in MiB
pass
elif lcunit in ("g", "gb", "gib"):
value *= 1024
elif lcunit in ("t", "tb", "tib"):
value *= 1024 * 1024
else:
raise errors.UnitParseError("Unknown unit: %s" % unit)
# Make sure we round up
if int(value) < value:
value += 1
# Round up to the next multiple of 4
value = int(value)
if value % 4:
value += 4 - value % 4
return value
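def _ExampleParseUnit():
  """Illustrative sketch of L{ParseUnit}, not part of the module API.

  Results are in MiB and are rounded up to the next multiple of 4.

  """
  assert ParseUnit("4 MB") == 4
  assert ParseUnit("250") == 252
  assert ParseUnit("1.3G") == 1332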
def ShellQuote(value):
"""Quotes shell argument according to POSIX.
@type value: str
@param value: the argument to be quoted
@rtype: str
@return: the quoted value
"""
if _SHELL_UNQUOTED_RE.match(value):
return value
else:
return "'%s'" % value.replace("'", "'\\''")
def ShellQuoteArgs(args):
"""Quotes a list of shell arguments.
@type args: list
@param args: list of arguments to be quoted
@rtype: str
@return: the quoted arguments concatenated with spaces
"""
return " ".join([ShellQuote(i) for i in args])
def ShellCombineCommands(cmdlist):
"""Out of a list of shell comands construct a single one.
"""
return ["/bin/sh", "-c", " && ".join(ShellQuoteArgs(c) for c in cmdlist)]
class ShellWriter:
"""Helper class to write scripts with indentation.
"""
INDENT_STR = " "
def __init__(self, fh, indent=True):
"""Initializes this class.
"""
self._fh = fh
self._indent_enabled = indent
self._indent = 0
def IncIndent(self):
"""Increase indentation level by 1.
"""
self._indent += 1
def DecIndent(self):
"""Decrease indentation level by 1.
"""
assert self._indent > 0
self._indent -= 1
def Write(self, txt, *args):
"""Write line to output file.
"""
assert self._indent >= 0
if args:
line = txt % args
else:
line = txt
if line and self._indent_enabled:
# Indent only if there's something on the line
self._fh.write(self._indent * self.INDENT_STR)
self._fh.write(line)
self._fh.write("\n")
def GenerateSecret(numbytes=20):
"""Generates a random secret.
  This will generate a pseudo-random secret returning a hex string
(so that it can be used where an ASCII string is needed).
@param numbytes: the number of bytes which will be represented by the returned
string (defaulting to 20, the length of a SHA1 hash)
@rtype: str
  @return: a hex representation of the pseudo-random sequence
"""
return os.urandom(numbytes).encode("hex")
def _MakeMacAddrRegexp(octets):
"""Builds a regular expression for verifying MAC addresses.
@type octets: integer
@param octets: How many octets to expect (1-6)
@return: Compiled regular expression
"""
assert octets > 0
assert octets <= 6
return re.compile("^%s$" % ":".join([_MAC_ADDR_OCTET_RE] * octets),
re.I)
#: Regular expression for full MAC address
_MAC_CHECK_RE = _MakeMacAddrRegexp(6)
#: Regular expression for half a MAC address
_MAC_PREFIX_CHECK_RE = _MakeMacAddrRegexp(3)
def _MacAddressCheck(check_re, mac, msg):
"""Checks a MAC address using a regular expression.
@param check_re: Compiled regular expression as returned by C{re.compile}
@type mac: string
@param mac: MAC address to be validated
@type msg: string
@param msg: Error message (%s will be replaced with MAC address)
"""
if check_re.match(mac):
return mac.lower()
raise errors.OpPrereqError(msg % mac, errors.ECODE_INVAL)
def NormalizeAndValidateMac(mac):
"""Normalizes and check if a MAC address is valid and contains six octets.
Checks whether the supplied MAC address is formally correct. Accepts
colon-separated format only. Normalize it to all lower case.
@type mac: string
@param mac: MAC address to be validated
@rtype: string
@return: Normalized and validated MAC address
@raise errors.OpPrereqError: If the MAC address isn't valid
"""
return _MacAddressCheck(_MAC_CHECK_RE, mac, "Invalid MAC address '%s'")
def NormalizeAndValidateThreeOctetMacPrefix(mac):
"""Normalizes a potential MAC address prefix (three octets).
Checks whether the supplied string is a valid MAC address prefix consisting
of three colon-separated octets. The result is normalized to all lower case.
@type mac: string
@param mac: Prefix to be validated
@rtype: string
@return: Normalized and validated prefix
@raise errors.OpPrereqError: If the MAC address prefix isn't valid
"""
return _MacAddressCheck(_MAC_PREFIX_CHECK_RE, mac,
"Invalid MAC address prefix '%s'")
def SafeEncode(text):
"""Return a 'safe' version of a source string.
This function mangles the input string and returns a version that
should be safe to display/encode as ASCII. To this end, we first
convert it to ASCII using the 'backslashreplace' encoding which
should get rid of any non-ASCII chars, and then we process it
through a loop copied from the string repr sources in the python; we
don't use string_escape anymore since that escape single quotes and
backslashes too, and that is too much; and that escaping is not
stable, i.e. string_escape(string_escape(x)) != string_escape(x).
@type text: str or unicode
@param text: input data
@rtype: str
@return: a safe version of text
"""
if isinstance(text, unicode):
# only if unicode; if str already, we handle it below
text = text.encode("ascii", "backslashreplace")
resu = ""
for char in text:
c = ord(char)
if char == "\t":
resu += r"\t"
elif char == "\n":
resu += r"\n"
elif char == "\r":
resu += r'\'r'
elif c < 32 or c >= 127: # non-printable
resu += "\\x%02x" % (c & 0xff)
else:
resu += char
return resu
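def _ExampleSafeEncode():
  """Illustrative sketch of L{SafeEncode}, not part of the module API.

  Control characters come back as printable escape sequences.

  """
  assert SafeEncode("ab\ncd") == "ab\\ncd"
  assert SafeEncode("tab\there") == "tab\\there"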
def UnescapeAndSplit(text, sep=","):
r"""Split and unescape a string based on a given separator.
This function splits a string based on a separator where the
separator itself can be escape in order to be an element of the
elements. The escaping rules are (assuming coma being the
separator):
- a plain , separates the elements
- a sequence \\\\, (double backslash plus comma) is handled as a
backslash plus a separator comma
- a sequence \, (backslash plus comma) is handled as a
non-separator comma
@type text: string
@param text: the string to split
@type sep: string
  @param sep: the separator
@rtype: string
@return: a list of strings
"""
# we split the list by sep (with no escaping at this stage)
slist = text.split(sep)
# next, we revisit the elements and if any of them ended with an odd
# number of backslashes, then we join it with the next
rlist = []
while slist:
e1 = slist.pop(0)
if e1.endswith("\\"):
num_b = len(e1) - len(e1.rstrip("\\"))
if num_b % 2 == 1 and slist:
e2 = slist.pop(0)
# Merge the two elements and push the result back to the source list for
# revisiting. If e2 ended with backslashes, further merging may need to
# be done.
slist.insert(0, e1 + sep + e2)
continue
# here the backslashes remain (all), and will be reduced in the next step
rlist.append(e1)
# finally, replace backslash-something with something
rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
return rlist
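def _ExampleUnescapeAndSplit():
  """Illustrative sketch of L{UnescapeAndSplit}, not part of the module API.

  """
  assert UnescapeAndSplit("a,b,c") == ["a", "b", "c"]
  # an escaped comma stays inside the element
  assert UnescapeAndSplit("a\\,b,c") == ["a,b", "c"]
  # an escaped backslash is kept and the comma still separates
  assert UnescapeAndSplit("a\\\\,b") == ["a\\", "b"]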
def EscapeAndJoin(slist, sep=","):
"""Encode a list in a way parsable by UnescapeAndSplit.
@type slist: list of strings
@param slist: the strings to be encoded
@rtype: string
  @return: the encoding of the list as a string
"""
return sep.join([re.sub("\\" + sep, "\\\\" + sep,
re.sub(r"\\", r"\\\\", v)) for v in slist])
def CommaJoin(names):
"""Nicely join a set of identifiers.
@param names: set, list or tuple
@return: a string with the formatted results
"""
return ", ".join([str(val) for val in names])
def FormatTime(val, usecs=None):
"""Formats a time value.
@type val: float or None
@param val: Timestamp as returned by time.time() (seconds since Epoch,
1970-01-01 00:00:00 UTC)
@return: a string value or N/A if we don't have a valid timestamp
"""
if val is None or not isinstance(val, (int, float)):
return "N/A"
# these two codes works on Linux, but they are not guaranteed on all
# platforms
result = time.strftime("%F %T", time.localtime(val))
if usecs is not None:
result += ".%06d" % usecs
return result
def FormatSeconds(secs):
"""Formats seconds for easier reading.
@type secs: number
@param secs: Number of seconds
@rtype: string
@return: Formatted seconds (e.g. "2d 9h 19m 49s")
"""
parts = []
secs = round(secs, 0)
if secs > 0:
# Negative values would be a bit tricky
for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
(complete, secs) = divmod(secs, one)
if complete or parts:
parts.append("%d%s" % (complete, unit))
parts.append("%ds" % secs)
return " ".join(parts)
class LineSplitter:
"""Splits data chunks into lines separated by newline.
Instances provide a file-like interface.
"""
def __init__(self, line_fn, *args):
"""Initializes this class.
@type line_fn: callable
@param line_fn: Function called for each line, first parameter is line
@param args: Extra arguments for L{line_fn}
"""
assert callable(line_fn)
if args:
# Python 2.4 doesn't have functools.partial yet
self._line_fn = \
lambda line: line_fn(line, *args) # pylint: disable=W0142
else:
self._line_fn = line_fn
self._lines = collections.deque()
self._buffer = ""
def write(self, data):
parts = (self._buffer + data).split("\n")
self._buffer = parts.pop()
self._lines.extend(parts)
def flush(self):
while self._lines:
self._line_fn(self._lines.popleft().rstrip("\r\n"))
def close(self):
self.flush()
if self._buffer:
self._line_fn(self._buffer)
def IsValidShellParam(word):
"""Verifies is the given word is safe from the shell's p.o.v.
This means that we can pass this to a command via the shell and be
sure that it doesn't alter the command line and is passed as such to
the actual command.
Note that we are overly restrictive here, in order to be on the safe
side.
@type word: str
@param word: the word to check
@rtype: boolean
@return: True if the word is 'safe'
"""
return bool(_SHELLPARAM_REGEX.match(word))
def BuildShellCmd(template, *args):
"""Build a safe shell command line from the given arguments.
This function will check all arguments in the args list so that they
are valid shell parameters (i.e. they don't contain shell
metacharacters). If everything is ok, it will return the result of
template % args.
@type template: str
@param template: the string holding the template for the
string formatting
@rtype: str
@return: the expanded command line
"""
for word in args:
if not IsValidShellParam(word):
raise errors.ProgrammerError("Shell argument '%s' contains"
" invalid characters" % word)
return template % args
def FormatOrdinal(value):
"""Formats a number as an ordinal in the English language.
E.g. the number 1 becomes "1st", 22 becomes "22nd".
@type value: integer
@param value: Number
@rtype: string
"""
tens = value % 10
  if 10 < value % 100 < 20:
suffix = "th"
elif tens == 1:
suffix = "st"
elif tens == 2:
suffix = "nd"
elif tens == 3:
suffix = "rd"
else:
suffix = "th"
return "%s%s" % (value, suffix)
def Truncate(text, length):
"""Truncate string and add ellipsis if needed.
@type text: string
@param text: Text
@type length: integer
@param length: Desired length
@rtype: string
@return: Truncated text
"""
assert length > len(_ASCII_ELLIPSIS)
# Serialize if necessary
if not isinstance(text, basestring):
text = str(text)
if len(text) <= length:
return text
else:
return text[:length - len(_ASCII_ELLIPSIS)] + _ASCII_ELLIPSIS
def FilterEmptyLinesAndComments(text):
"""Filters empty lines and comments from a line-based string.
Whitespace is also removed from the beginning and end of all lines.
@type text: string
@param text: Input string
@rtype: list
"""
return [line for line in map(lambda s: s.strip(), text.splitlines())
# Ignore empty lines and comments
if line and not line.startswith("#")]
def FormatKeyValue(data):
"""Formats a dictionary as "key=value" parameters.
The keys are sorted to have a stable order.
@type data: dict
@rtype: list of string
"""
return ["%s=%s" % (key, value) for (key, value) in sorted(data.items())]
| gpl-2.0 | -4,384,509,988,743,595,500 | 25.306802 | 80 | 0.655298 | false |
whtsky/Waterspout | waterspout/auth.py | 1 | 1374 | import functools
from tornado.web import urlparse, urlencode, HTTPError
def permission_required(f):
"""
    Returns a decorator that checks the current user with the given function.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
    If the user does not have the permission, they will receive a 403 page.
"""
@functools.wraps(f)
def check_permission(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
user = self.current_user
if not user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
elif f(user):
return method(self, *args, **kwargs)
raise HTTPError(403)
return wrapper
return check_permission
login_required = permission_required(lambda x: True)
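# Illustrative usage sketch: the handler class and the ``is_admin`` attribute
# below are assumptions for demonstration, not part of Waterspout itself.
def _example_admin_handler():
    from tornado.web import RequestHandler

    # build a permission check from any predicate over the current user
    admin_required = permission_required(
        lambda user: getattr(user, 'is_admin', False))

    class AdminHandler(RequestHandler):
        @admin_required
        def get(self):
            self.write('hello, admin')

    return AdminHandler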
| mit | -6,591,939,943,022,267,000 | 33.35 | 78 | 0.542213 | false |
IvIePhisto/Ancestration | doc/source/conf.py | 1 | 9265 | # -*- coding: utf-8 -*-
#
# Ancestration documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 28 15:10:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
autodoc_default_flags = ['members', 'undoc-members']
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ancestration'
copyright = u'2013, Michael Pohl'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
bg_color = 'rgb(251, 255, 253)'
link_color = 'rgb(100, 110, 160)'
visted_link_color = 'rgb(50, 60, 120)'
text_color = 'rgb(10, 10, 15)'
html_theme_options = {
'externalrefs': True,
'footerbgcolor': None,
'footertextcolor': text_color,
'sidebarbgcolor': 'rgb(235, 240, 247)',
#'sidebarbtncolor': None,
'sidebartextcolor': text_color,
'sidebarlinkcolor': visted_link_color,
'relbarbgcolor': visted_link_color,
'relbartextcolor': bg_color,
'relbarlinkcolor': bg_color,
'bgcolor': bg_color,
'textcolor': text_color,
'linkcolor': link_color,
'visitedlinkcolor': visted_link_color,
'headbgcolor': 'inherit',
'headtextcolor': 'inherit',
'headlinkcolor': 'inherit',
#'codebgcolor': None,
#'codetextcolor': None,
'bodyfont': "'Helvetica', Arial, sans-serif",
'headfont': "'Lucida Grande', Arial, sans-serif",
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ancestrationdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Ancestration.tex', u'Ancestration Documentation',
u'Michael Pohl', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ancestration', u'Ancestration Documentation',
[u'Michael Pohl'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Ancestration', u'Ancestration Documentation',
u'Michael Pohl', 'Ancestration', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'tinkerpy': ('http://pythonhosted.org/TinkerPy', None),
'python': ('http://docs.python.org/2.7', None),
}
| mit | 5,730,674,737,882,248,000 | 30.40678 | 80 | 0.698759 | false |
elainekmao/hiphoptextanalysis | lyricwiki-scraper/lyricwiki/spiders/chance_spider.py | 1 | 1152 | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from lyricwiki.items import LyricWikiItem
class LyricWikiSpider(CrawlSpider):
name = "chance" #CHANGE NAME
allowed_domains = ["lyrics.wikia.com"]
start_urls = [
"http://lyrics.wikia.com/Chance_The_Rapper", #CHANGE URL
]
rules = ( #CHANGE REGEX
Rule(SgmlLinkExtractor(allow=('/Chance_The_Rapper:.*',),restrict_xpaths=('//ol/li',)), callback='parse_item', follow=True),
)
def parse_item(self, response):
sel = Selector(response)
info = sel.xpath('//div[@class="mw-content-ltr"]')
item = LyricWikiItem()
item['title'] = sel.xpath('//header[@id="WikiaPageHeader"]/h1/text()').extract()
item['artist'] = info.xpath('b/a/text()').extract()
item['album'] = info.xpath('i/a/text()').extract()
item['lyrics'] = sel.xpath('//div[@class="lyricbox"]/text()').extract()
return item | gpl-2.0 | -5,528,600,276,207,493,000 | 44.16 | 131 | 0.578993 | false |
sailfish-sdk/sailfish-qtcreator | tests/system/suite_general/tst_cmake_speedcrunch/test.py | 1 | 3171 | ############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
source("../../shared/qtcreator.py")
SpeedCrunchPath = ""
BuildPath = tempDir()
def cmakeSupportsServerMode():
versionLines = filter(lambda line: "cmake version " in line,
getOutputFromCmdline(["cmake", "--version"]).splitlines())
try:
test.log("Using " + versionLines[0])
matcher = re.match("cmake version (\d+)\.(\d+)\.\d+", versionLines[0])
major = __builtin__.int(matcher.group(1))
minor = __builtin__.int(matcher.group(2))
except:
return False
if major < 3:
return False
elif major > 3:
return True
else:
return minor >= 7
def main():
if (which("cmake") == None):
test.fatal("cmake not found in PATH - needed to run this test")
return
if not neededFilePresent(SpeedCrunchPath):
return
startQC()
if not startedWithoutPluginError():
return
result = openCmakeProject(SpeedCrunchPath, BuildPath)
if not result:
test.fatal("Could not open/create cmake project - leaving test")
invokeMenuItem("File", "Exit")
return
progressBarWait(30000)
naviTreeView = "{column='0' container=':Qt Creator_Utils::NavigationTreeView' text~='%s' type='QModelIndex'}"
if cmakeSupportsServerMode():
treeFile = "projecttree_speedcrunch_server.tsv"
else:
treeFile = "projecttree_speedcrunch.tsv"
compareProjectTree(naviTreeView % "speedcrunch( \[\S+\])?", treeFile)
# Invoke a rebuild of the application
invokeMenuItem("Build", "Rebuild All")
# Wait for, and test if the build succeeded
waitForCompile(300000)
checkCompile()
checkLastBuild()
invokeMenuItem("File", "Exit")
def init():
global SpeedCrunchPath
SpeedCrunchPath = srcPath + "/creator-test-data/speedcrunch/src/CMakeLists.txt"
cleanup()
def cleanup():
global BuildPath
# Make sure the .user files are gone
cleanUpUserFiles(SpeedCrunchPath)
deleteDirIfExists(BuildPath)
| gpl-3.0 | -7,002,482,751,329,805,000 | 34.233333 | 113 | 0.653106 | false |
Honghe/weiboanalyze | initclient/initclient.py | 1 | 5036 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Feb 15, 2014
@author: honghe
Client initialization
"""
import weibo
import time
import os
# import the Weibo app credentials from the config file
config = {}
execfile(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../conf.py"), config)
# python 3: exec(open("example.conf").read(), config)
APP_KEY = config['APP_KEY']
APP_SECRET = config['APP_SECRET']
CALLBACK_URL = config['CALLBACK_URL']
class aAPIClient(weibo.APIClient):
"""
    This class extends APIClient from the weibo SDK. The SDK's APIClient provides no interface for
    fetching the details of an already authorized access_token, and it does not keep the uid of the
    currently authorized user; this subclass adds both features to make it more convenient to use.
"""
def __init__(self, app_key, app_secret, redirect_uri=None, response_type='code', domain='api.weibo.com', version='2'):
super(aAPIClient, self).__init__(app_key, app_secret, redirect_uri, response_type=response_type, domain=domain, version=version)
        # keep the uid of the currently authorized user
self.uid = ''
def request_access_token_info(self, access_token):
"""
        Given an already authorized access_token, return the detailed information for that token as
        a Json object, similar to APIClient.request_access_token.
"""
r = weibo._http_post('%s%s' % (self.auth_url, 'get_token_info'), access_token = access_token)
        # TODO what is the point of comparing rtime and expires here?
current = int(time.time())
expires = r.expire_in + current
remind_in = r.get('remind_in', None)
if remind_in:
rtime = int(remind_in) + current
if rtime < expires:
expires = rtime
return weibo.JsonDict(expires=expires, expires_in=expires, uid=r.get('uid', None))
def set_uid(self, uid):
self.uid = uid
TOKEN_FILE = 'access_token.txt'
def load_tokens(filename=TOKEN_FILE):
access_token_list = []
filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
# TODO better process logic
if not os.path.exists(filepath):
print "file %s not exist." % filepath
return None
try:
f = open(filepath)
        # avoid appending an empty '' to the list
access_token = f.readline().strip()
if access_token:
access_token_list.append(access_token)
print '=> Get the access_token from file %s: %s' % (TOKEN_FILE, access_token_list[0])
except IOError, e:
raise e
finally:
f.close()
return access_token_list
# TODO duplicated access_token dump
def dump_tokens(access_token, filename=TOKEN_FILE):
filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
if not os.path.exists(filepath):
print "file %s not exist." % filepath
return None
try:
f = open(filepath, 'a+')
f.write(access_token)
f.write('\n')
except IOError, e:
raise e
finally:
f.close()
def get_client(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL):
    # Place a "Sign in with Weibo" link on the site; when the user clicks it, redirect to the URL below:
client = aAPIClient(app_key, app_secret, redirect_uri)
access_token_list = load_tokens()
    # if a stored access_token exists, use it
if access_token_list:
access_token = access_token_list[-1]
r = client.request_access_token_info(access_token)
expires_in = r.expires_in
print '=> The access_token expires_in : %f' % expires_in
        # the authorized access_token has expired
if r.expires_in <= 0:
return None
client.set_uid(r.uid)
    # if there is no stored access_token, obtain one through the API
else:
auth_url = client.get_authorize_url()
# TODO: redirect to url
print '=> auth_url : %s' % auth_url
        print '=> Note! The access_token is not available, you need to authorize again. Please open the url above in your browser; the redirect url you get back contains a code field. Input that code in the following step.'
        # after the user authorizes, Weibo redirects to the site's callback URL with an extra parameter, e.g. code=abcd1234:
        # read the code parameter from the URL:
# code = your.web.framework.request.get('code')
        code = raw_input('=> input the returned code:')
r = client.request_access_token(code)
        access_token = r.access_token  # token returned by Sina, e.g. abc123xyz456
        expires_in = r.expires_in  # UNIX time at which the token expires: http://zh.wikipedia.org/wiki/UNIX%E6%97%B6%E9%97%B4
print '=> the new access_token is : %s' % access_token
print 'access_token expires_in: ', expires_in
dump_tokens(access_token)
client.set_access_token(access_token, expires_in)
client.set_uid(r.uid)
    # afterwards, once the user's identity is confirmed, any API can be called:
return client
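# Illustrative usage sketch: "statuses.user_timeline.get" follows the
# sinaweibopy SDK calling convention and is an assumption here; adjust it to
# the weibo SDK version actually installed.
def example_fetch_timeline():
    client = get_client()
    if client is None:
        # the stored access_token has expired and no new authorization was made
        return None
    print '=> authorized uid: %s' % client.uid
    return client.statuses.user_timeline.get(uid=client.uid, count=20)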
| apache-2.0 | 5,662,989,894,456,770,000 | 36.233333 | 223 | 0.632274 | false |
matthewoliver/swift | test/unit/common/middleware/s3api/test_subresource.py | 1 | 17587 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.utils import json
from swift.common.middleware.s3api.s3response import AccessDenied, \
InvalidArgument, S3NotImplemented
from swift.common.middleware.s3api.subresource import User, \
AuthenticatedUsers, AllUsers, \
ACLPrivate, ACLPublicRead, ACLPublicReadWrite, ACLAuthenticatedRead, \
ACLBucketOwnerRead, ACLBucketOwnerFullControl, Owner, ACL, encode_acl, \
decode_acl, canned_acl_grantees, Grantee
from swift.common.middleware.s3api.utils import sysmeta_header
from swift.common.middleware.s3api.exception import InvalidSubresource
class TestS3ApiSubresource(unittest.TestCase):
def setUp(self):
self.s3_acl = True
self.allow_no_owner = False
def test_acl_canonical_user(self):
grantee = User('test:tester')
self.assertTrue('test:tester' in grantee)
self.assertTrue('test:tester2' not in grantee)
self.assertEqual(str(grantee), 'test:tester')
self.assertEqual(grantee.elem().find('./ID').text, 'test:tester')
def test_acl_authenticated_users(self):
grantee = AuthenticatedUsers()
self.assertTrue('test:tester' in grantee)
self.assertTrue('test:tester2' in grantee)
uri = 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'
self.assertEqual(grantee.elem().find('./URI').text, uri)
def test_acl_all_users(self):
grantee = AllUsers()
self.assertTrue('test:tester' in grantee)
self.assertTrue('test:tester2' in grantee)
uri = 'http://acs.amazonaws.com/groups/global/AllUsers'
self.assertEqual(grantee.elem().find('./URI').text, uri)
def check_permission(self, acl, user_id, permission):
try:
acl.check_permission(user_id, permission)
return True
except AccessDenied:
return False
def test_acl_private(self):
acl = ACLPrivate(Owner(id='test:tester',
name='test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'READ_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_acl_public_read(self):
acl = ACLPublicRead(Owner(id='test:tester',
name='test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'READ_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_acl_public_read_write(self):
acl = ACLPublicReadWrite(Owner(id='test:tester',
name='test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'READ_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_acl_authenticated_read(self):
acl = ACLAuthenticatedRead(Owner(id='test:tester',
name='test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'READ_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_acl_bucket_owner_read(self):
acl = ACLBucketOwnerRead(
bucket_owner=Owner('test:tester2', 'test:tester2'),
object_owner=Owner('test:tester', 'test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'READ_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_acl_bucket_owner_full_control(self):
acl = ACLBucketOwnerFullControl(
bucket_owner=Owner('test:tester2', 'test:tester2'),
object_owner=Owner('test:tester', 'test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester2', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_acl_elem(self):
acl = ACLPrivate(Owner(id='test:tester',
name='test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
elem = acl.elem()
self.assertTrue(elem.find('./Owner') is not None)
self.assertTrue(elem.find('./AccessControlList') is not None)
grants = [e for e in elem.findall('./AccessControlList/Grant')]
self.assertEqual(len(grants), 1)
self.assertEqual(grants[0].find('./Grantee/ID').text, 'test:tester')
self.assertEqual(
grants[0].find('./Grantee/DisplayName').text, 'test:tester')
def test_acl_from_elem(self):
# check translation from element
acl = ACLPrivate(Owner(id='test:tester',
name='test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner)
elem = acl.elem()
acl = ACL.from_elem(elem, self.s3_acl, self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'READ_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_acl_from_elem_by_id_only(self):
elem = ACLPrivate(Owner(id='test:tester',
name='test:tester'),
s3_acl=self.s3_acl,
allow_no_owner=self.allow_no_owner).elem()
elem.find('./Owner').remove(elem.find('./Owner/DisplayName'))
acl = ACL.from_elem(elem, self.s3_acl, self.allow_no_owner)
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'READ_ACP'))
self.assertTrue(self.check_permission(acl, 'test:tester', 'WRITE_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'READ'))
self.assertFalse(self.check_permission(acl, 'test:tester2', 'WRITE'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'READ_ACP'))
self.assertFalse(self.check_permission(acl, 'test:tester2',
'WRITE_ACP'))
def test_decode_acl_container(self):
access_control_policy = \
{'Owner': 'test:tester',
'Grant': [{'Permission': 'FULL_CONTROL',
'Grantee': 'test:tester'}]}
headers = {sysmeta_header('container', 'acl'):
json.dumps(access_control_policy)}
acl = decode_acl('container', headers, self.allow_no_owner)
self.assertEqual(type(acl), ACL)
self.assertEqual(acl.owner.id, 'test:tester')
self.assertEqual(len(acl.grants), 1)
self.assertEqual(str(acl.grants[0].grantee), 'test:tester')
self.assertEqual(acl.grants[0].permission, 'FULL_CONTROL')
def test_decode_acl_object(self):
access_control_policy = \
{'Owner': 'test:tester',
'Grant': [{'Permission': 'FULL_CONTROL',
'Grantee': 'test:tester'}]}
headers = {sysmeta_header('object', 'acl'):
json.dumps(access_control_policy)}
acl = decode_acl('object', headers, self.allow_no_owner)
self.assertEqual(type(acl), ACL)
self.assertEqual(acl.owner.id, 'test:tester')
self.assertEqual(len(acl.grants), 1)
self.assertEqual(str(acl.grants[0].grantee), 'test:tester')
self.assertEqual(acl.grants[0].permission, 'FULL_CONTROL')
def test_decode_acl_undefined(self):
headers = {}
acl = decode_acl('container', headers, self.allow_no_owner)
self.assertEqual(type(acl), ACL)
self.assertIsNone(acl.owner.id)
self.assertEqual(len(acl.grants), 0)
def test_decode_acl_empty_list(self):
headers = {sysmeta_header('container', 'acl'): '[]'}
acl = decode_acl('container', headers, self.allow_no_owner)
self.assertEqual(type(acl), ACL)
self.assertIsNone(acl.owner.id)
self.assertEqual(len(acl.grants), 0)
def test_decode_acl_with_invalid_json(self):
headers = {sysmeta_header('container', 'acl'): '['}
self.assertRaises(
InvalidSubresource, decode_acl, 'container',
headers, self.allow_no_owner)
def test_encode_acl_container(self):
acl = ACLPrivate(Owner(id='test:tester',
name='test:tester'))
acp = encode_acl('container', acl)
header_value = json.loads(acp[sysmeta_header('container', 'acl')])
self.assertTrue('Owner' in header_value)
self.assertTrue('Grant' in header_value)
self.assertEqual('test:tester', header_value['Owner'])
self.assertEqual(len(header_value['Grant']), 1)
def test_encode_acl_object(self):
acl = ACLPrivate(Owner(id='test:tester',
name='test:tester'))
acp = encode_acl('object', acl)
header_value = json.loads(acp[sysmeta_header('object', 'acl')])
self.assertTrue('Owner' in header_value)
self.assertTrue('Grant' in header_value)
self.assertEqual('test:tester', header_value['Owner'])
self.assertEqual(len(header_value['Grant']), 1)
def test_encode_acl_many_grant(self):
headers = {}
users = []
for i in range(0, 99):
users.append('id=test:tester%s' % str(i))
users = ','.join(users)
headers['x-amz-grant-read'] = users
acl = ACL.from_headers(headers, Owner('test:tester', 'test:tester'))
acp = encode_acl('container', acl)
header_value = acp[sysmeta_header('container', 'acl')]
header_value = json.loads(header_value)
self.assertTrue('Owner' in header_value)
self.assertTrue('Grant' in header_value)
self.assertEqual('test:tester', header_value['Owner'])
self.assertEqual(len(header_value['Grant']), 99)
def test_from_headers_x_amz_acl(self):
canned_acls = ['public-read', 'public-read-write',
'authenticated-read', 'bucket-owner-read',
'bucket-owner-full-control', 'log-delivery-write']
owner = Owner('test:tester', 'test:tester')
grantee_map = canned_acl_grantees(owner)
for acl_str in canned_acls:
acl = ACL.from_headers({'x-amz-acl': acl_str}, owner)
expected = grantee_map[acl_str]
self.assertEqual(len(acl.grants), len(expected)) # sanity
# parse Grant object to permission and grantee
actual_grants = [(grant.permission, grant.grantee)
for grant in acl.grants]
assertions = zip(sorted(expected), sorted(actual_grants))
for (expected_permission, expected_grantee), \
(permission, grantee) in assertions:
self.assertEqual(expected_permission, permission)
self.assertTrue(
isinstance(grantee, expected_grantee.__class__))
if isinstance(grantee, User):
self.assertEqual(expected_grantee.id, grantee.id)
self.assertEqual(expected_grantee.display_name,
grantee.display_name)
def test_from_headers_x_amz_acl_invalid(self):
with self.assertRaises(InvalidArgument) as cm:
ACL.from_headers({'x-amz-acl': 'invalid'},
Owner('test:tester', 'test:tester'))
self.assertTrue('argument_name' in cm.exception.info)
self.assertEqual(cm.exception.info['argument_name'], 'x-amz-acl')
self.assertTrue('argument_value' in cm.exception.info)
self.assertEqual(cm.exception.info['argument_value'], 'invalid')
def test_canned_acl_grantees(self):
grantee_map = canned_acl_grantees(Owner('test:tester', 'test:tester'))
canned_acls = ['private', 'public-read', 'public-read-write',
'authenticated-read', 'bucket-owner-read',
'bucket-owner-full-control', 'log-delivery-write']
for canned_acl in canned_acls:
self.assertTrue(canned_acl in grantee_map)
self.assertEqual(len(canned_acls), len(grantee_map)) # sanity
def test_base_grantee(self):
grantee = Grantee()
func = lambda: '' in grantee
self.assertRaises(S3NotImplemented, func)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,613,494,544,707,589,000 | 46.920981 | 79 | 0.591118 | false |
KhronosGroup/COLLADA-CTS | Core/Gui/Grid/FGridCellRenderer.py | 1 | 1977 | # Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import wx.grid
class FGridCellRenderer(wx.grid.PyGridCellRenderer):
def __init__(self):
wx.grid.PyGridCellRenderer.__init__(self)
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
if (isSelected):
color = grid.GetSelectionBackground()
else:
color = attr.GetBackgroundColour()
self.ColorDraw(dc, rect, color)
def ColorDraw(self, dc, rect, color):
dc.SetBackgroundMode(wx.SOLID)
dc.SetBrush(wx.Brush(color, wx.SOLID))
dc.SetPen(wx.Pen(color, 1, wx.SOLID))
dc.DrawRectangleRect(rect)
def AddContext(self, grid, row, col, menu, position):
raise NotImplementedError, "FGridCellRenderer.AddContext()"
def Clicked(self, grid, row, col, position):
raise NotImplementedError, "FGridCellRenderer.Clicked()"
| mit | 129,467,530,635,586,690 | 58.909091 | 466 | 0.714213 | false |
dustinrohde/python-conway | test/test_cell_set.py | 1 | 2096 | import pytest
from conway.grid import Cell
from conway.grid import Point as P
from conway.grid.cell_set import Grid
from . import GameRulesTestMixin
T = Cell.ALIVE
F = Cell.DEAD
class TestGrid(GameRulesTestMixin):
GRID_CLS = Grid
def test_init_with_width_and_height(self):
grid = Grid(width=3, height=2)
assert (grid.width, grid.height) == (3, 2)
assert grid.cells == set()
with pytest.raises(ValueError):
grid = Grid(width=3)
with pytest.raises(ValueError):
grid = Grid(height=3)
with pytest.raises(ValueError):
grid = Grid(width=3, height=0)
with pytest.raises(ValueError):
grid = Grid(width=0, height=3)
with pytest.raises(ValueError):
grid = Grid(width=0, height=0)
with pytest.raises(ValueError):
grid = Grid()
def test_init_with_cells(self):
cells = {P(0, 0), P(1, 1), P(2, 1)}
grid = Grid(cells=cells)
assert (grid.width, grid.height) == (3, 2)
assert grid.cells == cells
cells = {P(1, 1), P(1, 2)}
grid = Grid(cells=cells.copy())
assert (grid.width, grid.height) == (2, 3)
assert grid.cells == cells
grid = Grid(cells=cells.copy(), width=2, height=3)
assert (grid.width, grid.height) == (2, 3)
assert grid.cells == cells
grid = Grid(cells=cells.copy(), width=4)
assert (grid.width, grid.height) == (4, 3)
assert grid.cells == cells
grid = Grid(cells=cells.copy(), height=4)
assert (grid.width, grid.height) == (2, 4)
assert grid.cells == cells
with pytest.raises(ValueError):
grid = Grid(cells=cells.copy(), height=2)
with pytest.raises(ValueError):
grid = Grid(cells=cells.copy(), width=1)
with pytest.raises(ValueError):
grid = Grid(cells=set())
with pytest.raises(ValueError):
grid = Grid(cells=set(), width=2)
with pytest.raises(ValueError):
grid = Grid(cells=set(), height=2)
| mit | 1,718,295,299,079,489,500 | 29.823529 | 58 | 0.57395 | false |
deepmind/sonnet | sonnet/src/build.py | 1 | 2561 | # Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility function to build Sonnet modules."""
from typing import Any, Callable
import tensorflow as tf
import tree
def _int_or_none(o):
return isinstance(o, (int, type(None)))
def _promote_shapes(o):
"""Promotes lists of ints/Nones to :tf:`TensorSpec` instances."""
if isinstance(o, (list, tuple)) and all(_int_or_none(e) for e in o):
return tf.TensorSpec(o)
return o
def _maybe_tensor_spec(shape, dtype):
return tf.TensorSpec(shape, dtype) if dtype is not None else None
# TODO(tomhennigan) Use TensorNest in types here.
def build(
f: Callable[..., Any],
*args,
**kwargs
):
r"""Builds a module by creating all parameters but not computing any output.
>>> mod = snt.nets.MLP([1000, 10])
>>> snt.build(mod, [None, 28 * 28])
TensorSpec(shape=(None, 10), dtype=tf.float32, name=None)
>>> mod.variables
(<tf.Variable 'mlp/linear_0/b:0' shape=(1000,) ...>,
<tf.Variable 'mlp/linear_0/w:0' shape=(784, 1000) ...>,
<tf.Variable 'mlp/linear_1/b:0' shape=(10,) ...>,
<tf.Variable 'mlp/linear_1/w:0' shape=(1000, 10) ...>)
Args:
f: A function or callable :class:`Module` that will create variables.
*args: Positional arguments to supply to ``f``. Note that positional
arguments that are sequences of None/ints are converted to
:tf:`TensorSpec` instances.
**kwargs: Keyword arguments to pass to the module.
Returns:
The output of ``f`` with any :tf:`Tensor`\ s replaced by :tf:`TensorSpec`.
"""
f = tf.function(f)
args = map(_promote_shapes, args)
# NOTE: We use a concrete function to ensure that weights are created and
# initialized, but other stateful ops (e.g. updating weights) are not.
cf = f.get_concrete_function(*args, **kwargs)
return tree.map_structure(_maybe_tensor_spec, cf.output_shapes,
cf.output_dtypes)
| apache-2.0 | 7,186,900,872,818,753,000 | 35.070423 | 78 | 0.655994 | false |
bnewbold/diffoscope | tests/comparators/test_rpm.py | 1 | 3005 | # -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2015 Jérémy Bobbio <[email protected]>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <http://www.gnu.org/licenses/>.
import os.path
import pytest
from diffoscope.comparators import specialize
from diffoscope.comparators.binary import FilesystemFile, NonExistingFile
from diffoscope.comparators.rpm import RpmFile
from diffoscope.config import Config
from conftest import tool_missing
TEST_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.rpm')
TEST_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.rpm')
@pytest.fixture
def rpm1():
return specialize(FilesystemFile(TEST_FILE1_PATH))
@pytest.fixture
def rpm2():
return specialize(FilesystemFile(TEST_FILE2_PATH))
def test_identification(rpm1):
assert isinstance(rpm1, RpmFile)
def test_no_differences(rpm1):
difference = rpm1.compare(rpm1)
assert difference is None
@pytest.fixture
def differences(rpm1, rpm2):
return rpm1.compare(rpm2).details
@pytest.mark.skipif(tool_missing('rpm2cpio'), reason='missing rpm2cpio')
def test_header(differences):
assert differences[0].source1 == 'header'
expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/rpm_header_expected_diff')).read()
assert differences[0].unified_diff == expected_diff
@pytest.mark.skipif(tool_missing('rpm2cpio'), reason='missing rpm2cpio')
def test_listing(differences):
assert differences[1].source1 == 'content'
assert differences[1].details[0].source1 == 'file list'
expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/rpm_listing_expected_diff')).read()
assert differences[1].details[0].unified_diff == expected_diff
@pytest.mark.skipif(tool_missing('rpm2cpio'), reason='missing rpm2cpio')
def test_content(differences):
assert differences[1].source1 == 'content'
assert differences[1].details[1].source1 == './dir/text'
expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/text_ascii_expected_diff')).read()
assert differences[1].details[1].unified_diff == expected_diff
def test_compare_non_existing(monkeypatch, rpm1):
monkeypatch.setattr(Config.general, 'new_file', True)
difference = rpm1.compare(NonExistingFile('/nonexisting', rpm1))
assert difference.source2 == '/nonexisting'
assert difference.details[-1].source2 == '/dev/null'
| gpl-3.0 | -2,231,490,363,198,703,600 | 39.567568 | 109 | 0.738175 | false |
syscoin/syscoin2 | test/functional/p2p_disconnect_ban.py | 1 | 5575 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
wait_until,
)
class DisconnectBanTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def run_test(self):
self.log.info("Connect nodes both way")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban(subnet="127.0.0.1", command="add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 3)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
self.log.info("Connect nodes both way")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes(self.nodes[0], 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
| mit | -4,048,927,991,779,465,700 | 47.903509 | 155 | 0.650762 | false |
Zatsugami/python-egghead-crawler | egghead/spiders/lessons.py | 1 | 1244 | # -*- coding: utf-8 -*-
import scrapy
from egghead.items import LessonVideo
from egghead.spiders import LoginSpider
from urlparse import urlparse, urljoin
def lesson_filename(url):
file_name = urlparse(url).path.split('/')[-1]
return '{}.mp4'.format(file_name)
def lesson_urls(response):
return response.css('#lesson-list .lesson-row .title a::attr(href)').extract()
class LessonsSpider(LoginSpider):
name = 'lessons'
allowed_domains = ['egghead.io']
on_success_auth = 'https://egghead.io/technologies'
def with_session(self, response):
urls = lesson_urls(response)
for url in urls:
yield scrapy.Request(url, callback=self.parse_lesson)
next_page_url = response.css('.pagination .next a::attr(href)').extract_first()
if next_page_url:
next_url = urljoin(response.url, next_page_url)
yield scrapy.Request(next_url, callback=self.with_session)
def parse_lesson(self, response):
file_url = response.css('.wistia_embed meta[itemprop="contentURL"]::attr(content)').extract_first()
file_name = lesson_filename(response.url)
if file_url:
yield LessonVideo(file_urls=['{}/{}'.format(file_url, file_name)])
| gpl-2.0 | -1,784,667,792,561,143,300 | 32.621622 | 107 | 0.659164 | false |
PableraShow/Learn-to-program-with-Python-guide | Types & Operations/Integers-&-Floating-Point-Numbers/Constants.py | 1 | 1338 | """
Integers can be given in any of four bases:
- Base ten (decimal — what people are most used to): consists of a sequence of digits, not starting with 0
- Base two (binary): consists of 0b followed by 0's and 1's
- Base eight (octal): consists of a 0 followed by digits in the range 0 to 7
- Base sixteen (hexadecimal): consists of 0x followed by digits in the range 0 to 9 or letters in the range A to F (in upper or lower case)
- Integers can be preceded by a positive or negative sign. Any integer followed by an L, or whose absolute value exceeds the platform's plain-integer maximum (sys.maxint), is considered a long integer.
Floating-point numbers consist of a series of digits, a decimal point, and another series of digits. They can be preceded by an optional positive or negative sign. They can be followed by an optional exponent — the letter e, an optional positive or negative sign, and a series of digits.
"""
print 12
# 12
print -12.0
# -12
print 0b1001
# 9
print 021
# 17
print -021
# -17
print 0x3A8
# 936
print 12L
# 12
print 12345678901234567890123456789012345678901234567890
# 12345678901234567890123456789012345678901234567890
print 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFL
# 340282366920938463463374607431768211455
print +12.12345
# 12.12345
print -12.12345
# -12.12345
print 1.2e3
# 1200
print 1.2e-3
# 0.0012 | mit | -5,535,984,399,341,046,000 | 34.108108 | 287 | 0.752624 | false |
apiaryio/black-belt | blackbelt/deployment.py | 1 | 1417 | from subprocess import check_call, check_output
from blackbelt.handle_github import get_current_branch, run_grunt_in_parallel
from blackbelt.messages import post_message
def deploy_staging():
branch_name = get_current_branch()
post_message("Deploying branch %s to staging" % branch_name, "#deploy-queue")
check_call(['grunt', 'deploy', '--app=apiary-staging', '--force', "--branch=%s" % branch_name])
def deploy_production():
post_message("Deploying to production", "#deploy-queue")
    slug_creation_return_code = run_grunt_in_parallel((
['grunt', 'create-slug'],
['grunt', 'create-slug', '--app=apiary-staging-pre'],
['grunt', 'create-slug', '--app=apiary-staging-qa'],
))
    if slug_creation_return_code != 0:
post_message("Slug creation failed, deploy stopped.", "#deploy-queue")
raise ValueError("One of the slug creations failed. Check output few lines above.")
check_output(['grunt', 'deploy-slug', '--app=apiary-staging-qa'])
check_output(['grunt', 'deploy-slug', '--app=apiary-staging-pre'])
check_output(['grunt', 'deploy-slug'])
def rollback_production():
post_message("Rollback production for all environments (prod, qa, pre)", "#deploy-queue")
check_call(['grunt', 'rollback', '--app=apiary-staging-qa'])
check_call(['grunt', 'rollback', '--app=apiary-staging-pre'])
check_call(['grunt', 'rollback'])
| mit | 1,149,457,579,899,781,200 | 37.297297 | 99 | 0.66055 | false |
richard-willowit/odoo | odoo/tools/xml_utils.py | 2 | 1848 | # -*- coding: utf-8 -*-
from lxml import etree
from odoo.tools.misc import file_open
def check_with_xsd(tree_or_str, xsd_path):
if not isinstance(tree_or_str, etree._Element):
tree_or_str = etree.fromstring(tree_or_str)
xml_schema_doc = etree.parse(file_open(xsd_path))
xsd_schema = etree.XMLSchema(xml_schema_doc)
try:
xsd_schema.assertValid(tree_or_str)
except etree.DocumentInvalid as xml_errors:
#import UserError only here to avoid circular import statements with tools.func being imported in exceptions.py
from odoo.exceptions import UserError
raise UserError('\n'.join(str(e) for e in xml_errors.error_log))
def create_xml_node_chain(first_parent_node, nodes_list, last_node_value=None):
""" Utility function for generating XML files nodes. Generates as a hierarchical
chain of nodes (each new node being the son of the previous one) based on the tags
contained in `nodes_list`, under the given node `first_parent_node`.
It will also set the value of the last of these nodes to `last_node_value` if it is
specified. This function returns the list of created nodes.
"""
res = []
current_node = first_parent_node
for tag in nodes_list:
current_node = etree.SubElement(current_node, tag)
res.append(current_node)
if last_node_value is not None:
current_node.text = last_node_value
return res
def create_xml_node(parent_node, node_name, node_value=None):
""" Utility function for managing XML. It creates a new node with the specified
`node_name` as a child of given `parent_node` and assigns it `node_value` as value.
:param parent_node: valid etree Element
:param node_name: string
:param node_value: string
"""
return create_xml_node_chain(parent_node, [node_name], node_value)[0]
| gpl-3.0 | -1,629,396,402,940,386,600 | 43 | 119 | 0.697511 | false |
sanjeevtripurari/hue | desktop/core/src/desktop/middleware.py | 1 | 25658 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import inspect
import json
import logging
import os.path
import re
import tempfile
import time
import kerberos
from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, BACKEND_SESSION_KEY, authenticate, load_backend, login
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core import exceptions, urlresolvers
import django.db
from django.http import HttpResponseNotAllowed
from django.core.urlresolvers import resolve
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext as _
from django.utils.http import urlquote, is_safe_url
from django.utils.encoding import iri_to_uri
import django.views.static
import desktop.views
import desktop.conf
from desktop.context_processors import get_app_name
from desktop.lib import apputil, i18n
from desktop.lib.django_util import render, render_json, is_jframe_request, get_username_re_rule, get_groupname_re_rule
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_log, log_page_hit
from desktop import appmanager
from desktop import metrics
from hadoop import cluster
from desktop.log import get_audit_logger
LOG = logging.getLogger(__name__)
MIDDLEWARE_HEADER = "X-Hue-Middleware-Response"
# Views inside Django that don't require login
# (see LoginAndPermissionMiddleware)
DJANGO_VIEW_AUTH_WHITELIST = [
django.views.static.serve,
desktop.views.is_alive,
]
class AjaxMiddleware(object):
"""
Middleware that augments request to set request.ajax
for either is_ajax() (looks at HTTP headers) or ?format=json
GET parameters.
"""
def process_request(self, request):
request.ajax = request.is_ajax() or request.REQUEST.get("format", "") == "json"
return None
class ExceptionMiddleware(object):
"""
If exceptions know how to render themselves, use that.
"""
def process_exception(self, request, exception):
import traceback
tb = traceback.format_exc()
logging.info("Processing exception: %s: %s" % (i18n.smart_unicode(exception),
i18n.smart_unicode(tb)))
if isinstance(exception, PopupException):
return exception.response(request)
if isinstance(exception, StructuredException):
if request.ajax:
response = render_json(exception.response_data)
response[MIDDLEWARE_HEADER] = 'EXCEPTION'
response.status_code = getattr(exception, 'error_code', 500)
return response
else:
response = render("error.mako", request,
dict(error=exception.response_data.get("message")))
response.status_code = getattr(exception, 'error_code', 500)
return response
return None
class ClusterMiddleware(object):
"""
Manages setting request.fs and request.jt
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
Sets request.fs and request.jt on every request to point to the
configured filesystem.
"""
request.fs_ref = request.REQUEST.get('fs', view_kwargs.get('fs', 'default'))
if "fs" in view_kwargs:
del view_kwargs["fs"]
try:
request.fs = cluster.get_hdfs(request.fs_ref)
except KeyError:
raise KeyError(_('Cannot find HDFS called "%(fs_ref)s".') % {'fs_ref': request.fs_ref})
if request.user.is_authenticated():
if request.fs is not None:
request.fs.setuser(request.user.username)
request.jt = cluster.get_default_mrcluster() # Deprecated, only there for MR1
if request.jt is not None:
request.jt.setuser(request.user.username)
else:
request.jt = None
class NotificationMiddleware(object):
"""
Manages setting request.info and request.error
"""
def process_view(self, request, view_func, view_args, view_kwargs):
def message(title, detail=None):
if detail is None:
detail = ''
else:
detail = '<br/>%s' % detail
return '%s %s' % (title, detail)
def info(title, detail=None):
messages.info(request, message(title, detail))
def error(title, detail=None):
messages.error(request, message(title, detail))
def warn(title, detail=None):
messages.warning(request, message(title, detail))
request.info = info
request.error = error
request.warn = warn
class AppSpecificMiddleware(object):
@classmethod
def augment_request_with_app(cls, request, view_func):
""" Stuff the app into the request for use in later-stage middleware """
if not hasattr(request, "_desktop_app"):
module = inspect.getmodule(view_func)
request._desktop_app = apputil.get_app_for_module(module)
if not request._desktop_app and not module.__name__.startswith('django.'):
logging.debug("no app for view func: %s in %s" % (view_func, module))
def __init__(self):
self.middlewares_by_app = {}
for app in appmanager.DESKTOP_APPS:
self.middlewares_by_app[app.name] = self._load_app_middleware(app)
def _get_middlewares(self, app, type):
return self.middlewares_by_app.get(app, {}).get(type, [])
def process_view(self, request, view_func, view_args, view_kwargs):
"""View middleware"""
self.augment_request_with_app(request, view_func)
if not request._desktop_app:
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'view'):
ret = middleware(request, view_func, view_args, view_kwargs)
if ret: return ret # short circuit
return ret
def process_response(self, request, response):
"""Response middleware"""
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for request.")
return response
for middleware in reversed(self._get_middlewares(request._desktop_app, 'response')):
response = middleware(request, response)
return response
def process_exception(self, request, exception):
"""Exception middleware"""
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for exception.")
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'exception'):
ret = middleware(request, exception)
if ret: return ret # short circuit
return ret
def _load_app_middleware(cls, app):
app_settings = app.settings
if not app_settings:
return
mw_classes = app_settings.__dict__.get('MIDDLEWARE_CLASSES', [])
result = {'view': [], 'response': [], 'exception': []}
for middleware_path in mw_classes:
# This code brutally lifted from django.core.handlers
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured, _('%(module)s isn\'t a middleware module.') % {'module': middleware_path}
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = __import__(mw_module, {}, {}, [''])
except ImportError, e:
raise exceptions.ImproperlyConfigured, _('Error importing middleware %(module)s: "%(error)s".') % {'module': mw_module, 'error': e}
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured, _('Middleware module "%(module)s" does not define a "%(class)s" class.') % {'module': mw_module, 'class':mw_classname}
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
# End brutal code lift
# We need to make sure we don't have a process_request function because we don't know what
# application will handle the request at the point process_request is called
if hasattr(mw_instance, 'process_request'):
raise exceptions.ImproperlyConfigured, \
_('AppSpecificMiddleware module "%(module)s" has a process_request function' + \
' which is impossible.') % {'module': middleware_path}
if hasattr(mw_instance, 'process_view'):
result['view'].append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
result['response'].insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
result['exception'].insert(0, mw_instance.process_exception)
return result
class LoginAndPermissionMiddleware(object):
"""
Middleware that forces all views (except those that opt out) through authentication.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
We also perform access logging in ``process_view()`` since we have the view function,
which tells us the log level. The downside is that we don't have the status code,
which isn't useful for status logging anyways.
"""
access_log_level = getattr(view_func, 'access_log_level', None)
# First, skip views not requiring login
# If the view has "opted out" of login required, skip
if hasattr(view_func, "login_notrequired"):
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
# There are certain django views which are also opt-out, but
# it would be evil to go add attributes to them
if view_func in DJANGO_VIEW_AUTH_WHITELIST:
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
# If user is logged in, check that he has permissions to access the
# app.
if request.user.is_active and request.user.is_authenticated():
AppSpecificMiddleware.augment_request_with_app(request, view_func)
# Until we get Django 1.3 and resolve returning the URL name, we just do a match of the name of the view
try:
access_view = 'access_view:%s:%s' % (request._desktop_app, resolve(request.path)[0].__name__)
except Exception, e:
access_log(request, 'error checking view perm: %s', e, level=access_log_level)
        access_view = ''
# Accessing an app can access an underlying other app.
# e.g. impala or spark uses code from beeswax and so accessing impala shows up as beeswax here.
# Here we trust the URL to be the real app we need to check the perms.
app_accessed = request._desktop_app
ui_app_accessed = get_app_name(request)
if app_accessed != ui_app_accessed and ui_app_accessed not in ('logs', 'accounts', 'login'):
app_accessed = ui_app_accessed
if app_accessed and \
app_accessed not in ("desktop", "home", "about") and \
not (request.user.has_hue_permission(action="access", app=app_accessed) or
request.user.has_hue_permission(action=access_view, app=app_accessed)):
access_log(request, 'permission denied', level=access_log_level)
return PopupException(
_("You do not have permission to access the %(app_name)s application.") % {'app_name': app_accessed.capitalize()}, error_code=401).response(request)
else:
log_page_hit(request, view_func, level=access_log_level)
return None
logging.info("Redirecting to login page: %s", request.get_full_path())
access_log(request, 'login redirection', level=access_log_level)
if request.ajax:
# Send back a magic header which causes Hue.Request to interpose itself
# in the ajax request and make the user login before resubmitting the
# request.
response = HttpResponse("/* login required */", content_type="text/javascript")
response[MIDDLEWARE_HEADER] = 'LOGIN_REQUIRED'
return response
else:
return HttpResponseRedirect("%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME, urlquote(request.get_full_path())))
class JsonMessage(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __str__(self):
return json.dumps(self.kwargs)
class AuditLoggingMiddleware(object):
username_re = get_username_re_rule()
groupname_re = get_groupname_re_rule()
operations = {
'/accounts/login': 'USER_LOGIN',
'/accounts/logout': 'USER_LOGOUT',
'/useradmin/users/add_ldap_users': 'ADD_LDAP_USERS',
'/useradmin/users/add_ldap_groups': 'ADD_LDAP_GROUPS',
'/useradmin/users/sync_ldap_users_groups': 'SYNC_LDAP_USERS_GROUPS',
'/useradmin/users/new': 'CREATE_USER',
'/useradmin/groups/new': 'CREATE_GROUP',
'/useradmin/users/delete': 'DELETE_USER',
'/useradmin/groups/delete': 'DELETE_GROUP'
}
operation_patterns = {
'/useradmin/permissions/edit/(?P<app>.*)/(?P<priv>.*)': 'EDIT_PERMISSION',
'/useradmin/users/edit/(?P<username>%s)' % (username_re,): 'EDIT_USER',
'/useradmin/groups/edit/(?P<name>%s)' % (groupname_re,): 'EDIT_GROUP'
}
def __init__(self):
from desktop.conf import AUDIT_EVENT_LOG_DIR, SERVER_USER
self.impersonator = SERVER_USER.get()
if not AUDIT_EVENT_LOG_DIR.get():
LOG.info('Unloading AuditLoggingMiddleware')
raise exceptions.MiddlewareNotUsed
def process_view(self, request, view_func, view_args, view_kwargs):
try:
operation = self._get_operation(request.path)
if operation == 'USER_LOGOUT':
self._log_message(operation, request)
except Exception, e:
LOG.error('Could not audit the request: %s' % e)
return None
def process_response(self, request, response):
response['audited'] = False
try:
operation = self._get_operation(request.path)
if request.method == 'POST' and operation and operation != 'USER_LOGOUT':
self._log_message(operation, request, response)
response['audited'] = True
except Exception, e:
LOG.error('Could not audit the request: %s' % e)
return response
def _log_message(self, operation, request, response=None):
audit_logger = get_audit_logger()
allowed = True
status = 200
if response is not None:
allowed = response.status_code != 401
status = response.status_code
audit_logger.debug(JsonMessage(**{
'username': self._get_username(request),
'impersonator': self.impersonator,
'ipAddress': self._get_client_ip(request),
'operation': operation,
'eventTime': self._milliseconds_since_epoch(),
'allowed': allowed,
'status': status,
'service': get_app_name(request),
'url': request.path
}))
def _get_client_ip(self, request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
x_forwarded_for = x_forwarded_for.split(',')[0]
return request.META.get('HTTP_CLIENT_IP') or x_forwarded_for or request.META.get('REMOTE_ADDR')
def _get_username(self, request):
if hasattr(request, 'user') and not request.user.is_anonymous():
return request.user.get_username()
else:
return 'anonymous'
def _milliseconds_since_epoch(self):
return int(time.time() * 1000)
def _get_operation(self, path):
url = path.rstrip('/')
if url in AuditLoggingMiddleware.operations:
return AuditLoggingMiddleware.operations[url]
else:
for regex, operation in AuditLoggingMiddleware.operation_patterns.items():
pattern = re.compile(regex)
if pattern.match(url):
return operation
return None
try:
import tidylib
_has_tidylib = True
except Exception, ex:
# The exception type is not ImportError. It's actually an OSError.
logging.warn("Failed to import tidylib (for debugging). Is libtidy installed?")
_has_tidylib = False
class HtmlValidationMiddleware(object):
"""
If configured, validate output html for every response.
"""
def __init__(self):
self._logger = logging.getLogger('HtmlValidationMiddleware')
if not _has_tidylib:
logging.error("HtmlValidationMiddleware not activatived: "
"Failed to import tidylib.")
return
# Things that we don't care about
self._to_ignore = (
re.compile('- Warning: <.*> proprietary attribute "data-'),
re.compile('- Warning: trimming empty'),
re.compile('- Info:'),
)
# Find the directory to write tidy html output
try:
self._outdir = os.path.join(tempfile.gettempdir(), 'hue_html_validation')
if not os.path.isdir(self._outdir):
os.mkdir(self._outdir, 0755)
except Exception, ex:
self._logger.exception('Failed to get temp directory: %s', (ex,))
self._outdir = tempfile.mkdtemp(prefix='hue_html_validation-')
# Options to pass to libtidy. See
# http://tidy.sourceforge.net/docs/quickref.html
self._options = {
'show-warnings': 1,
'output-html': 0,
'output-xhtml': 1,
'char-encoding': 'utf8',
'output-encoding': 'utf8',
'indent': 1,
'wrap': 0,
}
def process_response(self, request, response):
if not _has_tidylib or not self._is_html(request, response):
return response
html, errors = tidylib.tidy_document(response.content,
self._options,
keep_doc=True)
if not errors:
return response
# Filter out what we care about
err_list = errors.rstrip().split('\n')
err_list = self._filter_warnings(err_list)
if not err_list:
return response
try:
fn = urlresolvers.resolve(request.path)[0]
fn_name = '%s.%s' % (fn.__module__, fn.__name__)
except:
LOG.exception('failed to resolve url')
fn_name = '<unresolved_url>'
# Write the two versions of html out for offline debugging
filename = os.path.join(self._outdir, fn_name)
result = "HTML tidy result: %s [%s]:" \
"\n\t%s" \
"\nPlease see %s.orig %s.tidy\n-------" % \
(request.path, fn_name, '\n\t'.join(err_list), filename, filename)
file(filename + '.orig', 'w').write(i18n.smart_str(response.content))
file(filename + '.tidy', 'w').write(i18n.smart_str(html))
file(filename + '.info', 'w').write(i18n.smart_str(result))
self._logger.error(result)
return response
def _filter_warnings(self, err_list):
"""A hacky way to filter out things that we don't care about."""
res = [ ]
for err in err_list:
for ignore in self._to_ignore:
if ignore.search(err):
break
else:
res.append(err)
return res
def _is_html(self, request, response):
return not request.is_ajax() and \
'html' in response['Content-Type'] and \
200 <= response.status_code < 300
class SpnegoMiddleware(object):
"""
  Based on the WSGI SPNEGO middleware class posted here:
http://code.activestate.com/recipes/576992/
"""
def __init__(self):
if not 'desktop.auth.backend.SpnegoDjangoBackend' in desktop.conf.AUTH.BACKEND.get():
LOG.info('Unloading SpnegoMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
if 'GSS-String' in request.META:
response['WWW-Authenticate'] = request.META['GSS-String']
elif 'Return-401' in request.META:
response = HttpResponse("401 Unauthorized", content_type="text/plain",
status=401)
response['WWW-Authenticate'] = 'Negotiate'
response.status = 401
return response
def process_request(self, request):
"""
The process_request() method needs to communicate some state to the
process_response() method. The two options for this are to return an
HttpResponse object or to modify the META headers in the request object. In
order to ensure that all of the middleware is properly invoked, this code
    currently uses the latter approach. The following headers are currently used:
GSS-String:
This means that GSS authentication was successful and that we need to pass
this value for the WWW-Authenticate header in the response.
Return-401:
This means that the SPNEGO backend is in use, but we didn't get an
AUTHORIZATION header from the client. The way that the protocol works
(http://tools.ietf.org/html/rfc4559) is by having the first response to an
un-authenticated request be a 401 with the WWW-Authenticate header set to
Negotiate. This will cause the browser to re-try the request with the
AUTHORIZATION header set.
"""
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
      raise exceptions.ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the SpnegoUserMiddleware class.")
if 'HTTP_AUTHORIZATION' in request.META:
type, authstr = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if type == 'Negotiate':
try:
result, context = kerberos.authGSSServerInit('HTTP')
if result != 1:
return
gssstring=''
r=kerberos.authGSSServerStep(context,authstr)
if r == 1:
gssstring=kerberos.authGSSServerResponse(context)
request.META['GSS-String'] = 'Negotiate %s' % gssstring
else:
kerberos.authGSSServerClean(context)
return
username = kerberos.authGSSServerUserName(context)
kerberos.authGSSServerClean(context)
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
user = authenticate(username=username)
if user:
request.user = user
login(request, user)
return
except:
LOG.exception('Unexpected error when authenticating against KDC')
return
else:
request.META['Return-401'] = ''
return
else:
if not request.user.is_authenticated():
request.META['Return-401'] = ''
return
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError:
pass
return username
class HueRemoteUserMiddleware(RemoteUserMiddleware):
"""
Middleware to delegate authentication to a proxy server. The proxy server
will set an HTTP header (defaults to Remote-User) with the name of the
authenticated user. This class extends the RemoteUserMiddleware class
built into Django with the ability to configure the HTTP header and to
unload the middleware if the RemoteUserDjangoBackend is not currently
in use.
"""
def __init__(self):
if not 'desktop.auth.backend.RemoteUserDjangoBackend' in desktop.conf.AUTH.BACKEND.get():
LOG.info('Unloading HueRemoteUserMiddleware')
raise exceptions.MiddlewareNotUsed
self.header = desktop.conf.AUTH.REMOTE_USER_HEADER.get()
class EnsureSafeMethodMiddleware(object):
"""
Middleware to white list configured HTTP request methods.
"""
def process_request(self, request):
if request.method not in desktop.conf.HTTP_ALLOWED_METHODS.get():
return HttpResponseNotAllowed(desktop.conf.HTTP_ALLOWED_METHODS.get())
class EnsureSafeRedirectURLMiddleware(object):
"""
Middleware to white list configured redirect URLs.
"""
def process_response(self, request, response):
if response.status_code in (301, 302, 303, 305, 307, 308) and response.get('Location'):
redirection_patterns = desktop.conf.REDIRECT_WHITELIST.get()
location = response['Location']
if any(regexp.match(location) for regexp in redirection_patterns):
return response
if is_safe_url(location, request.get_host()):
return response
response = render("error.mako", request, dict(error=_('Redirect to %s is not allowed.') % response['Location']))
response.status_code = 403
return response
else:
return response
class MetricsMiddleware(object):
"""
Middleware to track the number of active requests.
"""
def process_request(self, request):
self._response_timer = metrics.response_time.time()
metrics.active_requests.inc()
def process_exception(self, request, exception):
self._response_timer.stop()
metrics.request_exceptions.inc()
def process_response(self, request, response):
self._response_timer.stop()
metrics.active_requests.dec()
return response
| apache-2.0 | 6,306,637,644,091,341,000 | 34.735376 | 167 | 0.671292 | false |
Toofifty/Oracle2 | oracle/modules/lottery.py | 1 | 5609 | from threading import Thread
import time, traceback, random
from format import BOLD, RESET, CYAN, GREEN
lotto = None
def _init(b):
print '\t%s loaded' % __name__
def lottery(l, b, i):
"""!parent-command
!c new
!d Create a new lottery (cost: 10 points)
!a [duration] [min-bet] [max-bet]
!r user
!c info
!d Get info about the current lottery
!r user
!c bet
!d Place a bet in the current lottery
!a <amount>
!r user
"""
    def new(l, b, i):
        global lotto
        if lotto is not None:
b.l_say('There\'s already a lottery running.', i, 0)
return True
if i.user.get_points() < 10:
b.l_say('You don\'t have enough points to begin a lottery.', i, 0)
return True
duration = 600
min_bet = 10
max_bet = 200
if len(i.args) > 1:
try:
if 'm' in i.args[1]:
duration = 60 * int(i.args[1].replace('m', ''))
else:
duration = int(i.args[1])
except:
traceback.print_exc()
b.l_say('Please only use digits or \'m\' for the duration.', i, 0)
return True
if len(i.args) > 2:
try:
min_bet = max(min_bet, int(i.args[2]))
except:
b.l_say('The minimum bet must be a number.', i, 0)
return True
if len(i.args) > 3:
try:
max_bet = max(min_bet, int(i.args[3]))
except:
b.l_say('The maximum bet must be a number.', i, 0)
return True
        lotto = Lotto(b, duration, min_bet, max_bet)
lotto.start()
i.user.add_points(-10)
b.l_say('You have %d points left.' % i.user.get_points(), i, 0)
def info(l, b, i):
global lotto
if lotto is None:
b.l_say('There is no lottery at the moment.', i, 0)
return True
m, s = divmod(lotto.time_left, 60)
b.l_say(
'%s Time left: %02d:%02d, Prize pool: %d, Bet range: %d - %d' % (
lotto.format, m, s, lotto.get_pool(), lotto.min_bet,
lotto.max_bet
), i, 0
)
def bet(l, b, i):
global lotto
if lotto is None:
b.l_say('There is no lottery at the moment.', i, 0)
return True
if len(i.args) > 1:
try:
                bet = lotto.add_bet(i.nick, int(i.args[1]))
if not bet:
b.l_say('You don\'t have enough points.', i, 0)
return True
i.user.add_points(-1 * bet)
b.l_say('You have %d points left.' % i.user.get_points(), i, 0)
except:
traceback.print_exc()
b.l_say('The amount must be a number.', i, 0)
return True
b.l_say('You need to specify a bet amount.', i, 0)
try:
exec ('%s(l, b, i)' % i.args[0]) in globals(), locals()
except Exception, e:
traceback.print_exc()
b.l_say('Usage: %s.lottery new|bet|info' % CYAN, i, 0)
return True
class Lotto(Thread):
def __init__(self, bot, duration, min_bet, max_bet):
Thread.__init__(self)
self.min_bet = min_bet
self.max_bet = max_bet
self.time_left = duration
self.bets = {}
self.bot = bot
self.dead = False
self.format = '[%sLottery%s]' % (CYAN, RESET)
print '\t\tNew %s started' % __name__
m, s = divmod(duration, 60)
self.bot.say(
'%s New lottery started! Will run for %02d:%02d. Bet range: %d - %d'\
% (self.format, m, s, self.min_bet, self.max_bet),
'all'
)
def add_bet(self, nick, bet):
if bet < self.min_bet:
return False
elif bet > self.max_bet:
bet = self.max_bet
if nick in self.bets:
return False
self.bets[nick] = bet
pool = self.get_pool()
self.bot.say(
'%s %s bet %dp. Pool is now %dp.' % (self.format, nick, bet, pool),
'all'
)
return bet
def get_pool(self):
pool = 0
for k, v in self.bets.iteritems():
pool += v
return pool
def find_winner(self, num):
for k, v in self.bets.iteritems():
            if num <= v:
return k
else:
num -= v
return None
    def kill(self):
self.dead = True
    def end(self):
        pool = self.get_pool()
        if pool == 0:
            # Nobody bet; avoid random.randint(1, 0) raising and just close quietly.
            self.kill()
            return False
        winning_num = random.randint(1, pool)
        winner = self.find_winner(winning_num)
        if winner is None:
            return False
self.bot.say(
'%s %s%s%s is the lucky winner of this round and receives %s%d%s points!' % \
(self.format, BOLD, winner, RESET, GREEN, pool, RESET),
'all'
)
win_user = self.bot.get_user(winner)
win_user.add_points(pool)
self.bot.msg(
win_user, 'You now have %s%d%s points.' % (BOLD,
win_user.get_points(), RESET)
)
self.kill()
def run(self):
while not self.dead:
while self.time_left > 0 and not self.dead:
self.time_left -= 2
time.sleep(2)
if self.dead:
return
self.end()
| mit | 8,233,873,189,446,941,000 | 28.062176 | 89 | 0.463541 | false |
fedora-conary/conary-policy | policy/normalize.py | 1 | 38175 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import stat
import tempfile
import filecmp
import shutil
from conary.lib import magic, util
from conary.build import policy, recipe
from conary.local import database
def _findProgPath(prog, db, recipe):
# ignore arguments
prog = prog.split(' ')[0]
if prog.startswith('/'):
progPath = prog
else:
macros = recipe.macros
searchPath = [macros.essentialbindir,
macros.bindir,
macros.essentialsbindir,
macros.sbindir]
searchPath.extend([x for x in ['/bin', '/usr/bin', '/sbin', '/usr/sbin']
if x not in searchPath])
searchPath.extend([x for x in os.getenv('PATH', '').split(os.path.pathsep)
if x not in searchPath])
progPath = util.findFile(prog, searchPath)
progTroveName = [ x.getName() for x in db.iterTrovesByPath(progPath) ]
if progTroveName:
progTroveName = progTroveName[0]
try:
if progTroveName in recipe._getTransitiveBuildRequiresNames():
recipe.reportExcessBuildRequires(progTroveName)
else:
                recipe.reportMissingBuildRequires(progTroveName)
except AttributeError:
# older conary
pass
return progPath
class NormalizeCompression(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeCompression()}} - Compress files with maximum compression
SYNOPSIS
========
C{r.NormalizeCompression([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeCompression()} policy compresses files with maximum
    compression, and without data which may change from invocation to
    invocation.
Recompresses .gz files with -9 -n, and .bz2 files with -9, to get maximum
compression and avoid meaningless changes overpopulating the database.
Ignores man/info pages, as they are encountered separately while making other
changes to man/info pages later.
EXAMPLES
========
C{r.NormalizeCompression(exceptions='%(thistestdir)s/.*')}
This package has test files that are tested byte-for-byte and
cannot be modified at all and still pass the tests.
"""
processUnmodified = False
invariantexceptions = [
'%(mandir)s/man.*/',
'%(infodir)s/',
]
invariantinclusions = [
('.*\.(gz|bz2)', None, stat.S_IFDIR),
]
db = None
gzip = None
bzip = None
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
m = self.recipe.magic[path]
if not m:
return
# Note: uses external gzip/bunzip if they exist because a
# pipeline is faster in a multiprocessing environment
def _mktmp(fullpath):
fd, path = tempfile.mkstemp('.temp', '', os.path.dirname(fullpath))
os.close(fd)
return path
def _move(tmppath, fullpath):
os.chmod(tmppath, os.lstat(fullpath).st_mode)
os.rename(tmppath, fullpath)
def _findProg(prog):
if not self.db:
self.db = database.Database(self.recipe.cfg.root,
self.recipe.cfg.dbPath)
return _findProgPath(prog, self.db, self.recipe)
fullpath = self.macros.destdir+path
if m.name == 'gzip' and \
(m.contents['compression'] != '9' or 'name' in m.contents):
tmppath = _mktmp(fullpath)
if not self.gzip:
self.gzip = _findProg('gzip')
util.execute('%s -dc %s | %s -f -n -9 > %s'
%(self.gzip, fullpath, self.gzip, tmppath))
_move(tmppath, fullpath)
del self.recipe.magic[path]
if m.name == 'bzip' and m.contents['compression'] != '9':
tmppath = _mktmp(fullpath)
if not self.bzip:
self.bzip = _findProg('bzip2')
util.execute('%s -dc %s | %s -9 > %s'
%(self.bzip, fullpath, self.bzip, tmppath))
_move(tmppath, fullpath)
del self.recipe.magic[path]
class NormalizeManPages(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeManPages()}} - Make all man pages follow sane system policy
SYNOPSIS
========
C{r.NormalizeManPages([I{filterexp}], [I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeManPages()} policy makes all system manual pages
follow sane system policy
Note: This policy class is not called directly from recipes, and does not
honor exceptions.
Some of the following tasks are performed against system manual pages via
C{r.NormalizeManPages}:
- Fix all man pages' contents:
- remove instances of C{/?%(destdir)s} from all man pages
- C{.so foo.n} becomes a symlink to foo.n
- (re)compress all man pages with gzip -f -n -9
- change all symlinks to point to .gz (if they don't already)
- make all man pages be mode 644
"""
requires = (
('ReadableDocs', policy.CONDITIONAL_SUBSEQUENT),
)
def _findProg(self, prog):
if not self.db:
self.db = database.Database(self.recipe.cfg.root,
self.recipe.cfg.dbPath)
return _findProgPath(prog, self.db, self.recipe)
# Note: not safe for derived packages; needs to check in each
# internal function for unmodified files
def _uncompress(self, dirname, names):
for name in names:
path = dirname + os.sep + name
if name.endswith('.gz') and util.isregular(path):
if not self.gunzip:
self.gunzip = self._findProg('gunzip')
util.execute('gunzip ' + dirname + os.sep + name)
try:
self.recipe.recordMove(util.joinPaths(dirname, name),
util.joinPaths(dirname, name)[:-3])
except AttributeError:
pass
if name.endswith('.bz2') and util.isregular(path):
if not self.bunzip:
self.bunzip = self._findProg('bunzip2')
util.execute('bunzip2 ' + dirname + os.sep + name)
try:
self.recipe.recordMove(util.joinPaths(dirname, name),
util.joinPaths(dirname, name)[:-4])
except AttributeError:
pass
def _touchup(self, dirname, names):
"""
remove destdir, fix up modes, ensure that it is legal UTF-8
"""
mode = os.lstat(dirname)[stat.ST_MODE]
if mode & 0777 != 0755:
os.chmod(dirname, 0755)
for name in names:
path = dirname + os.sep + name
mode = os.lstat(path)[stat.ST_MODE]
# avoid things like symlinks
if not stat.S_ISREG(mode):
continue
if mode & 0777 != 0644:
os.chmod(path, 0644)
f = file(path, 'r+')
data = f.read()
write = False
try:
data.decode('utf-8')
except:
try:
data = data.decode('iso-8859-1').encode('utf-8')
write = True
except:
self.error('unable to decode %s as utf-8 or iso-8859-1',
path)
if data.find(self.destdir) != -1:
write = True
# I think this is cheaper than using a regexp
data = data.replace('/'+self.destdir, '')
data = data.replace(self.destdir, '')
if write:
f.seek(0)
f.truncate(0)
f.write(data)
def _sosymlink(self, dirname, names):
section = os.path.basename(dirname)
for name in names:
path = dirname + os.sep + name
if util.isregular(path):
# if only .so, change to symlink
f = file(path)
lines = f.readlines(512) # we really don't need the whole file
f.close()
# delete comment lines first
newlines = []
for line in lines:
# newline means len(line) will be at least 1
if len(line) > 1 and not self.commentexp.search(line[:-1]):
newlines.append(line)
lines = newlines
# now see if we have only a .so line to replace
# only replace .so with symlink if the file exists
# in order to deal with searchpaths
if len(lines) == 1:
line = lines[0]
# remove newline and other trailing whitespace if it exists
line = line.rstrip()
match = self.soexp.search(line)
if match:
matchlist = match.group(1).split('/')
l = len(matchlist)
if l == 1 or matchlist[l-2] == section:
# no directory specified, or in the same
# directory:
targetpath = os.sep.join((dirname, matchlist[l-1]))
if (os.path.exists(targetpath) and
os.path.isfile(targetpath)):
self.info('replacing %s (%s) with symlink %s',
name, match.group(0),
os.path.basename(match.group(1)))
os.remove(path)
os.symlink(os.path.basename(match.group(1)),
path)
else:
# either the canonical .so manN/foo.N or an
# absolute path /usr/share/man/manN/foo.N
# .so is relative to %(mandir)s and the other
# man page is in a different dir, so add ../
target = "../%s/%s" %(matchlist[l-2],
matchlist[l-1])
targetpath = os.sep.join((dirname, target))
if os.path.exists(targetpath):
self.info('replacing %s (%s) with symlink %s',
name, match.group(0), target)
os.remove(path)
os.symlink(target, path)
def _compress(self, dirname, names):
for name in names:
path = dirname + os.sep + name
if util.isregular(path):
if not self.gzip:
self.gzip = self._findProg('gzip')
util.execute('gzip -f -n -9 ' + dirname + os.sep + name)
try:
self.recipe.recordMove(dirname + os.sep + name,
dirname + os.sep + name + '.gz')
except AttributeError:
pass
def _gzsymlink(self, dirname, names):
for name in names:
path = dirname + os.sep + name
if os.path.islink(path):
# change symlinks to .gz -> .gz
contents = os.readlink(path)
os.remove(path)
if not contents.endswith('.gz'):
contents = contents + '.gz'
if not path.endswith('.gz'):
path = path + '.gz'
os.symlink(util.normpath(contents), path)
def __init__(self, *args, **keywords):
policy.DestdirPolicy.__init__(self, *args, **keywords)
self.soexp = re.compile(r'^\.so (.*\...*)$')
self.commentexp = re.compile(r'^\.\\"')
self.db = None
self.gzip = None
self.gunzip = None
self.bunzip = None
def test(self):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return True
def do(self):
for manpath in sorted(list(set((
self.macros.mandir,
os.sep.join((self.macros.x11prefix, 'man')),
os.sep.join((self.macros.krbprefix, 'man')),)))
):
manpath = self.macros.destdir + manpath
self.destdir = self.macros['destdir'][1:] # without leading /
# uncompress all man pages
os.path.walk(manpath, NormalizeManPages._uncompress, self)
# remove '/?%(destdir)s' and fix modes
os.path.walk(manpath, NormalizeManPages._touchup, self)
# .so foo.n becomes a symlink to foo.n
os.path.walk(manpath, NormalizeManPages._sosymlink, self)
# recompress all man pages
os.path.walk(manpath, NormalizeManPages._compress, self)
# change all symlinks to point to .gz (if they don't already)
os.path.walk(manpath, NormalizeManPages._gzsymlink, self)
class NormalizeInfoPages(policy.DestdirPolicy):
"""
NAME
====
    B{C{r.NormalizeInfoPages()}} - Compress info pages and remove the info directory file
SYNOPSIS
========
C{r.NormalizeInfoPages([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeInfoPages()} policy properly compresses info files,
and removes the info directory file.
EXAMPLES
========
The only recipe invocation possible for C{r.NormalizeInfoPages} is
C{r.NormalizeInfoPages(exceptions='%(infodir)s/dir')} in the recipe that
should own the info directory file (normally texinfo).
"""
requires = (
('ReadableDocs', policy.CONDITIONAL_SUBSEQUENT),
)
def test(self):
# Not safe for derived packages in this form, needs explicit checks
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return True
def do(self):
dir = self.macros['infodir']+'/dir'
fsdir = self.macros['destdir']+dir
if os.path.exists(fsdir):
if not self.policyException(dir):
util.remove(fsdir)
if os.path.isdir('%(destdir)s/%(infodir)s' %self.macros):
infofilespath = '%(destdir)s/%(infodir)s' %self.macros
infofiles = os.listdir(infofilespath)
for file in infofiles:
self._moveToInfoRoot(file)
infofiles = os.listdir(infofilespath)
for file in infofiles:
self._processInfoFile(file)
def __init__(self, *args, **keywords):
policy.DestdirPolicy.__init__(self, *args, **keywords)
self.db = None
self.gzip = None
self.gunzip = None
self.bunzip = None
def _findProg(self, prog):
if not self.db:
self.db = database.Database(self.recipe.cfg.root,
self.recipe.cfg.dbPath)
return _findProgPath(prog, self.db, self.recipe)
def _moveToInfoRoot(self, file):
infofilespath = '%(destdir)s/%(infodir)s' %self.macros
fullfile = util.joinPaths(infofilespath, file)
if os.path.isdir(fullfile):
for subfile in os.listdir(fullfile):
self._moveToInfoRoot(util.joinPaths(file, subfile))
shutil.rmtree(fullfile)
elif os.path.dirname(fullfile) != infofilespath:
destPath = util.joinPaths(infofilespath,
os.path.basename(fullfile))
shutil.move(fullfile, destPath)
try:
self.recipe.recordMove(fullfile, destPath)
except AttributeError:
pass
def _processInfoFile(self, file):
syspath = '%(destdir)s/%(infodir)s/' %self.macros + file
path = '%(infodir)s/' %self.macros + file
if not self.policyException(path):
m = self.recipe.magic[path]
if not m or m.name not in ('gzip', 'bzip'):
# not compressed
if not self.gzip:
self.gzip = self._findProg('gzip')
util.execute('gzip -f -n -9 %s' %syspath)
try:
self.recipe.recordMove(syspath, syspath + '.gz')
except AttributeError:
pass
del self.recipe.magic[path]
elif m.name == 'gzip' and \
(m.contents['compression'] != '9' or \
'name' in m.contents):
if not self.gzip:
self.gzip = self._findProg('gzip')
if not self.gunzip:
self.gunzip = self._findProg('gunzip')
util.execute('gunzip %s; gzip -f -n -9 %s'
%(syspath, syspath[:-3]))
# filename didn't change, so don't record it in the manifest
del self.recipe.magic[path]
elif m.name == 'bzip':
# should use gzip instead
if not self.gzip:
self.gzip = self._findProg('gzip')
if not self.bunzip:
self.bunzip = self._findProg('bunzip2')
util.execute('bunzip2 %s; gzip -f -n -9 %s'
%(syspath, syspath[:-4]))
try:
self.recipe.recordMove(syspath, syspath[:-4] + '.gz')
except AttributeError:
pass
del self.recipe.magic[path]
class NormalizeInitscriptLocation(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeInitscriptLocation()}} - Properly locates init scripts
SYNOPSIS
========
C{r.NormalizeInitscriptLocation([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeInitscriptLocation()} policy puts init scripts in their
    official location, resolving any ambiguity about where they belong.
Moves all init scripts from /etc/rc.d/init.d/ to their official location.
"""
requires = (
('RelativeSymlinks', policy.CONDITIONAL_SUBSEQUENT),
('NormalizeInterpreterPaths', policy.CONDITIONAL_SUBSEQUENT),
)
processUnmodified = False
# need both of the next two lines to avoid following /etc/rc.d/init.d
# if it is a symlink
invariantsubtrees = [ '/etc/rc.d' ]
invariantinclusions = [ '/etc/rc.d/init.d/' ]
def test(self):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return self.macros['initdir'] != '/etc/rc.d/init.d'
def doFile(self, path):
basename = os.path.basename(path)
target = util.joinPaths(self.macros['initdir'], basename)
if os.path.exists(self.macros['destdir'] + os.sep + target):
raise policy.PolicyError(
"Conflicting initscripts %s and %s installed" %(
path, target))
util.mkdirChain(self.macros['destdir'] + os.sep +
self.macros['initdir'])
util.rename(self.macros['destdir'] + path,
self.macros['destdir'] + target)
try:
self.recipe.recordMove(self.macros['destdir'] + path,
self.macros['destdir'] + target)
except AttributeError:
pass
class NormalizeInitscriptContents(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeInitscriptContents()}} - Fixes common errors within init scripts
SYNOPSIS
========
C{r.NormalizeInitscriptContents([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeInitscriptContents()} policy fixes common errors within
init scripts, and adds some dependencies if needed.
EXAMPLES
========
C{r.NormalizeInitscriptContents(exceptions='%(initdir)s/foo')}
Use this in the unprecedented case that C{r.NormalizeInitscriptContents}
damages an init script.
"""
requires = (
# for invariantsubtree to be sufficient
('NormalizeInitscriptLocation', policy.REQUIRED_PRIOR),
('RelativeSymlinks', policy.REQUIRED_PRIOR),
# for adding requirements
('Requires', policy.REQUIRED_SUBSEQUENT),
)
processUnmodified = False
invariantsubtrees = [ '%(initdir)s' ]
invariantinclusions = [ ('.*', 0400, stat.S_IFDIR), ]
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
m = self.recipe.macros
fullpath = '/'.join((m.destdir, path))
if os.path.islink(fullpath):
linkpath = os.readlink(fullpath)
if m.destdir not in linkpath:
# RelativeSymlinks has already run. linkpath is relative to
# fullpath
newpath = util.joinPaths(os.path.dirname(fullpath), linkpath)
if os.path.exists(newpath):
fullpath = newpath
else:
# If the target of an init script is not present, don't
# error, DanglingSymlinks will address this situation.
self.warn('%s is a symlink to %s, which does not exist.' % \
(path, linkpath))
return
contents = file(fullpath).read()
modified = False
if ('/etc/rc.d/init.d' != m.initdir and
'/etc/rc.d/init.d' in contents):
contents = contents.replace('/etc/rc.d/init.d', m.initdir)
modified = True
elif ('/etc/init.d' != m.initdir and
'/etc/init.d' in contents):
contents = contents.replace('/etc/init.d', m.initdir)
modified = True
if '%(initdir)s/functions' %m in contents:
self.recipe.Requires('file: %(initdir)s/functions',
util.literalRegex(path))
if modified:
file(fullpath, 'w').write(contents)
class NormalizeAppDefaults(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeAppDefaults()}} - Locate X application defaults files
SYNOPSIS
========
C{r.NormalizeAppDefaults([I{filterexp}])}
DESCRIPTION
===========
The C{r.NormalizeAppDefaults()} policy locates X application defaults
files.
No exceptions to this policy are honored.
"""
def test(self):
# not safe in this form for derived packages
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return False
return True
def do(self):
e = '%(destdir)s/%(sysconfdir)s/X11/app-defaults' % self.macros
if not os.path.isdir(e):
return
x = '%(destdir)s/%(x11prefix)s/lib/X11/app-defaults' % self.macros
self.warn('app-default files misplaced in'
' %(sysconfdir)s/X11/app-defaults' % self.macros)
if os.path.islink(x):
util.remove(x)
util.mkdirChain(x)
for file in os.listdir(e):
util.rename(util.joinPaths(e, file),
util.joinPaths(x, file))
try:
self.recipe.recordMove(util.joinPaths(e, file),
util.joinPaths(x, file))
except AttributeError:
pass
class NormalizeInterpreterPaths(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizeInterpreterPaths()}} - Rewrites interpreter paths in
scripts
SYNOPSIS
========
C{r.NormalizeInterpreterPaths([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
    The C{r.NormalizeInterpreterPaths()} policy re-writes interpreter paths
    in script files, in particular changing indirect calls through C{env}
    to direct calls.
    Exceptions to this policy should only be made when they are part of the
    explicit calling convention of a script where the location of the final
    interpreter depends on the user's C{PATH}.
EXAMPLES
========
C{r.NormalizeInterpreterPaths(exceptions=".*")}
Do not modify any interpreter paths for this package. Not
generally recommended.
"""
processUnmodified = False
invariantexceptions = [ '%(thisdocdir.literalRegex)s/', ]
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
mode = os.lstat(d)[stat.ST_MODE]
if not mode & 0111:
# we care about interpreter paths only in executable scripts
return
m = self.recipe.magic[path]
if m and m.name == 'script':
if self._correctInterp(m, path):
del self.recipe.magic[path]
m = self.recipe.magic[path]
if self._correctEnv(m, path):
del self.recipe.magic[path]
def _correctInterp(self, m, path):
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
interp = m.contents['interpreter']
interpBase = os.path.basename(interp)
found = False
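        # Search order when the recorded interpreter path does not exist: drop a
        # '/local/' component, then try the recipe's bin/sbin macro directories,
        # and finally the standard system bin directories.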
if not os.path.exists('/'.join((destdir, interp))) and not os.path.exists(interp):
            # try to remove the 'local' part
if '/local/' in interp:
normalized = interp.replace('/local', '')
if os.path.exists('/'.join((destdir, normalized))) or os.path.exists(normalized):
found = True
if not found:
            candidates = (
self.recipe.macros.bindir,
self.recipe.macros.sbindir,
self.recipe.macros.essentialbindir,
self.recipe.macros.essentialsbindir,
)
            for i in candidates:
if os.path.exists('/'.join((destdir, i, interpBase))):
normalized = util.joinPaths(i, interpBase)
found = True
break
if not found:
            # try to find in '/usr/bin', '/bin', '/usr/sbin', '/sbin'
for i in '/usr/bin', '/bin', '/usr/sbin', '/sbin':
normalized = '/'.join((i, interpBase))
if os.path.exists(normalized):
found = True
break
if not found:
self.warn('The interpreter path %s in %s does not exist!', interp, path)
if found:
line = m.contents['line']
normalized = line.replace(interp, normalized)
self._changeInterpLine(d, '#!' + normalized + '\n')
self.info('changing %s to %s in %s',
line, normalized, path)
return found
def _correctEnv(self, m, path):
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
interp = m.contents['interpreter']
if interp.find('/bin/env') != -1: #finds /usr/bin/env too...
line = m.contents['line']
# rewrite to not have env
wordlist = [ x for x in line.split() ]
if len(wordlist) == 1:
self.error("Interpreter is not given for %s in %s", wordlist[0], path)
return
wordlist.pop(0) # get rid of env
# first look in package
fullintpath = util.checkPath(wordlist[0], root=destdir)
if fullintpath == None:
# then look on installed system
fullintpath = util.checkPath(wordlist[0])
if fullintpath == None:
self.error("Interpreter %s for file %s not found, could not convert from /usr/bin/env syntax", wordlist[0], path)
return False
wordlist[0] = fullintpath
self._changeInterpLine(d, '#!'+" ".join(wordlist)+'\n')
self.info('changing %s to %s in %s',
line, " ".join(wordlist), path)
return True
return False
def _changeInterpLine(self, path, newline):
mode = os.lstat(path)[stat.ST_MODE]
# we need to be able to write the file
os.chmod(path, mode | 0600)
f = file(path, 'r+')
l = f.readlines()
l[0] = newline
f.seek(0)
f.truncate(0)# we may have shrunk the file, avoid garbage
f.writelines(l)
f.close()
# revert any change to mode
os.chmod(path, mode)
class NormalizePamConfig(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizePamConfig()}} - Adjust PAM configuration files
SYNOPSIS
========
C{r.NormalizePamConfig([I{filterexp}] I{exceptions=filterexp}])}
DESCRIPTION
===========
    The C{r.NormalizePamConfig()} policy adjusts PAM configuration files and
    removes references to older module paths such as C{/lib/security/$ISA},
    as there is no need for such paths in modern PAM libraries.
Exceptions to this policy should never be required.
"""
processUnmodified = False
invariantsubtrees = [
'%(sysconfdir)s/pam.d/',
]
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
d = util.joinPaths(self.recipe.macros.destdir, path)
mode = os.lstat(d)[stat.ST_MODE]
if stat.S_ISLNK(mode):
# we'll process whatever this is pointing to whenever we
# get there.
return
if not (mode & 0200):
os.chmod(d, mode | 0200)
f = file(d, 'r+')
l = f.readlines()
l = [x.replace('/lib/security/$ISA/', '') for x in l]
stackRe = re.compile('(.*)required.*pam_stack.so.*service=(.*)')
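        # Rewrite legacy "required pam_stack.so service=foo" lines to the
        # modern "include foo" syntax.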
def removeStack(line):
m = stackRe.match(line)
if m:
return '%s include %s\n'%(m.group(1), m.group(2))
return line
l = [removeStack(x) for x in l]
f.seek(0)
f.truncate(0) # we may have shrunk the file, avoid garbage
f.writelines(l)
f.close()
os.chmod(d, mode)
class NormalizePythonInterpreterVersion(policy.DestdirPolicy):
"""
NAME
====
B{C{r.NormalizePythonInterpreterVersion()}} - Provides version-specific path to python interpreter in python program files
SYNOPSIS
========
C{r.NormalizePythonInterpreterVersion([I{filterexp}], [I{exceptions=filterexp}i], [I{versionMap=((from, to), ...)}])}
DESCRIPTION
===========
The C{r.NormalizePythonInterpreterVersion()} policy ensures that
python script files have a version-specific path to the
interpreter if possible.
KEYWORDS
========
B{versionMap} : Specify mappings of interpreter version changes
to make for python scripts.
EXAMPLES
========
C{r.NormalizePythonInterpreterVersion(versionMap=(
('%(bindir)s/python', '%(bindir)s/python2.5'),
('%(bindir)s/python25', '%(bindir)s/python2.5')
))}
Specify that any scripts with an interpreter of C{/usr/bin/python}
or C{/usr/bin/python25} should be changed to C{/usr/bin/python2.5}.
"""
requires = (
('NormalizeInterpreterPaths', policy.CONDITIONAL_PRIOR),
)
keywords = {'versionMap': {}}
processUnmodified = False
def updateArgs(self, *args, **keywords):
if 'versionMap' in keywords:
versionMap = keywords.pop('versionMap')
if type(versionMap) in (list, tuple):
versionMap = dict(versionMap)
self.versionMap.update(versionMap)
policy.DestdirPolicy.updateArgs(self, *args, **keywords)
def preProcess(self):
self.interpreterRe = re.compile(".*python[-0-9.]+$")
self.interpMap = {}
versionMap = {}
for item in self.versionMap.items():
versionMap[item[0]%self.macros] = item[1]%self.macros
self.versionMap = versionMap
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
destdir = self.recipe.macros.destdir
d = util.joinPaths(destdir, path)
mode = os.lstat(d)[stat.ST_MODE]
m = self.recipe.magic[path]
if m and m.name == 'script':
interp = m.contents['interpreter']
if '/python' not in interp:
# we handle only python scripts here
return
if interp in self.versionMap.keys():
normalized = self.versionMap[interp]
elif not self._isNormalizedInterpreter(interp):
# normalization
if self.interpMap.has_key(interp):
normalized = self.interpMap[interp]
else:
normalized = self._normalize(interp)
if normalized:
self.interpMap[interp] = normalized
else:
self.warn('No version-specific python interpreter '
'found for %s in %s', interp, path)
return
else:
return
# we need to be able to write the file
os.chmod(d, mode | 0600)
f = file(d, 'r+')
l = f.readlines()
l[0] = l[0].replace(interp, normalized)
# we may have shrunk the file, avoid garbage
f.seek(0)
f.truncate(0)
f.writelines(l)
f.close()
# revert any change to mode
os.chmod(d, mode)
self.info('changed %s to %s in %s', interp, normalized, path)
del self.recipe.magic[path]
def _isNormalizedInterpreter(self, interp):
return os.path.basename(interp).startswith('python') and self.interpreterRe.match(interp)
def _normalize(self, interp):
dir = self.recipe.macros.destdir
interpFull = '/'.join((dir, interp))
interpFullBase = os.path.basename(interpFull)
interpFullDir = os.path.dirname(interpFull)
interpDir = os.path.dirname(interp)
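        # If the interpreter is shipped in the destdir, prefer a sibling name
        # that is the same file (hard link or symlink) and looks version-specific.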
links = []
if os.path.exists(interpFull):
for i in os.listdir(interpFullDir):
if os.path.samefile(interpFull, '/'.join((interpFullDir, i))):
links += [i]
path = sorted(links, key=len, reverse=True)
if path and self._isNormalizedInterpreter('/'.join((interpFullDir, path[0]))):
return os.path.join(interpDir, path[0])
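            # No suitable link found: fall back to byte-for-byte comparison to
            # catch interpreters that are copies rather than links.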
links = []
for i in os.listdir(interpFullDir):
try:
if filecmp.cmp(interpFull, '/'.join((interpFullDir, i))):
links += [i]
except IOError:
# this is a fallback for a bad install anyway, so
# a failure here is both unusual and not important
pass
path = sorted(links, key=len, reverse=True)
if path and self._isNormalizedInterpreter('/'.join((interpFullDir, path[0]))):
return os.path.join(interpDir, path[0])
else:
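            # Interpreter is not in the destdir: consult the installed system's
            # Conary database for a version-specific path owned by the same trove.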
db = database.Database('/', self.recipe.cfg.dbPath)
pythonTroveList = db.iterTrovesByPath(interp)
for trove in pythonTroveList:
pathList = [x[1] for x in trove.iterFileList()]
links += [x for x in pathList if x.startswith(interp)]
path = sorted(links, key=len, reverse=True)
if path and self._isNormalizedInterpreter(path[0]):
return path[0]
return None
class NormalizePythonEggs(policy.DestdirPolicy):
invariantinclusions = [
('.*/python[^/]*/site-packages/.*\.egg', stat.S_IFREG),
]
requires = (
('RemoveNonPackageFiles', policy.CONDITIONAL_PRIOR),
)
def doFile(self, path):
if hasattr(self.recipe, '_getCapsulePathsForFile'):
if self.recipe._getCapsulePathsForFile(path):
return
dir = self.recipe.macros.destdir
fullPath = util.joinPaths(dir, path)
m = magic.magic(fullPath)
if not (m and m.name == 'ZIP'):
# if it's not a zip, we can't unpack it, PythonEggs will raise
# an error on this path
return
tmpPath = tempfile.mkdtemp(dir = self.recipe.macros.builddir)
util.execute("unzip -q -o -d '%s' '%s'" % (tmpPath, fullPath))
self._addActionPathBuildRequires(['unzip'])
os.unlink(fullPath)
shutil.move(tmpPath, fullPath)
# Note: NormalizeLibrarySymlinks is in libraries.py
| apache-2.0 | -4,332,780,223,955,068,400 | 35.184834 | 129 | 0.540982 | false |
hungpham2511/toppra | toppra/solverwrapper/cvxpy_solverwrapper.py | 1 | 5009 | from .solverwrapper import SolverWrapper
import logging
import numpy as np
from ..constraint import ConstraintType
from ..constants import CVXPY_MAXX, CVXPY_MAXU
logger = logging.getLogger(__name__)
try:
import cvxpy
FOUND_CVXPY = True
except ImportError:
logger.info("CVXPY installation not found.")
FOUND_CVXPY = False
try:
import mosek
FOUND_MOSEK = True
except ImportError:
logger.info("Mosek installation not found!")
FOUND_MOSEK = False
class cvxpyWrapper(SolverWrapper):
"""A solver wrapper using `cvxpy`.
    NOTE: the two constants CVXPY_MAXX and CVXPY_MAXU are used to
    guarantee that the solution does not grow too large, which cvxpy
    does not handle well.
    `cvxpyWrapper` should not be used in production due to robustness
    issues.
Parameters
----------
constraint_list: list of :class:`.Constraint`
The constraints the robot is subjected to.
path: :class:`.Interpolator`
The geometric path.
path_discretization: array
The discretized path positions.
"""
def __init__(self, constraint_list, path, path_discretization):
super(cvxpyWrapper, self).__init__(constraint_list, path, path_discretization)
valid_types = [ConstraintType.CanonicalLinear, ConstraintType.CanonicalConic]
# Currently only support Canonical Linear Constraint
for constraint in constraint_list:
if constraint.get_constraint_type() not in valid_types:
raise NotImplementedError
def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):
assert i <= self.N and 0 <= i
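        # Stage variables in TOPP-RA: u is the path acceleration and x the
        # squared path velocity at grid point i.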
ux = cvxpy.Variable(2)
u = ux[0]
x = ux[1]
cvxpy_constraints = [-CVXPY_MAXU <= u, u <= CVXPY_MAXU, 0 <= x, x <= CVXPY_MAXX]
if not np.isnan(x_min):
cvxpy_constraints.append(x_min <= x)
if not np.isnan(x_max):
cvxpy_constraints.append(x <= x_max)
if i < self.N:
delta = self.get_deltas()[i]
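            # Enforce the discretized transition x_{i+1} = x_i + 2 * delta_i * u_i
            # against the bounds requested for the next stage.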
if not np.isnan(x_next_min):
cvxpy_constraints.append(x_next_min <= x + 2 * delta * u)
if not np.isnan(x_next_max):
cvxpy_constraints.append(x + 2 * delta * u <= x_next_max)
for k, constraint in enumerate(self.constraints):
if constraint.get_constraint_type() == ConstraintType.CanonicalLinear:
a, b, c, F, h, ubound, xbound = self.params[k]
if a is not None:
v = a[i] * u + b[i] * x + c[i]
if constraint.identical:
cvxpy_constraints.append(F * v <= h)
else:
cvxpy_constraints.append(F[i] * v <= h[i])
                # ecos (via cvxpy in this class) is very bad at
                # handling badly scaled problems, i.e. problems with very
                # large bounds. The max()/min() operators below are a
                # workaround to get past this issue.
if ubound is not None:
cvxpy_constraints.append(max(-CVXPY_MAXU, ubound[i, 0]) <= u)
cvxpy_constraints.append(u <= min(CVXPY_MAXU, ubound[i, 1]))
if xbound is not None:
cvxpy_constraints.append(xbound[i, 0] <= x)
cvxpy_constraints.append(x <= min(CVXPY_MAXX, xbound[i, 1]))
elif constraint.get_constraint_type() == ConstraintType.CanonicalConic:
a, b, c, P, ubound, xbound = self.params[k]
if a is not None:
d = a.shape[1]
for j in range(d):
cvxpy_constraints.append(
a[i, j] * u
+ b[i, j] * x
+ c[i, j]
+ cvxpy.norm(P[i, j].T[:, :2] * ux + P[i, j].T[:, 2])
<= 0
)
if ubound is not None:
cvxpy_constraints.append(max(-CVXPY_MAXU, ubound[i, 0]) <= u)
cvxpy_constraints.append(u <= min(CVXPY_MAXU, ubound[i, 1]))
if xbound is not None:
cvxpy_constraints.append(xbound[i, 0] <= x)
cvxpy_constraints.append(x <= min(CVXPY_MAXX, xbound[i, 1]))
if H is None:
H = np.zeros((self.get_no_vars(), self.get_no_vars()))
objective = cvxpy.Minimize(0.5 * cvxpy.quad_form(ux, H) + g * ux)
problem = cvxpy.Problem(objective, constraints=cvxpy_constraints)
try:
problem.solve(verbose=False)
except cvxpy.SolverError:
# solve fail
pass
if (
problem.status == cvxpy.OPTIMAL
or problem.status == cvxpy.OPTIMAL_INACCURATE
):
return np.array(ux.value).flatten()
else:
res = np.empty(self.get_no_vars())
res[:] = np.nan
return res
| mit | 2,394,813,359,082,985,000 | 35.562044 | 88 | 0.539429 | false |
ctripcorp/tars | tars/server/models.py | 1 | 19445 | import logging
import traceback
import itertools
from django.db import models, IntegrityError, transaction
from django.db.models import Q
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
from constance import config
from rest_client import get_salt_client
from roll_engine.constants import REVOKED
from roll_engine.exceptions import ActionNotAllowed, ActionFailed
from roll_engine.db import TimestampedModel, SoftDeleteManager
from .lb import choose_lb_facade
from .agency import choose_deploy_agency
from tars.deployment import constants
from tars.application.models import Application
from tars.exceptions import SyncError
from tars.utils import ConstantSet, InstanceScopePyringBean
logger = logging.getLogger(__name__)
class Server(TimestampedModel):
hostname = models.CharField(max_length=255, null=True, blank=True)
ip_address = models.CharField(max_length=64, null=True, blank=True)
group = models.ForeignKey('Group', related_name='servers', null=True,
blank=True, db_constraint=False)
is_fort = models.BooleanField(default=False)
idc = models.CharField(max_length=255, null=True, blank=True)
is_deleted = models.BooleanField(default=False, editable=False)
objects = SoftDeleteManager()
class Meta:
db_table = 'servers'
def __unicode__(self):
return "<Server {}>{}".format(self.pk, self.hostname)
def simple_download(self, container, path, local_path):
salt = get_salt_client(self.group.application.salt_client)
return salt.run_module(self.hostname, 'tars_utils.simple_download', 30,
container, path, local_path)
def update(self, **kwargs):
kwargs = {k: v for k, v in kwargs.items() if v is not None}
Server.objects.filter(id=self.id).update(**kwargs)
class Group(TimestampedModel, InstanceScopePyringBean):
""" Represent a CMS group """
# constance
G_TYPE_ENUM = ConstantSet('Ansible', 'join')
# Fields
name = models.CharField(max_length=255, null=True)
application = models.ForeignKey(Application, related_name='groups',
null=True, blank=True, db_constraint=False)
vdir_path = models.CharField(max_length=255, null=True)
physic_path = models.CharField(max_length=255, null=True, blank=True)
fort = models.CharField(max_length=255, null=True, blank=True) # will override server's is_fort
idc = models.CharField(max_length=255, null=True, blank=True)
health_check_url = models.CharField(max_length=255, null=True, blank=True)
is_ssl = models.BooleanField(default=False)
g_type = models.CharField(max_length=32, null=True, blank=True, default=G_TYPE_ENUM.Ansible)
is_deleted = models.BooleanField(default=False, editable=False)
objects = SoftDeleteManager()
class Meta:
db_table = 'groups'
def __init__(self, *args, **kwargs):
super(Group, self).__init__(*args, **kwargs)
self._merge_deploys = None
self.origin_g_type = self.g_type
def __unicode__(self):
return u"<Group {}>{}".format(self.pk, self.name)
def get_delegation(self, *args, **kwargs):
if self.g_type == self.G_TYPE_ENUM.join and self.__class__ != JoinedGroup: # stop recursion
return self.joinedgroup
else:
return self
def save(self, *args, **kwargs):
if (isinstance(self.vdir_path, basestring) and len(self.vdir_path) > 0
and self.vdir_path[0] != '/'):
self.vdir_path = '/{}'.format(self.vdir_path)
if self.is_deleted:
self.servers.update(is_deleted=True) # clean servers of removed one
super(Group, self).save(*args, **kwargs)
def get_forts(self, valid_servers=None):
if valid_servers is None:
valid_servers = []
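        # Selection order: an explicitly configured fort hostname wins, then any
        # server flagged is_fort, and finally the first server in hostname order.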
queryset = default_qs = self.servers.order_by('hostname')
if valid_servers:
queryset = default_qs = queryset.filter(hostname__in=valid_servers)
if self.fort is None:
queryset = queryset.filter(is_fort=True)
else:
queryset = queryset.filter(hostname__exact=self.fort)
if not queryset.exists():
first_svr = default_qs.first()
return [] if first_svr is None else [first_svr.hostname]
return queryset.values_list('hostname', flat=True)
forts = property(get_forts)
def get_lb(self, extra_hints=None):
""" give a slb facade instance bind to this group """
facade_cls = choose_lb_facade(self, extra_hints)
return facade_cls(self)
def get_deploy_agency(self):
return choose_deploy_agency(self)
@property
def merge_deploys(self):
if not self._merge_deploys:
self._merge_deploys = MergeDeploymentManager(self)
return self._merge_deploys
@property
def rerollable_deployment_ids(self):
rerollable_deployment_ids = self.merge_deploys.order_by('-created_at')\
.filter(status=constants.SUCCESS, package__is_deleted=False)[:10]\
.values_list('id', flat=True)
return list(rerollable_deployment_ids)
@property
def rollback_deployment(self):
try:
latest_deployment = self.merge_deploys.latest()
except ObjectDoesNotExist:
rollback_deployment = None
else:
if (latest_deployment.flavor == latest_deployment.ROLLBACK and
latest_deployment.status == REVOKED):
# rollback A fails and next rollback B actually should be the same purpose
rollback_deployment = latest_deployment
else:
rollback_deployment = latest_deployment.parent
if rollback_deployment is None:
raise Exception('Application {0.application} group {0} has no '
'deployment can be used for rollback'.format(self))
# Slice batches with MAX_PERCENTAGE for rollback deployment
# so modify its batch_pattern
rollback_deployment.config.batch_pattern = '50%'
# re-assign group to self, cuz it may be a JoinedGroup
rollback_deployment.group = self
return rollback_deployment
@property
def current_deployment(self):
queryset = self._running_deployments()
if queryset.count() >= 1:
return queryset.latest()
else:
return None
def _running_deployments(self):
return self.merge_deploys.exclude(status__in=constants.HALTED)
@property
def last_success_deployment(self):
try:
return self.merge_deploys.exclude(category="scaleout").filter(status=constants.SUCCESS).latest('id')
except ObjectDoesNotExist:
return None
def sync_cms(self):
return set(), set()
def summarize_packages(self):
'''
Return dict indicates the server number grouped by their package version
'''
from tars.deployment.models import TarsDeploymentTarget
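        # For each server in the group, look up its most recent deployment target
        # that belongs to a successful deployment of this application.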
latest_targets = [
TarsDeploymentTarget.objects.filter(
_hostname=h,
batch__deployment__application=self.application,
batch__deployment__status=constants.SUCCESS
)
.order_by('-updated_at').first()
for h in self.servers.values_list('hostname', flat=True)
]
packages_on_servers = [t.batch.deployment.package for t in latest_targets if t]
ordered = sorted(packages_on_servers, lambda a, b: int(a.pk - b.pk))
grouped = itertools.groupby(ordered)
r = [(key, len(list(val_it))) for key, val_it in grouped]
return r
def rollback(self, deployment_id):
# for XMON
current_deployment = self.current_deployment
if (current_deployment is not None and
current_deployment.id == deployment_id):
if current_deployment.is_braked():
rollback_deployment = self.rollback_deployment
try:
# reset deployment
rollback_deployment.pk = None
rollback_deployment.status = constants.PENDING
rollback_deployment.flavor = rollback_deployment.ROLLBACK
with transaction.atomic():
rollback_deployment.save()
# start to rollout immediately
rollback_deployment.start()
except IntegrityError:
raise ActionFailed('Rollback failed for application {0}'
.format(self.name))
else:
return rollback_deployment
else:
raise ActionNotAllowed(
'This rollback api only works for braked deployment')
else:
raise ActionNotAllowed(
'This rollback api is forbidden for deployment {0}'
.format(deployment_id))
def is_idle(self, rop_id):
related_deps = self.merge_deploys.all()
if self._running_deployments().exists():
if rop_id is not None:
if not related_deps.filter(rop_id=rop_id).exists():
failed_deps = related_deps.filter(
status=constants.FAILURE)
if failed_deps.exists():
return True
return False
else:
if rop_id is not None:
if rop_id != -255:
same_rop_deps = related_deps.filter(rop_id=rop_id)
if (same_rop_deps.exists() and
same_rop_deps.latest().status == constants.SUCCESS):
return False
return True
else:
return False
def fetch_batches(self, batch_pattern, flavor=None):
from tars.deployment.models import TarsDeployment
if flavor is None:
flavor = TarsDeployment.STANDARD
temp_deployment = TarsDeployment(
application=self.application, group=self, flavor=flavor)
return temp_deployment.preview_batches(batch_pattern)
def precreate_deployment(self):
for deployment in self.merge_deploys.exclude(
status__in=constants.HALTED):
deployment.trans(REVOKED)
class JoinedGroup(Group):
""" Act as a super group which combines from multiple ones """
# constants
_MIRROR_FIELD_NAMES = ('site_name', 'vdir_path', 'app_pool_name', 'physic_path',
'business_port', 'shutdown_port', 'health_check_url')
    # the list position of each check determines which bit of
    # validation_switch_bitmap enables it (see join())
_JOIN_CRITERIA_MASKS = ['check_identical_server', 'check_identical_version']
# django fields
objects = SoftDeleteManager()
aggregation = models.ManyToManyField(Group, related_name='contained_by',
symmetrical=False,
db_table='groups_joins_junction')
class Meta:
db_table = 'groups_joins'
def __init__(self, *args, **kwargs):
super(JoinedGroup, self).__init__(*args, **kwargs)
self._join_group_servers_set = None
def delete(self):
self.is_deleted = True
self.save()
def save(self, *args, **kwargs):
if not self.pk:
assert self.application is not None, \
"JoinedGroup must have a associated application"
self.name = self.name or "joined_group_%s" % self.application.name
self.g_type = Group.G_TYPE_ENUM.join
self.site_name = None # bypass super default site_name 'Ctrip'
# call grandpa, bypass Group.save() logic
super(Group, self).save(*args, **kwargs)
def is_idle(self, rop_id):
""" origin is_idle check merge_deploys (self and base) deployment, we
need lock JoinedGroup when non-base sub-group starts a deploy
"""
is_lock_caused_by_self_base_deploys = super(JoinedGroup, self).is_idle(rop_id)
if is_lock_caused_by_self_base_deploys is False:
return False
else:
for g in self.aggregation.all():
if g.is_idle(rop_id) is False:
return False
return True
@property
def servers(self):
""" A hack to shadow origin servers to ensure upper-level API consistency. Instead
of returing simple RelatedManager, a dynamic created custom server manager which
contains servers of all aggregated groups will be returned
"""
if not self._join_group_servers_set:
self._join_group_servers_set = JoinedGroupServerManager(self)
return self._join_group_servers_set
########################
# meta handling
########################
def rebuild_meta(self):
# we trust and assume aggregation have same meta
one = self.aggregation.first()
if one is None:
return
for m_field in self._MIRROR_FIELD_NAMES:
setattr(self, m_field, getattr(one, m_field))
idc_set = set([g.idc for g in self.aggregation.all() if g.idc is not None])
idc_count = len(idc_set)
self.idc = idc_set.pop() if idc_count == 1 else "CROSS-IDC"
self.save()
def check_group_meta_is_identical(self, target):
for v_field in self._MIRROR_FIELD_NAMES:
my_meta = getattr(self, v_field)
if my_meta is not None:
assert my_meta == getattr(target, v_field), \
"Inconsist meta field '{}' found on {}".format(v_field, target)
def check_identical_server(self, target):
curr_server_union_set = set(self.servers.values_list("ip_address", flat=True))
target_server_set = set(target.servers.values_list("ip_address", flat=True))
overlapped_set = curr_server_union_set & target_server_set
if overlapped_set:
raise AttributeError("Cannot join overlapped groups, conjunction ip {}".format(overlapped_set))
def check_identical_version(self, target):
join_last = self.last_success_deployment
target_last = target.last_success_deployment
if join_last and target_last and join_last.package.version != target_last.package.version:
raise AttributeError("Inconsist deploy package version, JoinedGroup: %s, Target: %s" % (self, target))
########################
# expose APIs
########################
def join(self, group, validation_switch_bitmap):
""" absorb a group , do validation based on first aggregated """
assert group.g_type != Group.G_TYPE_ENUM.join, "join a JoinedGroup is not allowed"
assert group.application_id == self.application_id, \
"Group {} should belongs to same app".format(group)
self.check_group_meta_is_identical(group)
# check if bitmap for each switch is on, run validation
for position, func_name in enumerate(self._JOIN_CRITERIA_MASKS):
if (1 << position) & validation_switch_bitmap:
getattr(self, func_name)(group)
self.aggregation.add(group)
self.rebuild_meta()
################################
# overrides for API consistency
################################
def get_forts(self, valid_servers=None):
return list(set(itertools.chain(*[g.get_forts(valid_servers) for g in self.aggregation.all()])))
# FIXME: @dalang, what's the point of keep BOTH forts and get_forts in Group ????
forts = property(get_forts)
def sync_cms(self):
        # NOTE: the union operation breaks server order stability compared to a single group sync,
        # but it seems currently we do NOT use the returned value under any circumstance
added, removed = reduce(
lambda accum, add_remove_set: (accum[0] | add_remove_set[0], accum[1] | add_remove_set[1]),
[g.sync_cms() for g in self.aggregation.all()],
(set(), set())
)
self.rebuild_meta()
try:
for g in self.aggregation.all():
self.check_group_meta_is_identical(g)
except AssertionError as e:
logger.error(
"Join Group sync found inconsist meta of concrete group {}: {}".format(g, e.message)
)
return added, removed
class JoinedGroupServerManager(Server._default_manager.__class__):
def __init__(self, joined_group_instance):
super(JoinedGroupServerManager, self).__init__()
self.instance = joined_group_instance
self.model = Server
@property
def core_q(self):
        # use Q to represent group_id = xx OR group_id = yy,
        # which improves performance: quicker than __in, i.e. filter(group__id__in=[...])
NON_EXISTING_GROUP_ID = -2333
group_ids = self.instance.aggregation.values_list('id', flat=True) or [NON_EXISTING_GROUP_ID]
return reduce(
lambda join_q, or_group_id: join_q | Q(group__id=or_group_id),
group_ids,
Q()
)
def verbose_all(self):
""" return simple aggregation of sub group servers """
qs = super(JoinedGroupServerManager, self).get_queryset()
qs._add_hints(instance=self.instance)
return qs.filter(self.core_q)
def get_queryset(self):
""" return servers of UNIQUE hostname, for normal JoinedGroup.object.get().servers.all() """
# NOTE: here group_by is a Django private API, may break
# Django sucks by not supporting group by/distinct single column, except using values()
qs = self.verbose_all()
qs.query.group_by = [('servers', 'hostname')]
return qs
class MergeDeploymentManager(models.Manager):
""" TarsDeployment manager for maintain group's deployments history, merge related groups
history into a chain
"""
def __init__(self, group):
super(MergeDeploymentManager, self).__init__()
self.instance = group
from tars.deployment.models import TarsDeployment
self.model = TarsDeployment
def get_queryset(self):
qs = super(MergeDeploymentManager, self).get_queryset()
if isinstance(self.instance, JoinedGroup):
            # a joined group's history is based on the first sub group's history, including
            # any joined group associated with that concrete group, even if already deleted
first_sub_record = self.instance.aggregation.through.objects \
.filter(joinedgroup_id=self.instance.id).order_by("id").first()
if first_sub_record:
legacy_joined_groups = self.instance.aggregation.through.objects \
.filter(group_id=first_sub_record.group_id)
merge_ids = [first_sub_record.group_id] \
+ [join_group.joinedgroup_id for join_group in legacy_joined_groups]
else:
merge_ids = []
else:
merge_ids = self.instance.contained_by.through.objects \
.filter(group_id=self.instance.id).values_list("joinedgroup_id", flat=True)
core_q = reduce(
lambda join_q, g_id: join_q | Q(group_id=g_id),
merge_ids,
Q(group_id=self.instance.id)
)
return qs.filter(core_q).order_by('id')
| apache-2.0 | -6,384,645,263,817,918,000 | 36.684109 | 114 | 0.604886 | false |
zarafagroupware/zarafa-zsm | webservice/apiapp/management/commands/confcheck.py | 1 | 3757 | # Copyright 2012 - 2013 Zarafa B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation with the following additional
# term according to sec. 7:
#
# According to sec. 7 of the GNU Affero General Public License, version
# 3, the terms of the AGPL are supplemented with the following terms:
#
# "Zarafa" is a registered trademark of Zarafa B.V. The licensing of
# the Program under the AGPL does not imply a trademark license.
# Therefore any rights, title and interest in our trademarks remain
# entirely with us.
#
# However, if you propagate an unmodified version of the Program you are
# allowed to use the term "Zarafa" to indicate that you distribute the
# Program. Furthermore you may use our trademarks where it is necessary
# to indicate the intended purpose of a product or service provided you
# use it in accordance with honest practices in industrial or commercial
# matters. If you want to propagate modified versions of the Program
# under the name "Zarafa" or "Zarafa Server", you may only do so if you
# have a written permission by Zarafa B.V. (to acquire a permission
# please contact Zarafa at [email protected]).
#
# The interactive user interface of the software displays an attribution
# notice containing the term "Zarafa" and/or the logo of Zarafa.
# Interactive user interfaces of unmodified and modified versions must
# display Appropriate Legal Notices according to sec. 5 of the GNU
# Affero General Public License, version 3, when you propagate
# unmodified or modified versions of the Program. In accordance with
# sec. 7 b) of the GNU Affero General Public License, version 3, these
# Appropriate Legal Notices must retain the logo of Zarafa or display
# the words "Initial Development by Zarafa" if the display of the logo
# is not reasonably feasible for technical reasons. The use of the logo
# of Zarafa in Legal Notices is allowed for unmodified and modified
# versions of the software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from django.core.management.base import BaseCommand
from apiapp.management.io import io
from conf.settings import config
class Command(BaseCommand):
help = "Check configuration for errors."
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
# Warn about missing config file
if not config.have_zconf():
io.warn(
u"No config file found at {0}, using builtin config".format(
config.zconf_path))
return
io.info(u"Checking config file {0}".format(config.zconf_path))
# Warn about unknown/deprecated settings
for key in config.get_unknown_zconf_keys():
io.warn(u"Uknkown setting {0} ignored".format(key))
# Report error on any invalid settings values
invalids = config.get_invalid_zconf_keys()
for (key, exc) in invalids:
io.error(exc.message)
# Report error on any missing required settings
missing = config.get_missing_zconf_keys()
for key in missing:
io.error(u"Required setting {0} missing".format(key))
# Exit if we have errors
if invalids or missing:
sys.exit(1)
| agpl-3.0 | 3,788,263,537,973,142,500 | 41.693182 | 76 | 0.721054 | false |
sealevelresearch/tide-wrangler | tide_wrangler/parsers/task_2000.py | 1 | 1695 | #!/usr/bin/env python
import pytz
import datetime
from .row import Row
__all__ = ['get_parser']
class Parser():
def __init__(self, f):
self._fobj = f
def get(self):
return generate_rows(self._fobj)
def get_parser(fobj, options):
return Parser(fobj)
def generate_rows(f):
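    # Data records start after a 20-line header; each whitespace-delimited row
    # carries an ignore flag, year, day-of-year, decimal hour and height in cm
    # among its ten columns.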
for line in f.readlines()[20:]:
line = line.strip(' \n\r')
if len(line) == 0:
continue
(_, ignore, year, day_365, hour_decimal, height_cm,
_, _, _, _) = line.split()
if int(ignore) != 0:
continue
when = make_datetime(int(year), int(day_365), float(hour_decimal))
height_m = float(height_cm) / 100
yield Row(when, observed_sea_level=height_m)
def make_datetime(year, day_365, hour_decimal):
return make_day_datetime(year, day_365) + make_timedelta(hour_decimal)
def make_day_datetime(year, days_365):
"""
January 1st is represented by 2013, 1
    February 1st is represented by 2013, 32
>>> make_day_datetime(2013, 1)
datetime.datetime(2013, 1, 1, 0, 0, tzinfo=<UTC>)
>>> make_day_datetime(2013, 32)
datetime.datetime(2013, 2, 1, 0, 0, tzinfo=<UTC>)
"""
return (datetime.datetime(year, 1, 1, tzinfo=pytz.UTC) +
datetime.timedelta(days=days_365 - 1))
def make_timedelta(hour_decimal):
"""
>>> make_timedelta(0.016)
datetime.timedelta(0, 60)
"""
delta = datetime.timedelta(hours=hour_decimal)
return datetime.timedelta(seconds=my_round(delta.total_seconds(), 60))
def my_round(x, base):
"""
>>> my_round(59, 60)
60
>>> my_round(61, 60)
60
"""
return int(base * round(float(x) / base))
| mit | 8,514,717,235,296,300,000 | 21.905405 | 74 | 0.589381 | false |
ati-ozgur/KDD99ReviewArticle | HelperCodes/create_table_JournalAndArticleCounts.py | 1 | 1930 | import ReviewHelper
import pandas as pd
df = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()
#df_journal = df.groupby('journal')["ID"]
dfJournalList = df.groupby(['journal'])['ID'].count().order(ascending=False)
isOdd = (dfJournalList.size % 2 == 1)
if (isOdd):
table_row_length = dfJournalList.size / 2 +1
else:
table_row_length = dfJournalList.size / 2
table_content_inside=""
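# Lay the journals out in two LaTeX columns: table row `index` pairs journal
# `index` with journal `index + table_row_length`.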
for index in range(table_row_length):
journal_name_1column = dfJournalList.index[index]
journal_count_1column = dfJournalList[index]
second_column_index = index + table_row_length
if(second_column_index < dfJournalList.size):
journal_name_2column = dfJournalList.index[second_column_index]
journal_count_2column = dfJournalList[second_column_index]
else:
journal_name_2column = ""
journal_count_2column = ""
line = "{journal_name_1column} & {journal_count_1column} & {journal_name_2column} & {journal_count_2column} \\\\ \n".format(
journal_name_1column = journal_name_1column
,journal_count_1column = journal_count_1column
,journal_name_2column = journal_name_2column
,journal_count_2column = journal_count_2column
)
table_content_inside = table_content_inside + line
table_content_start = """
\\begin{table*}[!ht]
\\caption{ \\textbf{Journals and Article Counts} }
\\label{table-JournalAndArticleCounts}
\\centering
\\begin{adjustbox}{max width=\\textwidth}
\\normalsize
\\begin{tabular}{llll}
\\toprule
Journal Name & Article Count & Journal Name & Article Count \\\\
\\midrule
"""
table_content_end = """
\\bottomrule
\\end{tabular}
\\end{adjustbox}
\\end{table*}
"""
table_content_full = table_content_start + table_content_inside + table_content_end
filename = "../latex/table-JournalAndArticleCounts.tex"
target = open(filename, 'w')
target.write(table_content_full)
target.close()
| mit | 6,558,627,946,973,043,000 | 25.081081 | 130 | 0.688601 | false |
PokeHunterProject/pogom-linux | pogom/pgoapi/__init__.py | 1 | 2430 | """
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
# from __future__ import absolute_import
from .exceptions import PleaseInstallProtobufVersion3
import pkg_resources
import logging
__title__ = 'pgoapi'
__version__ = '1.1.7'
__author__ = 'tjado'
__license__ = 'MIT License'
__copyright__ = 'Copyright (c) 2016 tjado <https://github.com/tejado>'
__patchedBy__ = 'Patched for 0.45.0 by the PokeHunter Project <https://github.com/PokeHunterProject>'
protobuf_exist = False
protobuf_version = "0"
try:
protobuf_version = pkg_resources.get_distribution("protobuf").version
protobuf_exist = True
except:
pass
if (not protobuf_exist) or (int(protobuf_version[:1]) < 3):
print int(protobuf_version[:1])
raise PleaseInstallProtobufVersion3()
from .pgoapi import PGoApi
from .rpc_api import RpcApi
from .auth import Auth
logging.getLogger("pgoapi").addHandler(logging.NullHandler())
logging.getLogger("rpc_api").addHandler(logging.NullHandler())
logging.getLogger("utilities").addHandler(logging.NullHandler())
logging.getLogger("auth").addHandler(logging.NullHandler())
logging.getLogger("auth_ptc").addHandler(logging.NullHandler())
logging.getLogger("auth_google").addHandler(logging.NullHandler())
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
| mit | -520,971,139,557,488,900 | 35.268657 | 101 | 0.767078 | false |
FJFranklin/wifi-py-rpi-car-controller | RTSim/RTRobot.py | 1 | 2700 | from RTSim import RTSim
class RTRobot(RTSim):
"""
RTRobot Controller for RTSim real-time robot simulation.
https://github.com/FJFranklin/wifi-py-rpi-car-controller/tree/master/RTSim
"""
def __init__(self, seconds=180, test_name='default'):
# usage: RTRobot (seconds, test_name)
# where test_name is one of 'default', 'random', 'TNT', 'CWC' or 'BSB'
# This is the Python version of the coursework 'Matlab Robot':
# In the following line, replace the number with your Student ID
        id_number = 170000000
RTSim.__init__(self, seconds, test_name, id_number)
def setup(self):
# setup() is called once at the beginning
self.target = self.get_target() # where we're trying to get to
# For example:
self.last_ping_time = 0 # see ping_receive()
self.last_ping_distance = -1
# To work out which of the trials we're running:
results_so_far = self.get_result()
test_name = results_so_far['Trial']
print('This trial is:', test_name)
def loop(self):
# loop() is called repeatedly
# For example:
currentTime = self.millis() / 1000
self.position = self.get_GPS() # roughly where we are
self.orientation = self.get_compass() # which direction we're looking
if currentTime > 4:
self.ping_send() # it won't actually send more often than every 0.1s
self.set_ping_angle(180)
self.set_wheel_speeds(-127, -126)
def ping_receive(self, distance):
        # response to a self.ping_send()
# For example:
self.last_ping_time = self.millis() # the last time we received a ping [in milliseconds]
self.last_ping_distance = distance # distance measured (-ve if no echo)
if distance >= 0: # a -ve distance implies nothing seen
print('position=(', self.position[0], ',', self.position[1], '), orientation=', self.orientation, '; distance=', distance, sep='')
if __name__ == "__main__":
# Option to run from command line
import argparse
parser = argparse.ArgumentParser(description="RTRobot Coursework - Guide a two-wheeled robot round the map.")
parser.add_argument('--duration', help='How many seconds to run [40].', default=40, type=int)
parser.add_argument('--trial', help='Specify map type [default].', default='default', choices=['default', 'random', 'TNT', 'CWC', 'BSB'])
args = parser.parse_args()
R = RTRobot(args.duration, args.trial)
print(R.get_result())
| mit | -2,143,932,824,514,484,700 | 36.028169 | 146 | 0.590741 | false |
shreyasp/erpnext | erpnext/hr/doctype/salary_slip/salary_slip.py | 1 | 14795 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, cint, cstr, flt, getdate, nowdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.accounts.utils import get_fiscal_year
from erpnext.setup.utils import get_company_currency
from erpnext.hr.utils import set_employee_name
from erpnext.hr.doctype.process_payroll.process_payroll import get_month_details
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
class SalarySlip(TransactionBase):
def autoname(self):
self.name = make_autoname('Sal Slip/' +self.employee + '/.#####')
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
self.set_month_dates()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
# if self.salary_slip_based_on_timesheet or not self.net_pay:
self.calculate_net_pay()
company_currency = get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount:
self.update_component_row(struct_row, amount, key)
def update_component_row(self, struct_row, amount, key):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
self.append(key, {
'amount': amount,
'default_amount': amount,
'depends_on_lwp' : struct_row.depends_on_lwp,
'salary_component' : struct_row.salary_component
})
else:
component_row.amount = amount
def eval_condition_and_formula(self, d, data):
try:
if d.condition:
if not eval(d.condition, None, data):
return None
amount = d.amount
if d.amount_based_on_formula:
if d.formula:
amount = eval(d.formula, None, data)
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except:
frappe.throw(_("Error in formula or condition"))
raise
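	# Illustrative example (hypothetical values, not from any shipped Salary Structure):
	# a component row with condition "base > 10000" and formula "base * 0.1" is
	# evaluated above against the data dict built by get_data_for_eval(), making
	# the component amount 10% of the employee's base.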
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
data = frappe._dict()
for d in self._salary_structure_doc.employees:
if d.employee == self.employee:
data.base, data.variable = d.base, d.variable
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for salary_component in salary_components:
data[salary_component.salary_component_abbr] = 0
return data
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
self.set_month_dates()
self.validate_dates()
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def set_month_dates(self):
if self.month and not self.salary_slip_based_on_timesheet:
m = get_month_details(self.fiscal_year, self.month)
self.start_date = m['month_start_date']
self.end_date = m['month_end_date']
def check_sal_struct(self, joining_date, relieving_date):
st_name = frappe.db.sql("""select parent from `tabSalary Structure Employee`
where employee=%s
and parent in (select name from `tabSalary Structure`
where is_active = 'Yes'
and (from_date <= %s or from_date <= %s)
and (to_date is null or to_date >= %s or to_date >= %s))
""",(self.employee, self.start_date, joining_date, self.end_date, relieving_date))
if st_name:
if len(st_name) > 1:
frappe.msgprint(_("Multiple active Salary Structures found for employee {0} for the given dates")
.format(self.employee), title=_('Warning'))
return st_name and st_name[0][0] or ''
else:
self.salary_structure = None
frappe.throw(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
make_salary_slip(self._salary_structure_doc.name, self)
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
self.add_earning_for_hourly_wages(self._salary_structure_doc.salary_component)
def process_salary_structure(self):
'''Calculate salary after salary structure details have been updated'''
self.pull_emp_details()
self.get_leave_details()
self.calculate_net_pay()
def add_earning_for_hourly_wages(self, salary_component):
default_type = False
for data in self.earnings:
if data.salary_component == salary_component:
data.amount = self.hour_rate * self.total_working_hours
default_type = True
break
if not default_type:
earnings = self.append('earnings', {})
earnings.salary_component = salary_component
earnings.amount = self.hour_rate * self.total_working_hours
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not self.fiscal_year:
# if default fiscal year is not set, get from nowdate
self.fiscal_year = get_fiscal_year(nowdate())[0]
if not self.month:
self.month = "%02d" % getdate(nowdate()).month
self.set_month_dates()
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
working_days = date_diff(self.end_date, self.start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = self.calculate_lwp(holidays, working_days)
self.total_days_in_month = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if joining_date > getdate(self.start_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if relieving_date > start_date and relieving_date < getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where month = %s and fiscal_year = %s and docstatus != 2
and employee = %s and name != %s""",
(self.month, self.fiscal_year, self.employee, self.name))
if ret_exist:
				employee = self.employee
				self.employee = ''
				frappe.throw(_("Salary Slip of employee {0} already created for this period").format(employee))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def sum_components(self, component_type, total_field):
for d in self.get(component_type):
if cint(d.depends_on_lwp) == 1 and not self.salary_slip_based_on_timesheet:
d.amount = rounded((flt(d.amount) * flt(self.payment_days)
/ cint(self.total_days_in_month)), self.precision("amount", component_type))
elif not self.payment_days and not self.salary_slip_based_on_timesheet:
d.amount = 0
elif not d.amount:
d.amount = d.default_amount
self.set(total_field, self.get(total_field) + flt(d.amount))
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
self.gross_pay = flt(self.arrear_amount) + flt(self.leave_encashment_amount)
self.total_deduction = 0
self.sum_components('earnings', 'gross_pay')
self.sum_components('deductions', 'total_deduction')
self.net_pay = flt(self.gross_pay) - flt(self.total_deduction)
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
if(frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")):
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
subj = 'Salary Slip - from {0} to {1}, fiscal year {2}'.format(self.start_date, self.end_date, self.fiscal_year)
frappe.sendmail([receiver], subject=subj, message = _("Please see attachment"),
attachments=[frappe.attach_print(self.doctype, self.name, file_name=self.name)], reference_doctype= self.doctype, reference_name= self.name)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
if self.journal_entry:
status = "Paid"
elif self.docstatus == 2:
status = "Cancelled"
return status
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "status", "Submitted")
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
| gpl-3.0 | -4,528,810,867,627,106,000 | 36.173367 | 176 | 0.693748 | false |
Konovalov-Nik/storyboard | storyboard/projects/models.py | 1 | 1893 | # Copyright 2011 Thierry Carrez <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.db import models
class Project(models.Model):
name = models.CharField(max_length=50, primary_key=True)
title = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class ProjectGroup(models.Model):
name = models.CharField(max_length=50, primary_key=True)
title = models.CharField(max_length=100)
members = models.ManyToManyField(Project)
def __unicode__(self):
return self.name
class Branch(models.Model):
BRANCH_STATUS = (
('M', 'master'),
('R', 'release'),
('S', 'stable'),
('U', 'unsupported'))
name = models.CharField(max_length=50)
short_name = models.CharField(max_length=20)
status = models.CharField(max_length=1, choices=BRANCH_STATUS)
release_date = models.DateTimeField()
def __unicode__(self):
return self.name
class Meta:
ordering = ['release_date']
class Milestone(models.Model):
name = models.CharField(max_length=50)
branch = models.ForeignKey(Branch)
released = models.BooleanField(default=False)
undefined = models.BooleanField(default=False)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
| apache-2.0 | 9,213,235,775,598,121,000 | 28.578125 | 78 | 0.673006 | false |
OpenBfS/dokpool-plone | Plone/src/docpool.localbehavior/docpool/localbehavior/localbehavior.py | 1 | 1955 | from Acquisition import aq_inner
from docpool.localbehavior import MessageFactory as _
from plone.autoform import directives
from plone.autoform.interfaces import IFormFieldProvider
from plone.supermodel import model
from z3c.form.browser.checkbox import CheckBoxFieldWidget
from zope import schema
from zope.component import getMultiAdapter
from zope.interface import provider
from zope.interface import Interface
from zope.schema.interfaces import IContextAwareDefaultFactory
@provider(IContextAwareDefaultFactory)
def initializeLocalBehaviors(context):
dp_app_state = getMultiAdapter((context, context.REQUEST), name=u'dp_app_state')
return dp_app_state.effectiveAppsHere()
@provider(IFormFieldProvider)
class ILocalBehaviorSupport(model.Schema):
directives.widget(local_behaviors=CheckBoxFieldWidget)
local_behaviors = schema.List(
title=u'Behaviors',
description=_(
u'description_local_behaviors',
default=u'Select applications supported for this content,'
' changes will be applied after saving',
),
required=False,
defaultFactory=initializeLocalBehaviors,
value_type=schema.Choice(
title=u'Applications',
vocabulary="LocalBehaviors"),
)
class ILocalBehaviorSupporting(Interface):
"""Marker"""
class LocalBehaviorSupport(object):
def __init__(self, context):
self.context = context
def _get_local_behaviors(self):
return list(set(self.context.local_behaviors))
def _set_local_behaviors(self, value):
        if isinstance(value, (list, tuple)):
value = list(set(value))
context = aq_inner(self.context)
if value is not None:
context.local_behaviors = list(set(value))
else:
context.local_behaviors = []
local_behaviors = property(_get_local_behaviors, _set_local_behaviors)
| gpl-3.0 | -6,956,147,756,234,758,000 | 32.135593 | 84 | 0.707417 | false |
rsmz/dtrange | test/test_calendar.py | 1 | 6615 | from dtrange.calendar import day_of_year, day_of_year_date, date_plus_days, ordinal
from datetime import datetime
import unittest
class TestCalendar(unittest.TestCase):
def test_day_of_year(self):
dt = datetime(2000, 2, 1)
doy = day_of_year(dt, 'julian')
expect = 32
self.assertEqual(expect, doy)
dt = datetime(2000, 3, 1)
doy = day_of_year(dt, 'julian')
expect = 61
self.assertEqual(expect, doy)
dt = datetime(2000, 12, 31)
doy = day_of_year(dt, 'julian')
expect = 366
self.assertEqual(expect, doy)
dt = datetime(2012, 2, 1)
doy = day_of_year(dt, 'gregorian')
expect = 32
self.assertEqual(expect, doy)
dt = datetime(2012, 3, 1)
doy = day_of_year(dt, 'gregorian')
expect = 61
self.assertEqual(expect, doy)
dt = datetime(2012, 12, 31)
doy = day_of_year(dt, 'gregorian')
expect = 366
self.assertEqual(expect, doy)
dt = datetime(2011, 12, 31)
doy = day_of_year(dt, 'gregorian')
expect = 365
self.assertEqual(expect, doy)
dt = datetime(2012, 3, 1)
doy = day_of_year(dt, '360')
expect = 61
self.assertEqual(expect, doy)
dt = datetime(2012, 12, 30)
doy = day_of_year(dt, '360')
expect = 360
self.assertEqual(expect, doy)
dt = datetime(2012, 3, 1)
doy = day_of_year(dt, 'noleap')
expect = 60
self.assertEqual(expect, doy)
dt = datetime(2012, 12, 31)
doy = day_of_year(dt, 'noleap')
expect = 365
self.assertEqual(expect, doy)
dt = datetime(2011, 2, 1)
doy = day_of_year(dt, 'leap')
expect = 32
self.assertEqual(expect, doy)
dt = datetime(2011, 3, 1)
doy = day_of_year(dt, 'leap')
expect = 61
self.assertEqual(expect, doy)
dt = datetime(2011, 12, 31)
doy = day_of_year(dt, 'leap')
expect = 366
self.assertEqual(expect, doy)
def test_day_of_year_date(self):
ymd = day_of_year_date(31, 2000, 'julian')
expect = (2000,1,31)
self.assertEqual(expect, ymd)
ymd = day_of_year_date(32, 2000, 'julian')
expect = (2000,2,1)
self.assertEqual(expect, ymd)
ymd = day_of_year_date(366, 2000, 'julian')
expect = (2000,12,31)
self.assertEqual(expect, ymd)
ymd = day_of_year_date(367, 2000, 'julian')
expect = (2001,1,1)
self.assertEqual(expect, ymd)
def test_date_plus_days(self):
d = datetime(2000,1,1)
n = 30
c = 'julian'
res = date_plus_days(d, n, c)
expect = datetime(2000,1,31)
self.assertEqual(expect, res)
d = datetime(2000,1,1)
n = 60
c = 'julian'
res = date_plus_days(d, n, c)
expect = datetime(2000,3,1)
self.assertEqual(expect, res)
d = datetime(2001,1,1)
n = 60
c = 'julian'
res = date_plus_days(d, n, c)
expect = datetime(2001,3,2)
self.assertEqual(expect, res)
d = datetime(2000,1,1)
n = 365
c = 'julian'
res = date_plus_days(d, n, c)
expect = datetime(2000,12,31)
self.assertEqual(expect, res)
d = datetime(2000,1,1)
n = 366
c = 'julian'
res = date_plus_days(d, n, c)
expect = datetime(2001,1,1)
self.assertEqual(expect, res)
d = datetime(2011,1,1)
n = 30
c = 'gregorian'
res = date_plus_days(d, n, c)
expect = datetime(2011,1,31)
self.assertEqual(expect, res)
d = datetime(2011,1,1)
n = 59
c = 'gregorian'
res = date_plus_days(d, n, c)
expect = datetime(2011,3,1)
self.assertEqual(expect, res)
d = datetime(2012,1,1)
n = 60
c = 'gregorian'
res = date_plus_days(d, n, c)
expect = datetime(2012,3,1)
self.assertEqual(expect, res)
d = datetime(2011,1,1)
n = 365
c = 'gregorian'
res = date_plus_days(d, n, c)
expect = datetime(2012,1,1)
self.assertEqual(expect, res)
d = datetime(2012,1,1)
n = 366
c = 'gregorian'
res = date_plus_days(d, n, c)
expect = datetime(2013,1,1)
self.assertEqual(expect, res)
d = datetime(2012,1,1)
n = 365
c = 'noleap'
res = date_plus_days(d, n, c)
expect = datetime(2013,1,1)
self.assertEqual(expect, res)
d = datetime(2012,1,1)
n = 366
c = 'noleap'
res = date_plus_days(d, n, c)
expect = datetime(2013,1,2)
self.assertEqual(expect, res)
d = datetime(2011,1,1)
n = 365
c = 'leap'
res = date_plus_days(d, n, c)
expect = datetime(2011,12,31)
self.assertEqual(expect, res)
d = datetime(2011,1,1)
n = 366
c = 'leap'
res = date_plus_days(d, n, c)
expect = datetime(2012,1,1)
self.assertEqual(expect, res)
d = datetime(2011,1,1)
n = 360
c = '360'
res = date_plus_days(d, n, c)
expect = datetime(2012,1,1)
self.assertEqual(expect, res)
def test_ordinal(self):
dt = datetime(1,1,1)
self.assertEqual(1, ordinal(dt, 'gregorian'))
self.assertEqual(1, ordinal(dt, 'julian'))
self.assertEqual(1, ordinal(dt, 'noleap'))
self.assertEqual(1, ordinal(dt, 'leap'))
self.assertEqual(1, ordinal(dt, '360'))
dt = datetime(2,1,1)
self.assertEqual(366, ordinal(dt, 'gregorian'))
self.assertEqual(366, ordinal(dt, 'julian'))
self.assertEqual(366, ordinal(dt, 'noleap'))
self.assertEqual(367, ordinal(dt, 'leap'))
self.assertEqual(361, ordinal(dt, '360'))
dt = datetime(5,1,1)
self.assertEqual(1462, ordinal(dt, 'gregorian'))
self.assertEqual(1462, ordinal(dt, 'julian'))
self.assertEqual(1461, ordinal(dt, 'noleap'))
self.assertEqual(1465, ordinal(dt, 'leap'))
self.assertEqual(1441, ordinal(dt, '360'))
dt = datetime(5,3,1)
self.assertEqual(1521, ordinal(dt, 'gregorian'))
self.assertEqual(1521, ordinal(dt, 'julian'))
self.assertEqual(1520, ordinal(dt, 'noleap'))
self.assertEqual(1525, ordinal(dt, 'leap'))
self.assertEqual(1501, ordinal(dt, '360'))
if '__main__' == __name__:
unittest.main(verbosity=2)
| gpl-3.0 | -9,199,504,948,931,522,000 | 27.390558 | 83 | 0.531973 | false |
samhatfield/mkdocs | mkdocs/tests/config/config_tests.py | 1 | 6957 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
import six
from mkdocs import config
from mkdocs.config import config_options
from mkdocs.exceptions import ConfigurationError
from mkdocs.tests.base import dedent
def ensure_utf(string):
return string.encode('utf-8') if six.PY2 else string
class ConfigTests(unittest.TestCase):
def test_missing_config_file(self):
def load_missing_config():
config.load_config(config_file='bad_filename.yaml')
self.assertRaises(ConfigurationError, load_missing_config)
def test_missing_site_name(self):
c = config.Config(schema=config.DEFAULT_SCHEMA)
c.load_dict({})
errors, warnings = c.validate()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0][0], 'site_name')
self.assertEqual(str(errors[0][1]), 'Required configuration not provided.')
self.assertEqual(len(warnings), 0)
def test_empty_config(self):
def load_empty_config():
config.load_config(config_file='/dev/null')
self.assertRaises(ConfigurationError, load_empty_config)
def test_nonexistant_config(self):
def load_empty_config():
config.load_config(config_file='/path/that/is/not/real')
self.assertRaises(ConfigurationError, load_empty_config)
def test_invalid_config(self):
file_contents = dedent("""
- ['index.md', 'Introduction']
- ['index.md', 'Introduction']
- ['index.md', 'Introduction']
""")
config_file = tempfile.NamedTemporaryFile('w', delete=False)
try:
config_file.write(ensure_utf(file_contents))
config_file.flush()
config_file.close()
self.assertRaises(
ConfigurationError,
config.load_config, config_file=open(config_file.name, 'rb')
)
finally:
os.remove(config_file.name)
def test_config_option(self):
"""
Users can explicitly set the config file using the '--config' option.
Allows users to specify a config other than the default `mkdocs.yml`.
"""
expected_result = {
'site_name': 'Example',
'pages': [
{'Introduction': 'index.md'}
],
}
file_contents = dedent("""
site_name: Example
pages:
- ['index.md', 'Introduction']
""")
config_file = tempfile.NamedTemporaryFile('w', delete=False)
try:
config_file.write(ensure_utf(file_contents))
config_file.flush()
config_file.close()
result = config.load_config(config_file=config_file.name)
self.assertEqual(result['site_name'], expected_result['site_name'])
self.assertEqual(result['pages'], expected_result['pages'])
finally:
os.remove(config_file.name)
def test_theme(self):
mytheme = tempfile.mkdtemp()
custom = tempfile.mkdtemp()
configs = [
dict(), # default theme
{"theme": "readthedocs"}, # builtin theme
{"theme_dir": mytheme}, # custom only
{"theme": "cosmo", "theme_dir": custom}, # builtin and custom
]
abs_path = os.path.abspath(os.path.dirname(__file__))
mkdocs_dir = os.path.abspath(os.path.join(abs_path, '..', '..'))
theme_dir = os.path.abspath(os.path.join(mkdocs_dir, 'themes'))
search_asset_dir = os.path.abspath(os.path.join(
mkdocs_dir, 'assets', 'search'))
results = (
[os.path.join(theme_dir, 'mkdocs'), search_asset_dir],
[os.path.join(theme_dir, 'readthedocs'), search_asset_dir],
[mytheme, search_asset_dir],
[custom, os.path.join(theme_dir, 'cosmo'), search_asset_dir],
)
for config_contents, result in six.moves.zip(configs, results):
c = config.Config(schema=(
('theme', config_options.Theme(default='mkdocs')),
('theme_dir', config_options.ThemeDir(exists=True)),
))
c.load_dict(config_contents)
c.validate()
self.assertEqual(c['theme_dir'], result)
def test_default_pages(self):
tmp_dir = tempfile.mkdtemp()
try:
open(os.path.join(tmp_dir, 'index.md'), 'w').close()
open(os.path.join(tmp_dir, 'about.md'), 'w').close()
conf = config.Config(schema=config.DEFAULT_SCHEMA)
conf.load_dict({
'site_name': 'Example',
'docs_dir': tmp_dir
})
conf.validate()
self.assertEqual(['index.md', 'about.md'], conf['pages'])
finally:
shutil.rmtree(tmp_dir)
def test_default_pages_nested(self):
tmp_dir = tempfile.mkdtemp()
try:
open(os.path.join(tmp_dir, 'index.md'), 'w').close()
open(os.path.join(tmp_dir, 'about.md'), 'w').close()
os.makedirs(os.path.join(tmp_dir, 'sub'))
open(os.path.join(tmp_dir, 'sub', 'sub.md'), 'w').close()
os.makedirs(os.path.join(tmp_dir, 'sub', 'sub2'))
open(os.path.join(tmp_dir, 'sub', 'sub2', 'sub2.md'), 'w').close()
conf = config.Config(schema=config.DEFAULT_SCHEMA)
conf.load_dict({
'site_name': 'Example',
'docs_dir': tmp_dir
})
conf.validate()
self.assertEqual([
'index.md',
'about.md',
{'Sub': [
os.path.join('sub', 'sub.md'),
{'Sub2': [
os.path.join('sub', 'sub2', 'sub2.md'),
]}
]}
], conf['pages'])
finally:
shutil.rmtree(tmp_dir)
def test_doc_dir_in_site_dir(self):
j = os.path.join
test_configs = (
{'docs_dir': j('site', 'docs'), 'site_dir': 'site'},
{'docs_dir': 'docs', 'site_dir': '.'},
{'docs_dir': '.', 'site_dir': '.'},
{'docs_dir': 'docs', 'site_dir': ''},
{'docs_dir': '', 'site_dir': ''},
{'docs_dir': j('..', 'mkdocs', 'docs'), 'site_dir': 'docs'},
)
conf = {
'site_name': 'Example',
}
for test_config in test_configs:
patch = conf.copy()
patch.update(test_config)
# Same as the default schema, but don't verify the docs_dir exists.
c = config.Config(schema=(
('docs_dir', config_options.Dir(default='docs')),
('site_dir', config_options.SiteDir(default='site')),
))
c.load_dict(patch)
self.assertRaises(config_options.ValidationError, c.validate)
| bsd-2-clause | -6,825,038,675,139,912,000 | 33.270936 | 83 | 0.535144 | false |
scopatz/regolith | regolith/broker.py | 1 | 2663 | """API for accessing the metadata and file storage"""
from regolith.database import dump_database, open_dbs
from regolith.runcontrol import DEFAULT_RC, load_rcfile, filter_databases
from regolith.storage import store_client, push
def load_db(rc_file="regolithrc.json"):
"""Create a Broker instance from an rc file"""
rc = DEFAULT_RC
rc._update(load_rcfile(rc_file))
filter_databases(rc)
return Broker(rc)
class Broker:
"""Interface to the database and file storage systems
Examples
--------
>>> # Load the db
>>> db = Broker.from_rc()
    >>> # Get a document from the broker
>>> ergs =db['group']['ergs']
>>> # Store a file
>>> db.add_file(ergs, 'myfile', '/path/to/file/hello.txt')
>>> # Get a file from the store
>>> path = db.get_file_path(ergs, 'myfile')
"""
def __init__(self, rc=DEFAULT_RC):
self.rc = rc
# TODO: Lazy load these
with store_client(rc) as sclient:
self.store = sclient
rc.client = open_dbs(rc)
self._dbs = rc.client.dbs
self.md = rc.client.chained_db
self.db_client = rc.client
def add_file(self, document, name, filepath):
"""Add a file to a document in a collection.
Parameters
----------
document : dict
The document to add the file to
name : str
Name of the reference to the file
filepath : str
Location of the file on local disk
"""
output_path = self.store.copydoc(filepath)
if "files" not in document:
document["files"] = {}
document["files"][name] = output_path
for db in self.rc.databases:
dump_database(db, self.db_client, self.rc)
push(self.store.store, self.store.path)
@classmethod
def from_rc(cls, rc_file="regolithrc.json"):
"""Return a Broker instance"""
return load_db(rc_file)
def get_file_path(self, document, name):
""" Get a file from the file storage associated with the document and
name
Parameters
----------
document : dict
The document which stores the reference to the file
name : str
The name of the file stored (note that this can be different from
the filename itself)
Returns
-------
path : str or None
The file path, if not in the storage None
"""
if "files" in document:
return self.store.retrieve(document["files"][name])
else:
return None
def __getitem__(self, item):
return self.md[item]
| cc0-1.0 | -2,406,156,181,989,504,000 | 28.921348 | 77 | 0.575667 | false |
christodoulos/pycompgeom | pycompgeom/algorithms.py | 1 | 1904 | from primitives import *
from predicates import *
import random
def jarvis(points):
r0 = min(points)
hull = [r0]
r = r0
while True:
u = random.choice(points)
for t in points:
if cw(r, u, t) or collinear(r, u, t) and between(r, t, u):
u = t
if u == r0: break
else:
r = u
points.remove(r)
hull.append(r)
return hull
def find_bridge(poly1, poly2, upper=True):
	max_p1, min_p2 = max(poly1.vertices), min(poly2.vertices)
i, j = poly1.index(max_p1), poly2.index(min_p2)
bridge_found = False
while not bridge_found:
if upper:
if not ccw(poly1[i], poly1[i+1], poly2[j]):
i += 1; i_changed = True
else: i_changed = False
if not cw(poly2[j], poly2[j-1], poly1[i]):
j -= 1; j_changed = True
else: j_changed = False
else:
if not cw(poly1[i], poly1[i-1], poly2[j]):
i -= 1; i_changed = True
else: i_changed = False
if not ccw(poly2[j], poly2[j+1], poly1[i]):
j -= 1; j_changed = True
else: j_changed = False
bridge_found = not i_changed and not j_changed
return Segment2(poly1[i], poly2[j])
def andrew(points, return_hull=True):
upper = []
lower = []
for point in sorted(points):
while len(upper) > 1 and ccwon(upper[-2], upper[-1], point):
upper.pop()
while len(lower) > 1 and cwon(lower[-2], lower[-1], point):
lower.pop()
upper.append(point)
lower.append(point)
if return_hull:
return lower[:-1]+ [x for x in reversed(upper[1:])]
else:
return upper, lower
def antipodal_pairs(points):
U, L = andrew(points, return_hull=False)
i, j = 0, len(L)-1
while i<len(U)-1 or j>0:
yield U[i], L[j]
if i == len(U)-1: j -= 1
elif j == 0: i += 1
elif (U[i+1].y-U[i].y) * (L[j].x-L[j-1].x) > \
(L[j].y-L[j-1].y) * (U[i+1].x-U[i].x):
i += 1
else: j -= 1
def diameter(points):
dlist = [((p.x-q.x)**2+(p.y-q.y)**2,(p,q)) \
for p,q in antipodal_pairs(points)]
diam, pair = max(dlist)
return pair
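# Illustrative usage sketch (an assumption, not part of the original module): it
# presumes primitives provides a Point2(x, y) constructor, consistent with the
# Segment2 type used above; adjust the names if the actual API differs.
if __name__ == '__main__':
	pts = [Point2(random.random(), random.random()) for _ in range(20)]
	hull = andrew(pts)    # convex hull via Andrew's monotone chain
	p, q = diameter(pts)  # farthest pair of points (rotating calipers)
	print('hull size: %d, diameter endpoints: %s %s' % (len(hull), p, q))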
| gpl-3.0 | 9,201,103,693,256,576,000 | 24.052632 | 62 | 0.596113 | false |
paramite/blazar | climate/api/v2/app.py | 1 | 2477 | # Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient.middleware import auth_token
from oslo.config import cfg
import pecan
from climate.api.v2 import hooks
from climate.api.v2 import middleware
from climate.openstack.common.middleware import debug
auth_opts = [
cfg.StrOpt('auth_strategy',
default='keystone',
help='The strategy to use for auth: noauth or keystone.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
CONF.import_opt('log_exchange', 'climate.config')
OPT_GROUP_NAME = 'keystone_authtoken'
def setup_app(pecan_config=None, extra_hooks=None):
app_hooks = [hooks.ConfigHook(),
hooks.DBHook(),
hooks.ContextHook(),
hooks.RPCHook(),
]
# TODO(sbauza): Add stevedore extensions for loading hooks
if extra_hooks:
app_hooks.extend(extra_hooks)
app = pecan.make_app(
pecan_config.app.root,
debug=CONF.debug,
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
guess_content_type_from_ext=False
)
# WSGI middleware for debugging
if CONF.log_exchange:
app = debug.Debug.factory(pecan_config)(app)
# WSGI middleware for Keystone auth
# NOTE(sbauza): ACLs are always active unless for unittesting where
# enable_acl could be set to False
if pecan_config.app.enable_acl:
CONF.register_opts(auth_token.opts, group=OPT_GROUP_NAME)
keystone_config = dict(CONF.get(OPT_GROUP_NAME))
app = auth_token.AuthProtocol(app, conf=keystone_config)
return app
def make_app():
config = {
'app': {
'modules': ['climate.api.v2'],
'root': 'climate.api.root.RootController',
'enable_acl': True,
}
}
# NOTE(sbauza): Fill Pecan config and call modules' path app.setup_app()
app = pecan.load_app(config)
return app
| apache-2.0 | 363,380,826,227,969,860 | 28.843373 | 76 | 0.659265 | false |
Instanssi/Instanssi.org | Instanssi/admin_profile/forms.py | 1 | 3306 | # -*- coding: utf-8 -*-
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder
from django.contrib.auth.models import User
class InformationChangeForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(InformationChangeForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset(
'',
'first_name',
'last_name',
'email',
ButtonHolder(
Submit('submit', 'Tallenna')
)
)
)
self.fields['email'].required = True
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class PasswordChangeForm(forms.Form):
old_pw = forms.CharField(
widget=forms.PasswordInput,
label='Vanha salasana',
help_text='Kirjoita vanha salasanasi turvallisuussyistä.')
new_pw = forms.CharField(
widget=forms.PasswordInput,
label='Uusi salasana',
help_text='Kirjoita uusi salasanasi. Tulee olla vähintään 8 merkkiä pitkä.')
new_pw_again = forms.CharField(
widget=forms.PasswordInput,
label='Uusi salasana uudelleen',
help_text='Kirjoita uusi salasanasi toistamiseen varmistukseksi.')
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(PasswordChangeForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset(
'',
'old_pw',
'new_pw',
'new_pw_again',
ButtonHolder(
Submit('submit', 'Tallenna')
)
)
)
def save(self):
password = self.cleaned_data['new_pw']
self.user.set_password(password)
self.user.save()
def clean_old_pw(self):
# Make sure this is valid
old = self.cleaned_data['old_pw']
if not self.user.check_password(old):
raise forms.ValidationError('Vanha salasana väärin!')
# Remember to return cleaned data
return old
def clean_new_pw(self):
pw = self.cleaned_data['new_pw']
if len(pw) < 8:
raise forms.ValidationError('Salasanan tulee olla vähintään 8 merkkiä pitkä!')
return pw
def clean_new_pw_again(self):
pw = self.cleaned_data['new_pw_again']
if len(pw) < 8:
raise forms.ValidationError('Salasanan tulee olla vähintään 8 merkkiä pitkä!')
return pw
def clean(self):
cleaned_data = super(PasswordChangeForm, self).clean()
# Make sure new pw fields match
pwa = cleaned_data.get('new_pw')
pwb = cleaned_data.get('new_pw_again')
if pwa != pwb:
msg = 'Salasana ei vastaa edelliseen kenttään annettua!'
self._errors["new_pw_again"] = self.error_class([msg])
del cleaned_data["new_pw_again"]
# Remember to return cleaned data
return cleaned_data
| mit | 8,177,352,354,657,648,000 | 31.86 | 90 | 0.550822 | false |
LinkageIO/LocusPocus | tests/test_Ontology.py | 1 | 1800 |
"""
Unit tests for Ontology
"""
import minus80 as m80
import locuspocus as lp
def test_init(testOnt):
try:
testOnt
return True
except NameError:
return False
def test_len(testOnt):
assert len(testOnt) == 10
def test_iter(testOnt):
for term in testOnt:
assert isinstance(term, lp.Term)
def test_access_loci(testOnt):
assert isinstance(testOnt.loci, lp.Loci)
def test_add_term():
try:
x = lp.Ontology("empty")
assert len(x) == 0
x.add_term(lp.Term("test",loci=[lp.Locus(1,1,1)]))
assert len(x) == 1
finally:
m80.delete("Ontology","empty")
def test_num_terms(testOnt):
assert testOnt.num_terms() == len(testOnt)
def test_num_terms_with_min(testOnt):
# should match 6,7,8,9,10
assert testOnt.num_terms(min_term_size=6) == 5
def test_num_terms_with_max(testOnt):
# should match 1,2,3,4,5
assert testOnt.num_terms(max_term_size=5) == 5
def test_get_item(testOnt):
assert isinstance(testOnt['term_1'], lp.Term)
def test_get_item_by_TID(testOnt):
# TIDs start at 1
assert isinstance(testOnt[1], lp.Term)
def test_terms_containing(testOnt):
# Locus 1,1,1 should be in all terms
assert len(testOnt.terms_containing([lp.Locus(1,1,1)])) == len(testOnt)
def test_terms_function(testOnt):
for term in testOnt.terms():
assert isinstance(term, lp.Term)
def test_terms_with_min(testOnt):
# should match 6,7,8,9,10
assert len(list(testOnt.terms(min_term_size=6))) == 5
def test_terms_with_max(testOnt):
# should match 1,2,3,4,5
assert len(list(testOnt.terms(max_term_size=5))) == 5
def test_summary(testOnt):
assert isinstance(testOnt.summary(),str)
def test_rand(testOnt):
assert isinstance(testOnt.rand(), lp.Term)
| mit | 7,979,349,418,811,028,000 | 23 | 76 | 0.650556 | false |
the-it/WS_THEbotIT | archive/online/2016/160711_replace_citations.py | 1 | 1373 | # -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
import re
import pywikibot
def add_zeros(number, digits):
number_str = str(number)
if number < 10:
for members in range(digits-1):
number_str = "0" + number_str
elif number < 100:
for members in range(digits-2):
number_str = "0" + number_str
elif number < 1000:
for members in range(digits-3):
number_str = "0" + number_str
return number_str
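# For the page numbers used below (1 to 562), add_zeros(n, 3) behaves like str(n).zfill(3).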
wiki = pywikibot.Site()
regex = re.compile("\{\{Zitierempfehlung\|Projekt=Karl Zeumer: ''Quellensammlung zur Geschichte der Deutschen Reichsverfassung in Mittelalter und Neuzeit''\.Tübingen: Verlag von J\.C\.B\. Mohr \(Paul Siebeck\), 1913\|Seite=(\d{1,3})\}\}")
for i in range(1, 563):
print(i)
page = pywikibot.Page(wiki, 'Seite:De Zeumer V2 {}.jpg'.format(add_zeros(i, 3)))
temp_text = page.text
if regex.search(temp_text):
if int(regex.search(temp_text).group(1)) != i:
temp_text = regex.sub("{{Zitierempfehlung|Projekt=Karl Zeumer: ''Quellensammlung zur Geschichte der Deutschen Reichsverfassung in Mittelalter und Neuzeit''.Tübingen: Verlag von J.C.B. Mohr (Paul Siebeck), 1913|Seite=" + str(i) +"}}", temp_text)
page.text = temp_text
page.save(summary='Zitierempfehlung korrigiert', botflag=True)
| mit | 4,581,316,253,179,235,300 | 39.323529 | 256 | 0.636032 | false |
mdauphin/pycvnode | pycvnode/connector.py | 1 | 3391 | import cv2
import numpy as np
class Connector(object):
class Direction:
OUTPUT = 1
INPUT = 2
def __init__(self,node,name,direction,type):
self.node = node
self.name = name
self.direction = direction
self.value = None
self.type = type
self.parser = ConnectorParser(self)
self.render = ConnectorRenderer(self)
def setValue(self,value):
self.value = self.parser.parse(value)
def generate(self):
return None
def evaluate(self):
raise Exception('Connector','Can not evaluate generic Connector')
class ConnectorInput(Connector):
def __init__(self,node,name,type):
self.connection = None
super( ConnectorInput, self ).__init__( node, name,
Connector.Direction.INPUT, type );
def generate(self):
if self.connection != None:
return self.connection.output_connector.generate()
if self.value != None:
if isinstance(self.value, str):
return "'%s'" % self.value
return str(self.value)
def evaluate(self):
if self.connection != None:
return self.connection.output_connector.evaluate()
elif self.value != None:
return self.value
else:
raise Exception('ConnectorInput','No connection no value to evaluate')
class ConnectorOutput(Connector):
_cpt = 0
def __init__(self,node,name,type):
self.varname = self.generate_uniq_var()
self.connections = []
super( ConnectorOutput, self ).__init__( node, name,
Connector.Direction.OUTPUT, type )
def generate_uniq_var(self):
ConnectorOutput._cpt += 1
return "var%d" % ( ConnectorOutput._cpt )
def generate(self):
return self.varname
def evaluate(self):
return self.node.evaluate()
class ConnectorParser(object):
def __init__(self,connector):
self.connector = connector
self.converter = {
'str' : self.toStr,
'int' : self.toInt,
'float' : self.toFloat,
'tuple' : self.toTuple,
}
def parse(self,value):
return self.converter[self.connector.type](value)
def toStr(self,value):
return value
def toInt(self,value):
return int(value)
def toFloat(self,value):
		return float(value)
def toTuple(self,value):
return eval(value)
class ConnectorRenderer(object):
def __init__(self,connector):
self.connector = connector
self.converter = {
'str' : self.toStr,
'int' : self.toStr,
'float' : self.toStr,
'tuple' : self.toStr,
'numpy.ndarray' : self.toImg,
}
def render(self):
return self.converter[self.connector.type](self.connector.evaluate())
def toImg(self, value ):
ret, buf = cv2.imencode( '.png', value )
return buf.tobytes()
def toStr(self,value):
return '<p>%s</p>' % value
class ConnectorJson(object):
def __init__(self,connector):
		self.connector = connector
def render(self):
#{ 'dir' : Direction.Input , 'name' : 'conIn' },
dir = 'Input'
		if isinstance(self.connector, ConnectorOutput):
dir = 'Output'
return { 'dir': dir, 'name' : self.connector.name }
| gpl-2.0 | 670,151,130,487,789,400 | 27.737288 | 82 | 0.576821 | false |
openatv/enigma2 | lib/python/Components/VfdSymbols.py | 2 | 12566 | # -*- coding: utf-8 -*-
from twisted.internet import threads
from config import config
from enigma import eDBoxLCD, eTimer, iPlayableService, pNavigation, iServiceInformation
import NavigationInstance
from Tools.Directories import fileExists
from Components.ParentalControl import parentalControl
from Components.ServiceEventTracker import ServiceEventTracker
from Components.SystemInfo import SystemInfo
from boxbranding import getBoxType, getMachineBuild
from time import time
import Components.RecordingConfig
POLLTIME = 5 # seconds
def SymbolsCheck(session, **kwargs):
global symbolspoller, POLLTIME
if getBoxType() in ('alien5','osninopro','osnino','osninoplus','tmtwin4k','mbmicrov2','revo4k','force3uhd','wetekplay', 'wetekplay2', 'wetekhub', 'ixussone', 'ixusszero', 'mbmicro', 'e4hd', 'e4hdhybrid', 'dm7020hd', 'dm7020hdv2', '9910lx', '9911lx', '9920lx') or getMachineBuild() in ('dags7362' , 'dags73625', 'dags5','ustym4kpro','beyonwizv2','viper4k','sf8008','sf8008m','gbmv200','cc1'):
POLLTIME = 1
symbolspoller = SymbolsCheckPoller(session)
symbolspoller.start()
class SymbolsCheckPoller:
def __init__(self, session):
self.session = session
self.blink = False
self.led = "0"
self.timer = eTimer()
self.onClose = []
self.__event_tracker = ServiceEventTracker(screen=self,eventmap=
{
iPlayableService.evUpdatedInfo: self.__evUpdatedInfo,
})
def __onClose(self):
pass
def start(self):
if self.symbolscheck not in self.timer.callback:
self.timer.callback.append(self.symbolscheck)
self.timer.startLongTimer(0)
def stop(self):
if self.symbolscheck in self.timer.callback:
self.timer.callback.remove(self.symbolscheck)
self.timer.stop()
def symbolscheck(self):
threads.deferToThread(self.JobTask)
self.timer.startLongTimer(POLLTIME)
def JobTask(self):
self.Recording()
self.PlaySymbol()
self.timer.startLongTimer(POLLTIME)
def __evUpdatedInfo(self):
self.service = self.session.nav.getCurrentService()
if getMachineBuild() == 'u41':
self.Resolution()
self.Audio()
self.Crypted()
self.Teletext()
self.Hbbtv()
self.PauseSymbol()
self.PlaySymbol()
self.PowerSymbol()
self.Timer()
self.Subtitle()
self.ParentalControl()
del self.service
def Recording(self):
if fileExists("/proc/stb/lcd/symbol_circle"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_circle", "w").write("3")
else:
open("/proc/stb/lcd/symbol_circle", "w").write("0")
elif getBoxType() in ('alphatriple','mixosf5', 'mixoslumi', 'mixosf7', 'gi9196m', 'sf3038') and fileExists("/proc/stb/lcd/symbol_recording"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
elif getMachineBuild() == 'u41' and fileExists("/proc/stb/lcd/symbol_pvr2"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_pvr2", "w").write("1")
else:
open("/proc/stb/lcd/symbol_pvr2", "w").write("0")
elif getBoxType() in ('alien5','osninopro','wetekplay', 'wetekplay2', 'wetekhub', 'ixussone', 'ixusszero', '9910lx', '9911lx', 'osnino', 'osninoplus', '9920lx') and fileExists("/proc/stb/lcd/powerled"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("0")
elif getBoxType() in ('mbmicrov2','mbmicro', 'e4hd', 'e4hdhybrid') and fileExists("/proc/stb/lcd/powerled"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("1")
elif getBoxType() in ('dm7020hd', 'dm7020hdv2') and fileExists("/proc/stb/fp/led_set"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/fp/led_set", "w").write("0x00000000")
self.led = "1"
else:
open("/proc/stb/fp/led_set", "w").write("0xffffffff")
self.led = "0"
else:
open("/proc/stb/fp/led_set", "w").write("0xffffffff")
elif getMachineBuild() in ('dags7362' , 'dags73625', 'dags5') or getBoxType() in ('tmtwin4k','revo4k','force3uhd') and fileExists("/proc/stb/lcd/symbol_rec"):
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/symbol_rec", "w").write("1")
self.led = "1"
else:
open("/proc/stb/lcd/symbol_rec", "w").write("0")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/symbol_rec", "w").write("0")
elif getMachineBuild() in ('sf8008','sf8008m','cc1','ustym4kpro','beyonwizv2','viper4k') and fileExists("/proc/stb/fp/ledpowercolor"):
import Screens.Standby
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/fp/ledpowercolor", "w").write("0")
self.led = "1"
else:
if Screens.Standby.inStandby:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledstandbycolor.value)
else:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledpowercolor.value)
self.led = "0"
elif self.led == "1":
if Screens.Standby.inStandby:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledstandbycolor.value)
else:
open("/proc/stb/fp/ledpowercolor", "w").write(config.usage.lcd_ledpowercolor.value)
else:
if not fileExists("/proc/stb/lcd/symbol_recording") or not fileExists("/proc/stb/lcd/symbol_record_1") or not fileExists("/proc/stb/lcd/symbol_record_2"):
return
recordings = len(NavigationInstance.instance.getRecordings(False,Components.RecordingConfig.recType(config.recording.show_rec_symbol_for_rec_types.getValue())))
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
if recordings == 1:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
elif recordings >= 2:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
open("/proc/stb/lcd/symbol_record_1", "w").write("0")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
def Subtitle(self):
if not fileExists("/proc/stb/lcd/symbol_smartcard") and not fileExists("/proc/stb/lcd/symbol_subtitle"):
return
subtitle = self.service and self.service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if subtitlelist:
subtitles = len(subtitlelist)
if fileExists("/proc/stb/lcd/symbol_subtitle"):
if subtitles > 0:
f = open("/proc/stb/lcd/symbol_subtitle", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_subtitle", "w")
f.write("0")
f.close()
else:
if subtitles > 0:
f = open("/proc/stb/lcd/symbol_smartcard", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_smartcard", "w")
f.write("0")
f.close()
else:
if fileExists("/proc/stb/lcd/symbol_subtitle"):
f = open("/proc/stb/lcd/symbol_subtitle", "w")
f.write("0")
f.close()
else:
f = open("/proc/stb/lcd/symbol_smartcard", "w")
f.write("0")
f.close()
def ParentalControl(self):
if not fileExists("/proc/stb/lcd/symbol_parent_rating"):
return
service = self.session.nav.getCurrentlyPlayingServiceReference()
if service:
if parentalControl.getProtectionLevel(service.toCompareString()) == -1:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("1")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
def PlaySymbol(self):
if not fileExists("/proc/stb/lcd/symbol_play"):
return
if SystemInfo["SeekStatePlay"]:
file = open("/proc/stb/lcd/symbol_play", "w")
file.write('1')
file.close()
else:
file = open("/proc/stb/lcd/symbol_play", "w")
file.write('0')
file.close()
def PauseSymbol(self):
if not fileExists("/proc/stb/lcd/symbol_pause"):
return
if SystemInfo["StatePlayPause"]:
file = open("/proc/stb/lcd/symbol_pause", "w")
file.write('1')
file.close()
else:
file = open("/proc/stb/lcd/symbol_pause", "w")
file.write('0')
file.close()
def PowerSymbol(self):
if not fileExists("/proc/stb/lcd/symbol_power"):
return
if SystemInfo["StandbyState"]:
file = open("/proc/stb/lcd/symbol_power", "w")
file.write('0')
file.close()
else:
file = open("/proc/stb/lcd/symbol_power", "w")
file.write('1')
file.close()
def Resolution(self):
if not fileExists("/proc/stb/lcd/symbol_hd"):
return
info = self.service and self.service.info()
if not info:
return ""
videosize = int(info.getInfo(iServiceInformation.sVideoWidth))
if videosize >= 1280:
f = open("/proc/stb/lcd/symbol_hd", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_hd", "w")
f.write("0")
f.close()
def Crypted(self):
if not fileExists("/proc/stb/lcd/symbol_scrambled"):
return
info = self.service and self.service.info()
if not info:
return ""
crypted = info.getInfo(iServiceInformation.sIsCrypted)
if crypted == 1:
f = open("/proc/stb/lcd/symbol_scrambled", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_scrambled", "w")
f.write("0")
f.close()
def Teletext(self):
if not fileExists("/proc/stb/lcd/symbol_teletext"):
return
info = self.service and self.service.info()
if not info:
return ""
tpid = int(info.getInfo(iServiceInformation.sTXTPID))
if tpid != -1:
f = open("/proc/stb/lcd/symbol_teletext", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_teletext", "w")
f.write("0")
f.close()
def Hbbtv(self):
if not fileExists("/proc/stb/lcd/symbol_epg"):
return
info = self.service and self.service.info()
if not info:
return ""
hbbtv = info.getInfoString(iServiceInformation.sHBBTVUrl)
if hbbtv != "":
f = open("/proc/stb/lcd/symbol_epg", "w")
f.write("1")
f.close()
else:
f = open("/proc/stb/lcd/symbol_epg", "w")
f.write("0")
f.close()
def Audio(self):
if not fileExists("/proc/stb/lcd/symbol_dolby_audio"):
return
audio = self.service.audioTracks()
if audio:
n = audio.getNumberOfTracks()
idx = 0
while idx < n:
i = audio.getTrackInfo(idx)
description = i.getDescription();
if "AC3" in description or "AC-3" in description or "DTS" in description:
f = open("/proc/stb/lcd/symbol_dolby_audio", "w")
f.write("1")
f.close()
return
idx += 1
f = open("/proc/stb/lcd/symbol_dolby_audio", "w")
f.write("0")
f.close()
def Timer(self):
if fileExists("/proc/stb/lcd/symbol_timer"):
timer = NavigationInstance.instance.RecordTimer.getNextRecordingTime()
if timer > 0:
open("/proc/stb/lcd/symbol_timer", "w").write("1")
else:
open("/proc/stb/lcd/symbol_timer", "w").write("0")
| gpl-2.0 | -1,859,345,489,814,616,800 | 32.87062 | 393 | 0.668073 | false |
arenadata/ambari | ambari-server/src/main/resources/stacks/ADH/1.0/services/PIG/package/scripts/pig_client.py | 1 | 1908 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
import os
from resource_management import *
from resource_management.libraries.functions import conf_select
from pig import pig
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
class PigClient(Script):
def configure(self, env):
import params
env.set_params(params)
pig()
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class PigClientLinux(PigClient):
def get_stack_to_component(self):
return {"HDP": "hadoop-client"}
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
def install(self, env):
self.install_packages(env)
self.configure(env)
Execute(('tar', '-czf', '/usr/lib/pig/pig.tar.gz', '-C', '/usr/lib/pig/lib/', '.'), sudo = True)
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class PigClientWindows(PigClient):
def install(self, env):
import params
if params.pig_home is None:
self.install_packages(env)
self.configure(env)
if __name__ == "__main__":
PigClient().execute()
| apache-2.0 | -5,231,814,969,549,273,000 | 29.285714 | 100 | 0.745283 | false |
ARM-software/armnn | python/pyarmnn/test/test_supported_backends.py | 1 | 1398 | # Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import os
import platform
import pytest
import pyarmnn as ann
@pytest.fixture()
def get_supported_backends_setup(shared_data_folder):
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
get_device_spec = runtime.GetDeviceSpec()
supported_backends = get_device_spec.GetSupportedBackends()
yield supported_backends
def test_ownership():
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
device_spec = runtime.GetDeviceSpec()
assert not device_spec.thisown
def test_to_string():
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
device_spec = runtime.GetDeviceSpec()
expected_str = "IDeviceSpec {{ supportedBackends: [" \
"{}" \
"]}}".format(', '.join(map(lambda b: str(b), device_spec.GetSupportedBackends())))
assert expected_str == str(device_spec)
def test_get_supported_backends_cpu_ref(get_supported_backends_setup):
assert "CpuRef" in map(lambda b: str(b), get_supported_backends_setup)
@pytest.mark.aarch64
class TestNoneCpuRefBackends:
@pytest.mark.parametrize("backend", ["CpuAcc"])
def test_get_supported_backends_cpu_acc(self, get_supported_backends_setup, backend):
assert backend in map(lambda b: str(b), get_supported_backends_setup)
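# --- Editor's addition: stand-alone sketch of the API exercised by the tests above ---
# Not part of the original test module; it assumes pyarmnn is installed and uses only
# the calls already shown in the fixtures above.
def print_supported_backends():
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)
    backends = runtime.GetDeviceSpec().GetSupportedBackends()
    # Typically includes 'CpuRef'; 'CpuAcc'/'GpuAcc' depend on how Arm NN was built.
    print([str(b) for b in backends])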
| mit | 1,352,998,812,462,088,700 | 26.94 | 101 | 0.69864 | false |
ah-anssi/SecuML | SecuML/core/Classification/Configuration/DecisionTreeConfiguration.py | 1 | 6621 | # SecuML
# Copyright (C) 2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
from SecuML.core.Classification.Classifiers.DecisionTree import DecisionTree
from . import ClassifierConfFactory
from .ClassifierConfiguration import ClassifierConfiguration
from .TestConfiguration import TestConfFactory
class DecisionTreeConfiguration(ClassifierConfiguration):
def __init__(self, num_folds, sample_weight, families_supervision,
criterion, splitter, max_depth, min_samples_split, min_samples_leaf,
min_weight_fraction_leaf, max_features, max_leaf_nodes,
min_impurity_decrease,
test_conf,
logger=None):
ClassifierConfiguration.__init__(self, num_folds, sample_weight,
families_supervision,
test_conf=test_conf,
logger=logger)
self.model_class = DecisionTree
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
def getModelClassName(self):
return 'DecisionTree'
def generateSuffix(self):
suffix = ClassifierConfiguration.generateSuffix(self)
return suffix
def getParamGrid(self):
return None
def setBestValues(self, grid_search):
return
def getBestValues(self):
return None
@staticmethod
def fromJson(obj):
test_conf = TestConfFactory.getFactory().fromJson(obj['test_conf'])
conf = DecisionTreeConfiguration(obj['num_folds'], obj['sample_weight'],
obj['families_supervision'],
obj['criterion'],
obj['splitter'],
obj['max_depth'],
obj['min_samples_split'],
obj['min_samples_leaf'],
obj['min_weight_fraction_leaf'],
obj['max_features'],
obj['max_leaf_nodes'],
obj['min_impurity_decrease'],
test_conf)
return conf
def toJson(self):
conf = ClassifierConfiguration.toJson(self)
conf['__type__'] = 'DecisionTreeConfiguration'
conf['criterion'] = self.criterion
conf['splitter'] = self.splitter
conf['max_depth'] = self.max_depth
conf['min_samples_split'] = self.min_samples_split
conf['min_samples_leaf'] = self.min_samples_leaf
conf['min_weight_fraction_leaf'] = self.min_weight_fraction_leaf
conf['max_features'] = self.max_features
conf['max_leaf_nodes'] = self.max_leaf_nodes
conf['min_impurity_decrease'] = self.min_impurity_decrease
return conf
def probabilistModel(self):
return False
def semiSupervisedModel(self):
return False
def featureImportance(self):
return 'score'
@staticmethod
def generateParser(parser):
ClassifierConfiguration.generateParser(parser)
help_message = 'See the scikit-learn documentation.'
parser.add_argument('--criterion',
choices=['gini', 'entropy'],
default='gini',
help=help_message)
parser.add_argument('--splitter',
choices=['best', 'random'],
default='best',
help=help_message)
parser.add_argument('--max-depth',
type=int,
default=None,
help=help_message)
parser.add_argument('--min-samples-split',
type=int,
default=2,
help=help_message)
parser.add_argument('--min-samples-leaf',
type=int,
default=1,
help=help_message)
parser.add_argument('--min-weight-fraction-leaf',
type=float,
default=0,
help=help_message)
parser.add_argument('--max-features',
type=int,
default=None,
help=help_message)
parser.add_argument('--max-leaf-nodes',
type=int,
default=None,
help=help_message)
parser.add_argument('--min-impurity-decrease',
type=float,
default=0,
help=help_message)
@staticmethod
def generateParamsFromArgs(args):
params = ClassifierConfiguration.generateParamsFromArgs(args)
params['criterion'] = args.criterion
params['splitter'] = args.splitter
params['max_depth'] = args.max_depth
params['min_samples_split'] = args.min_samples_split
params['min_samples_leaf'] = args.min_samples_leaf
params['min_weight_fraction_leaf'] = args.min_weight_fraction_leaf
params['max_features'] = args.max_features
params['max_leaf_nodes'] = args.max_leaf_nodes
params['min_impurity_decrease'] = args.min_impurity_decrease
return params
ClassifierConfFactory.getFactory().registerClass('DecisionTreeConfiguration',
DecisionTreeConfiguration)
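# --- Editor's addition: illustrative wiring of the parser helpers above ---
# Not part of the original module. `argv` is a hypothetical command line; the
# remaining constructor arguments (test configuration, etc.) are normally supplied
# by the surrounding SecuML experiment code.
def example_parse_decision_tree_params(argv):
    import argparse
    parser = argparse.ArgumentParser()
    DecisionTreeConfiguration.generateParser(parser)
    args = parser.parse_args(argv)
    # e.g. {'criterion': 'gini', 'splitter': 'best', 'max_depth': None, ...}
    return DecisionTreeConfiguration.generateParamsFromArgs(args)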
| gpl-2.0 | 600,809,728,340,241,900 | 40.38125 | 85 | 0.542214 | false |
mpatacchiola/pyERA | examples/ex_nao_head_imitation/head_pose_estimation.py | 1 | 8515 | #!/usr/bin/env python
import numpy as np
import tensorflow as tf
import cv2
import os.path
DEBUG = False
class CnnHeadPoseEstimator:
def __init__(self, tf_session):
""" Init the class
@param tf_session An external tensorflow session
"""
self._sess = tf_session
def print_allocated_variables(self):
""" Print all the Tensorflow allocated variables
"""
all_vars = tf.all_variables()
print("[DEEPGAZE] Printing all the Allocated Tensorflow Variables:")
for k in all_vars:
print(k.name)
def load_yaw_variables(self, YawFilePath):
""" Load varibles from a checkpoint file
@param YawFilePath Path to a valid checkpoint
"""
#It is possible to use the checkpoint file
#y_ckpt = tf.train.get_checkpoint_state(YawFilePath)
#.restore(self._sess, y_ckpt.model_checkpoint_path)
#For future use, allocating a fraction of the GPU
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5) #Allocate only half of the GPU memory
if(os.path.isfile(YawFilePath)==False): raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the yaw file path is incorrect.')
tf.train.Saver(({"conv1_yaw_w": self.hy_conv1_weights, "conv1_yaw_b": self.hy_conv1_biases,
"conv2_yaw_w": self.hy_conv2_weights, "conv2_yaw_b": self.hy_conv2_biases,
"conv3_yaw_w": self.hy_conv3_weights, "conv3_yaw_b": self.hy_conv3_biases,
"dense1_yaw_w": self.hy_dense1_weights, "dense1_yaw_b": self.hy_dense1_biases,
"out_yaw_w": self.hy_out_weights, "out_yaw_b": self.hy_out_biases
})).restore(self._sess, YawFilePath)
def allocate_yaw_variables(self):
""" Allocate variables in memory
"""
self._num_labels = 1
# Input data [batch_size, image_size, image_size, channels]
self.tf_yaw_input_vector = tf.placeholder(tf.float32, shape=(64, 64, 3))
# Variables.
#Conv layer
#[patch_size, patch_size, num_channels, depth]
self.hy_conv1_weights = tf.Variable(tf.truncated_normal([3, 3, 3, 64], stddev=0.1))
self.hy_conv1_biases = tf.Variable(tf.zeros([64]))
#Conv layer
#[patch_size, patch_size, depth, depth]
self.hy_conv2_weights = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1))
self.hy_conv2_biases = tf.Variable(tf.random_normal(shape=[128]))
#Conv layer
#[patch_size, patch_size, depth, depth]
self.hy_conv3_weights = tf.Variable(tf.truncated_normal([3, 3, 128, 256], stddev=0.1)) #was[3, 3, 128, 256]
self.hy_conv3_biases = tf.Variable(tf.random_normal(shape=[256]))
#Dense layer
#[ 8*8 * previous_layer_out , num_hidden] wd1
#here 8*8 is the size of the feature map after pool reduction (64 halved 3 times)
self.hy_dense1_weights = tf.Variable(tf.truncated_normal([8 * 8 * 256, 256], stddev=0.1)) #was [5*5*256, 1024]
self.hy_dense1_biases = tf.Variable(tf.random_normal(shape=[256]))
#Dense layer
#[ , num_hidden] wd2
#self.hy_dense2_weights = tf.Variable(tf.truncated_normal([256, 256], stddev=0.01))
#self.hy_dense2_biases = tf.Variable(tf.random_normal(shape=[256]))
#Output layer
self.hy_out_weights = tf.Variable(tf.truncated_normal([256, self._num_labels], stddev=0.1))
self.hy_out_biases = tf.Variable(tf.random_normal(shape=[self._num_labels]))
# dropout (keep probability)
#self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# Model.
def model(data):
X = tf.reshape(data, shape=[-1, 64, 64, 3])
if(DEBUG == True): print("SHAPE X: " + str(X.get_shape()))
# Convolution Layer 1
conv1 = tf.tanh(tf.nn.bias_add(tf.nn.conv2d(X, self.hy_conv1_weights, strides=[1, 1, 1, 1], padding='SAME'),self.hy_conv1_biases))
if(DEBUG == True): print("SHAPE conv1: " + str(conv1.get_shape()))
# Max Pooling (down-sampling)
pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if(DEBUG == True): print("SHAPE pool1: " + str(pool1.get_shape()))
# Apply Normalization
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Apply Dropout
#norm1 = tf.nn.dropout(norm1, _dropout)
# Convolution Layer 2
conv2 = tf.tanh(tf.nn.bias_add(tf.nn.conv2d(norm1, self.hy_conv2_weights, strides=[1, 1, 1, 1], padding='SAME'),self.hy_conv2_biases))
if(DEBUG == True): print("SHAPE conv2: " + str(conv2.get_shape()))
# Max Pooling (down-sampling)
pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if(DEBUG == True): print("SHAPE pool2: " + str(pool2.get_shape()))
# Apply Normalization
norm2 = tf.nn.lrn(pool2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Apply Dropout
#norm2 = tf.nn.dropout(norm2, _dropout)
# Convolution Layer 3
conv3 = tf.tanh(tf.nn.bias_add(tf.nn.conv2d(norm2, self.hy_conv3_weights, strides=[1, 1, 1, 1], padding='SAME'),self.hy_conv3_biases))
if(DEBUG == True): print("SHAPE conv3: " + str(conv3.get_shape()))
# Max Pooling (down-sampling)
pool3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if(DEBUG == True): print("SHAPE pool3: " + str(pool3.get_shape()))
# Apply Normalization
norm3 = tf.nn.lrn(pool3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Fully connected layer 4
dense1 = tf.reshape(norm3, [-1, self.hy_dense1_weights.get_shape().as_list()[0]]) # Flatten norm3 for the dense layer
if(DEBUG == True): print("SHAPE dense1: " + str(dense1.get_shape()))
dense1 = tf.tanh(tf.matmul(dense1, self.hy_dense1_weights) + self.hy_dense1_biases)
#Fully connected layer 5
#dense2 = tf.tanh(tf.matmul(dense1, self.hy_dense2_weights) + self.hy_dense2_biases)
#if(DEBUG == True): print("SHAPE dense2: " + str(dense2.get_shape()))
#Output layer 6
out = tf.tanh(tf.matmul(dense1, self.hy_out_weights) + self.hy_out_biases)
if(DEBUG == True): print("SHAPE out: " + str(out.get_shape()))
return out
# Get the result from the model
self.cnn_output = model(self.tf_yaw_input_vector)
def return_yaw(self, image):
""" Return the yaw angle associated with the input image.
@param image a square colour image of the head, at least 64x64 pixels
"""
#Uncomment if you want to see the image
#cv2.imshow('image',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
h, w, d = image.shape
#check if the image has the right shape
if(h == w and h==64 and d==3):
image_normalised = np.add(image, -127) #normalisation of the input
feed_dict = {self.tf_yaw_input_vector : image_normalised}
yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict)
yaw_vector = np.multiply(yaw_raw, 100.0)
#yaw = yaw_raw #* 100 #cnn out is in range [-1, +1] --> [-100, + 100]
return yaw_vector
#If the image is > 64 pixel then resize it
if(h == w and h>64 and d==3):
image_resized = cv2.resize(image, (64, 64), interpolation = cv2.INTER_AREA)
image_normalised = np.add(image_resized, -127) #normalisation of the input
feed_dict = {self.tf_yaw_input_vector : image_normalised}
yaw_raw = self._sess.run([self.cnn_output], feed_dict=feed_dict)
yaw_vector = np.multiply(yaw_raw, 100.0) #cnn-out is in range [-1, +1] --> [-100, + 100]
return yaw_vector
#wrong shape
if(h != w or w<64 or h<64):
raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input has wrong shape. Height and Width must be >= 64 pixel')
#wrong number of channels
if(d!=3):
raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the image given as input does not have 3 channels, this function accepts only colour images.')
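# --- Editor's addition: end-to-end usage sketch for the estimator above ---
# Not part of the original file. The checkpoint and image paths are hypothetical;
# it assumes a TF1-style session as used elsewhere in this module, and the image
# must be a square colour crop of the head, at least 64x64 pixels.
def example_estimate_yaw(yaw_checkpoint_path, image_path):
    sess = tf.Session()
    estimator = CnnHeadPoseEstimator(sess)
    estimator.allocate_yaw_variables()
    estimator.load_yaw_variables(yaw_checkpoint_path)
    image = cv2.imread(image_path)  # BGR colour image, e.g. a 64x64 face crop
    return estimator.return_yaw(image)  # scaled to roughly [-100, +100]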
| mit | 2,551,760,302,004,005,000 | 45.785714 | 158 | 0.582032 | false |
jrichte43/ProjectEuler | Problem-0345/solutions.py | 1 | 2222 |
__problem_title__ = "Matrix Sum"
__problem_url___ = "https://projecteuler.net/problem=345"
__problem_description__ = "We define the Matrix Sum of a matrix as the maximum sum of matrix " \
"elements with each element being the only one in his row and column. " \
"For example, the Matrix Sum of the matrix below equals 3315 ( = 863 + " \
"383 + 343 + 959 + 767): 7 53 183 439 497 563 79 973 287 63 169 583 " \
"627 343 773 943 473 103 699 303 Find the Matrix Sum of: 7 53 183 439 " \
"863 497 383 563 79 973 287 63 343 169 583 627 343 773 959 943 767 473 " \
"103 699 303 957 703 583 639 913 447 283 463 29 23 487 463 993 119 883 " \
"327 493 423 159 743 217 623 3 399 853 407 103 983 89 463 290 516 212 " \
"462 350 960 376 682 962 300 780 486 502 912 800 250 346 172 812 350 " \
"870 456 192 162 593 473 915 45 989 873 823 965 425 329 803 973 965 " \
"905 919 133 673 665 235 509 613 673 815 165 992 326 322 148 972 962 " \
"286 255 941 541 265 323 925 281 601 95 973 445 721 11 525 473 65 511 " \
"164 138 672 18 428 154 448 848 414 456 310 312 798 104 566 520 302 " \
"248 694 976 430 392 198 184 829 373 181 631 101 969 613 840 740 778 " \
"458 284 760 390 821 461 843 513 17 901 711 993 293 157 274 94 192 156 " \
"574 34 124 4 878 450 476 712 914 838 669 875 299 823 329 699 815 559 " \
"813 459 522 788 168 586 966 232 308 833 251 631 107 813 883 451 509 " \
"615 77 281 613 459 205 380 274 302 35 805"
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
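# --- Editor's addition: hedged sketch for solution1(), not part of the original file ---
# The Matrix Sum is an assignment problem, so one way to solve it is to maximise the
# row/column assignment with scipy.optimize.linear_sum_assignment on the negated
# matrix. The 5x5 matrix below is the worked example from the problem statement
# (Matrix Sum 3315); the full 15x15 problem matrix would be handled the same way.
def example_matrix_sum():
    import numpy as np
    from scipy.optimize import linear_sum_assignment
    matrix = np.array([
        [7, 53, 183, 439, 863],
        [497, 383, 563, 79, 973],
        [287, 63, 343, 169, 583],
        [627, 343, 773, 959, 943],
        [767, 473, 103, 699, 303],
    ])
    rows, cols = linear_sum_assignment(-matrix)  # maximise by negating
    return int(matrix[rows, cols].sum())  # 3315 for this example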
| gpl-3.0 | -5,434,478,510,974,057,000 | 51.904762 | 100 | 0.536004 | false |
skdaccess/skdaccess | skdaccess/engineering/la/generic/stream.py | 2 | 3472 | # The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Standard library imports
from collections import OrderedDict
from io import StringIO
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party imports
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
import pandas as pd
class DataFetcher(DataFetcherStream):
"""
Class for handling data requests to data.lacity.org
"""
def __init__(self, endpoint, parameters, label, verbose=False, app_token = None, **pandas_kwargs):
"""
Initialize Data Fetcher for accessing data.lacity.org
@param endpoint: Data endpoint string
@param parameters: Parameters to use when retrieving data
@param label: Label of the pandas dataframe
@param verbose: Print out extra information
@param app_token: Application token to use to avoid throttling issues
@param pandas_kwargs: Any additional keyword arguments are passed to pandas.read_csv
"""
self.base_url = 'https://data.lacity.org/resource/'
self.base_url_and_endpoint = self.base_url + endpoint + '.csv?'
self.parameters = parameters
self.label = label
self.app_token = app_token
self.pandas_kwargs = pandas_kwargs
if '$$app_token' in parameters:
raise RuntimeError("Use app_token option in constructor instead of manually " +
"adding it into the parameters")
if app_token != None:
self.parameters['$$app_token'] = app_token
super(DataFetcher, self).__init__([], verbose)
def output(self):
"""
Retrieve data from data.lacity.org
@return Table wrapper containing the specified data
"""
data_dict = OrderedDict()
url_query = self.base_url_and_endpoint + urlencode(self.parameters)
with urlopen(url_query) as remote_resource:
raw_string = remote_resource.read().decode()
string_data = StringIO(raw_string)
data_dict[self.label] = pd.read_csv(string_data, **self.pandas_kwargs)
return TableWrapper(data_dict)
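# --- Editor's addition: illustrative use of the DataFetcher above ---
# Not part of the original module. The endpoint id and SoQL-style parameters are
# hypothetical placeholders; a real call needs a valid data.lacity.org dataset id.
def example_fetch(app_token=None):
    fetcher = DataFetcher(endpoint='dataset-id-here',
                          parameters={'$limit': 1000},
                          label='lacity_data',
                          app_token=app_token)
    table_wrapper = fetcher.output()
    return table_wrapper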
| mit | -1,057,145,586,396,667,600 | 38.908046 | 102 | 0.702477 | false |
saullocastro/pyNastran | pyNastran/converters/dev/ansys/ansys.py | 1 | 5748 | from numpy import zeros, array
class Ansys(object):
def __init__(self, log=None, debug=False):
pass
def read_ansys(self, ansys_filename):
with open(ansys_filename, 'r') as ansys_file:
lines = ansys_file.readlines()
nodes = []
elements = {}
i = 0
nlines = len(lines)
while i < nlines:
line = lines[i].strip()
if line.startswith(r'/nolist'):
i += 4
# line = (1i9,3e20.9e3)
snodes = []
i += 1
line = lines[i]
nnodes = 0
while not line.startswith('-1'):
#print('a =', line)
#snode = [float(val) for val in line.strip().split()[1:]]
snode = line.strip().split()[1:]
if len(snode) != 3:
print(snode)
print(line)
print(lines[i])
print(lines[i-1])
print(lines[i-2])
raise RuntimeError('expected 3 node coordinates, got %r' % snode)
snodes.append(snode)
line = lines[i]
#print(line)
i += 1
nnodes += 1
#print(snodes[:5])
#nodes = array(snodes, dtype='float32')
print('****%r' % line)
# nnodes = 793310
#asdf2
#line = lines[i]
#print(line)
i -= 1
#asdf
elif line.startswith('/wb,elem,start'):
i += 1
line = lines[i]
while line.startswith('/com'):
i += 1
et_line = lines[i].strip()
fmt_line = lines[i+2].strip()
i += 3
line = lines[i]
if fmt_line == '(19i9)':
# eblock,19,solid,,71892
while not line.startswith('-1'):
# 1 1 1 1 0 0 0 0 10 0 697401 1297419 1304724 1297455 1302783 2097856 2097997 2097853 2097855
# 2109421 2097995
# 27 27 27 27 0 0 0 0 10 0 387759 631841 659167 639072 631842 675592 723723 675588 675585
# 675599 675595
line = lines[i].strip() + lines[i+1].strip()
i += 2
print(line)
sline = line.split()
a = sline[0]
b = sline[1]
c = sline[2]
d = sline[3]
assert a == b, 'a=%r b=%r c=%r d=%r' % (a, b, c, d)
assert a == c, 'a=%r b=%r c=%r d=%r' % (a, b, c, d)
assert a == d, 'a=%r b=%r c=%r d=%r' % (a, b, c, d)
e = sline[4]
f = sline[5]
g = sline[6]
h = sline[7]
assert e == f, 'e=%r f=%r g=%r h=%r' % (e, f, g, h)
assert e == g, 'e=%r f=%r g=%r h=%r' % (e, f, g, h)
assert e == h, 'e=%r f=%r g=%r h=%r' % (e, f, g, h)
#asdf
else:
raise NotImplementedError(fmt_line)
print(line)
raise NotImplementedError('parsing beyond the element block is not implemented yet')
else:
if line.startswith('/'):
print(line)
i += 1
def main():
model = Ansys()
ansys_filename = 'ds.dat'
model.read_ansys(ansys_filename)
if __name__ == '__main__': # pragma: no cover
main()
"""
/com,*********** Create Remote Point "Internal Remote Point 39" ***********
! -------- Remote Point Used by "Fixed - Line Body To EndCap 14054021-1 d" --------
*set,_npilot,803315
_npilot474=_npilot
et,332,170
type,332
real,332
mat,332
keyo,332,2,1 ! don't fix pilot node
keyo,332,4,0 ! MPC for all DOF's
tshape,pilo
en,501901,803315 ! create pilot node for rigid link
tshape
en,501902,803315,127827
/com,*********** Create Remote Point "Internal Remote Point 40" ***********
! -------- Remote Point Used by "Fixed - Line Body To EndCap 14054021-1 d" --------
*set,tid,334
*set,cid,333
et,cid,175
et,tid,170
keyo,tid,2,1 ! Don't fix the pilot node
keyo,tid,4,111111
keyo,cid,12,5 ! Bonded Contact
keyo,cid,4,0 ! Rigid CERIG style load
keyo,cid,2,2 ! MPC style contact
mat,333
real,333
type,333
en,501903,418114
en,501904,418115
en,501905,418116
en,501906,418117
en,501907,418118
en,501908,418119
en,501909,418120
en,501910,418121
en,501911,418122
en,501912,418123
en,501913,418124
en,501914,427511
en,501915,427512
en,501916,427518
en,501917,427524
en,501918,427528
en,501919,427533
en,501920,427539
en,501921,427544
en,501922,427551
en,501923,427562
en,501924,427569
*set,_npilot,803316
_npilot475=_npilot
type,tid
mat ,cid
real,cid
tshape,pilo
en,501925,_npilot
tshape
"""
"""
et,2,187
et,27,187 # element, group 27, element_type=187 -> tet10
et,30,188
etype nastran_name
187 tet10
186 hexa20
188 beam
eblock,19,solid,,213
eblock,19,solid,,8
#----------------------------------------------------------------
et,_jid,184
et,tid,170
et,cid,174
keyo,tid,2,1 ! Don't fix the pilot node
keyo,tid,4,111111
keyo,cid,12,5 ! Bonded Contact
keyo,cid,4,2 ! Rigid CERIG style load
keyo,cid,2,2 ! MPC style contact
eblock,10,,,16
""" | lgpl-3.0 | -8,193,591,645,404,051,000 | 28.634021 | 200 | 0.428323 | false |
hyperspy/hyperspyUI | hyperspyui/modelwrapper.py | 1 | 8552 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Tue Nov 04 16:25:54 2014
@author: Vidar Tonaas Fauske
"""
from qtpy import QtCore
#from hyperspy.model import Model
import hyperspy.models.eelsmodel
from .actionable import Actionable
from functools import partial
from hyperspyui.widgets.stringinput import StringInputDialog
# TODO: Add smartfit for EELSModel
def tr(text):
return QtCore.QCoreApplication.translate("ModelWrapper", text)
class ModelWrapper(Actionable):
added = QtCore.Signal((object, object), (object,))
removed = QtCore.Signal((object, object), (object,))
def __init__(self, model, signal_wrapper, name):
super(ModelWrapper, self).__init__()
self.model = model
self.signal = signal_wrapper
self.name = name
if self.signal.signal is not self.model.signal:
raise ValueError("SignalWrapper doesn't match model.signal")
self.components = {}
self.update_components()
self.fine_structure_enabled = False
# Default actions
self.add_action('plot', tr("&Plot"), self.plot)
self.add_action('fit', tr("&Fit"), self.fit)
self.add_action('multifit', tr("&Multifit"), self.multifit)
self.add_action('set_signal_range', tr("Set signal &range"),
self.set_signal_range)
if isinstance(self.model, hyperspy.models.eelsmodel.EELSModel):
self.add_action('lowloss', tr("Set low-loss"), self.set_lowloss)
self.add_action('fine_structure', tr("Enable fine &structure"),
self.toggle_fine_structure)
f = partial(self.signal.remove_model, self)
self.add_action('delete', tr("&Delete"), f)
def plot(self):
self.signal.keep_on_close = True
self.model.plot()
self.signal.keep_on_close = False
self.signal.update_figures()
self.signal.signal_plot.setProperty('hyperspyUI.ModelWrapper', self)
def update_plot(self):
self.model.update_plot()
def record_code(self, code):
self.signal.mainwindow.record_code("model = ui.get_selected_model()")
self.signal.mainwindow.record_code(code)
def _args_for_record(self, args, kwargs):
argstr = str(args)[1:-1]
kwargstr = str(kwargs)[1:-1]
kwargstr = kwargstr.replace(": ", "=")
if argstr and kwargstr:
return ", ".join((argstr, kwargstr))
else:
return argstr + kwargstr
def fit(self, *args, **kwargs):
self.signal.keep_on_close = True
self.model.fit(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.fit(%s)" %
self._args_for_record(args, kwargs))
def multifit(self, *args, **kwargs):
self.signal.keep_on_close = True
self.model.multifit(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.multifit(%s)" %
self._args_for_record(args, kwargs))
def smartfit(self, *args, **kwargs):
if hasattr(self.model, 'smartfit'):
self.signal.keep_on_close = True
self.model.smartfit(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.smartfit(%s)" %
self._args_for_record(args, kwargs))
def fit_component(self, component):
# This is a non-blocking call, which means the normal keep_on_close +
# update_figures won't work. To make sure we keep our figures,
# we force a plot first if it is not active already.
if not self.model.signal._plot.is_active:
self.plot()
self.model.fit_component(component)
self.record_code("model.fit_component(%s)" % component.name)
def set_signal_range(self, *args, **kwargs):
self.signal.keep_on_close = True
self.model.set_signal_range(*args, **kwargs)
self.signal.keep_on_close = False
self.signal.update_figures()
self.record_code("model.set_signal_range(%s)" %
self._args_for_record(args, kwargs))
def set_lowloss(self, signal=None):
if signal is None:
signal = self.signal.mainwindow.select_x_signals(
1, ['Select low-loss'])
if signal is None:
return
self.model.lowloss = signal.signal
self.record_code("model.set_lowloss(low_loss_signal)")
def toggle_fine_structure(self):
if not isinstance(self.model, hyperspy.models.eelsmodel.EELSModel):
raise TypeError(
tr("Model is not EELS model. Can not toggle fine structure"))
if self.fine_structure_enabled:
self.model.disable_fine_structure()
self.actions['fine_structure'].setText(
tr("Enable fine &structure"))
self.record_code("model.disable_fine_structure()")
else:
self.model.enable_fine_structure()
self.actions['fine_structure'].setText(
tr("Disable fine &structure"))
self.record_code("model.enable_fine_structure()")
self.fine_structure_enabled = not self.fine_structure_enabled
def update_components(self):
"""
Updates internal component list to match model's list (called e.g.
after console execute and in constructor)
"""
# Add missing
for c in self.model:
if c.name not in list(self.components.keys()):
self.components[c.name] = c
self.component_added(c)
# Remove lingering
ml = [c.name for c in self.model]
rm = [cn for cn in self.components.keys() if cn not in ml]
for n in rm:
c = self.components.pop(n)
self.component_removed(c)
def add_component(self, component):
if isinstance(component, type):
nec = ['EELSCLEdge', 'Spline', 'ScalableFixedPattern']
if component.__name__ in nec:
raise TypeError(
tr("Component of type %s currently not supported")
% component)
elif component.__name__ == 'Expression':
dlg = StringInputDialog(prompt="Enter expression:")
expression = dlg.prompt_modal(rejection=None)
if expression:
component = component(expression, 'Expression')
else:
return
else:
component = component()
added = False
if component not in self.model:
self.model.append(component)
added = True
self.record_code("model.append(%s)" % component.name)
if component.name not in self.components:
self.components[component.name] = component
added = True
if added:
self.component_added(component)
def remove_component(self, component):
removed = False
if component in self.model:
self.model.remove(component)
self.record_code("model.remove(%s)" % component.name)
removed = True
if component.name in self.components:
self.components.pop(component.name)
removed = True
if removed:
self.component_removed(component)
def component_added(self, component):
self.update_plot()
self.added[object, object].emit(component, self)
self.added[object].emit(component)
def component_removed(self, component):
self.update_plot()
self.removed[object, object].emit(component, self)
self.removed[object].emit(component)
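# --- Editor's addition: hedged sketch of typical interactive use, not part of the
# original module. `ui` refers to the MainWindow object exposed in the HyperSpyUI
# console (the same object the record_code() strings above assume), and the
# component class comes from hyperspy.components1d.
#
#     import hyperspy.components1d as c1d
#     model = ui.get_selected_model()    # ModelWrapper for the active signal
#     model.add_component(c1d.Gaussian)  # type is instantiated and appended
#     model.fit()
#     model.multifit()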
| gpl-3.0 | 8,070,357,070,856,363,000 | 36.840708 | 77 | 0.605005 | false |
douglas/toxiproxy-python | toxiproxy/server.py | 1 | 3386 | # coding: utf-8
from future.utils import raise_with_traceback, viewitems, listvalues
from .api import APIConsumer
from .proxy import Proxy
from .exceptions import ProxyExists
from .utils import can_connect_to
class Toxiproxy(object):
""" Represents a Toxiproxy server """
def proxies(self):
""" Returns all the proxies registered in the server """
proxies = APIConsumer.get("/proxies").json()
proxies_dict = {}
for name, values in viewitems(proxies):
# Lets create a Proxy object to hold all its data
proxy = Proxy(**values)
# Add the new proxy to the toxiproxy proxies collection
proxies_dict.update({name: proxy})
return proxies_dict
def destroy_all(self):
proxies = listvalues(self.proxies())
for proxy in proxies:
self.destroy(proxy)
def get_proxy(self, proxy_name):
""" Retrieve a proxy if it exists """
proxies = self.proxies()
if proxy_name in proxies:
return proxies[proxy_name]
else:
return None
def running(self):
""" Test if the toxiproxy server is running """
return can_connect_to(APIConsumer.host, APIConsumer.port)
def version(self):
""" Get the toxiproxy server version """
if self.running() is True:
return APIConsumer.get("/version").content
else:
return None
def reset(self):
""" Re-enables all proxies and disables all toxics. """
return bool(APIConsumer.post("/reset"))
def create(self, upstream, name, listen=None, enabled=None):
""" Create a toxiproxy proxy """
if name in self.proxies():
raise_with_traceback(ProxyExists("This proxy already exists."))
# Lets build a dictionary to send the data to the Toxiproxy server
json = {
"upstream": upstream,
"name": name
}
if listen is not None:
json["listen"] = listen
else:
json["listen"] = "127.0.0.1:0"
if enabled is not None:
json["enabled"] = enabled
proxy_info = APIConsumer.post("/proxies", json=json).json()
proxy_info["api_consumer"] = APIConsumer
# Lets create a Proxy object to hold all its data
proxy = Proxy(**proxy_info)
return proxy
def destroy(self, proxy):
""" Delete a toxiproxy proxy """
if isinstance(proxy, Proxy):
return proxy.destroy()
else:
return False
def populate(self, proxies):
""" Create a list of proxies from an array """
populated_proxies = []
for proxy in proxies:
existing = self.get_proxy(proxy["name"])
if existing is not None and (existing.upstream != proxy["upstream"] or existing.listen != proxy["listen"]):
self.destroy(existing)
existing = None
if existing is None:
proxy_instance = self.create(**proxy)
populated_proxies.append(proxy_instance)
return populated_proxies
def update_api_consumer(self, host, port):
""" Update the APIConsumer host and port """
APIConsumer.host = host
APIConsumer.port = port
APIConsumer.base_url = "http://%s:%s" % (host, port)
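# --- Editor's addition: minimal usage sketch for the wrapper above ---
# Not part of the original module. It assumes a toxiproxy server is reachable at
# the host/port configured on APIConsumer; the upstream address is a hypothetical
# service under test.
def example_toxiproxy_session():
    server = Toxiproxy()
    if not server.running():
        return None
    proxy = server.create(upstream="127.0.0.1:5432", name="postgres_proxy")
    # ... exercise the service through proxy.listen, then clean up:
    server.destroy(proxy)
    return proxy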
| mit | -5,460,193,500,709,897,000 | 27.453782 | 119 | 0.582989 | false |
studentofdata/market_share_mccvb | docs/conf.py | 1 | 7863 | # -*- coding: utf-8 -*-
#
# market_share_analysis documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'market_share_mccvb'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'market_share_mccvbdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'market_share_mccvb.tex',
u'market_share_mccvb Documentation',
u"Robert Row", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'market_share_mccvb', u'market_share_mccvb Documentation',
[u"Robert Row"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'market_share_mccvb', u'market_share_mccvb Documentation',
u"Robert Row", 'market_share_mccvb',
'A short description of the project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| mit | 5,821,172,262,195,132,000 | 31.093878 | 80 | 0.697317 | false |
robket/BioScripts | alignment.py | 1 | 9138 | import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import toimage
from collections import defaultdict, Counter
from types import SimpleNamespace
from PIL import ImageDraw
# This color table is sourced from https://github.com/trident01/BioExt-1/blob/master/AlignmentImage.java
LIGHT_GRAY = 196
FIXED_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"A": [255, 0, 0],
"C": [255, 255, 0],
"T": [0, 255, 0],
"G": [190, 0, 95],
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
GRAY_GAPS_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
BLACK_COLOR_TABLE = defaultdict(lambda: [0, 0, 0])
class Alignment:
def __init__(self, query_start, query_seq, target_start, target_seq, sequence_name, target_label, expected_errors):
self.name = sequence_name
self.target_label = target_label
self.expected_errors = expected_errors
self.query_start = int(query_start) - 1
self.query_seq = query_seq
query_gap_count = query_seq.count("-")
self.query_length = len(query_seq) - query_gap_count
self.target_start = int(target_start) - 1
self.target_seq = target_seq
target_gap_count = target_seq.count("-")
self.target_length = len(target_seq) - target_gap_count
self.no_gap_length = len(target_seq) - target_gap_count - query_gap_count
if len(target_seq) != len(query_seq):
raise ValueError("Length of target sequence not equal to length of query sequence")
def alignment_iterator(alignment, ignore_case=True, include_gaps=False):
target_index = 0
target_offset = 0
query_index = 0
while target_index < len(alignment.target_seq) and query_index < len(alignment.query_seq):
if alignment.target_seq[target_index] == "-": # If it is an insertion
target_offset += 1
elif alignment.query_seq[query_index] != "-" or include_gaps:
reference_index = alignment.target_start + target_index - target_offset
query_nucleotide = alignment.query_seq[query_index].upper() if ignore_case else alignment.query_seq[query_index]
target_nucleotide = alignment.target_seq[target_index].upper() if ignore_case else alignment.target_seq[target_index]
yield SimpleNamespace(reference_index=reference_index,
target_nucleotide=target_nucleotide,
query_nucleotide=query_nucleotide)
target_index += 1
query_index += 1
def count_mismatches(alignment, ignore_case=True):
mismatch_count = 0
for position in alignment_iterator(alignment, ignore_case):
if position.target_nucleotide != position.query_nucleotide:
mismatch_count += 1
return mismatch_count
def save_expected_error_rates(alignments, output_file):
expected_error_rates = [a.expected_errors / a.query_length for a in alignments]
plt.cla()
plt.hist(expected_error_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Expected Error Rate')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Expected Error Rates')
plt.grid(True)
plt.savefig(output_file)
def save_mismatch_rates(alignments, output_file, ignore_case=True):
mismatch_rates = [count_mismatches(a, ignore_case) / a.no_gap_length for a in alignments]
plt.cla()
plt.hist(mismatch_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Rate of mismatches')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Mismatch Rates')
plt.grid(True)
plt.savefig(output_file)
def gap_distribution(sequence):
dist = Counter()
count_length = 0
for char in sequence:
if char == "-":
count_length += 1
elif count_length > 0:
dist[count_length] += 1
count_length = 0
if count_length > 0:
dist[count_length] += 1
return dist
def save_insertion_or_deletion_dist(alignments, output_file, insertion_not_deletion=True):
size_counter = Counter()
for a in alignments:
size_counter += gap_distribution(a.target_seq if insertion_not_deletion else a.query_seq)
sizes, counts = zip(*size_counter.items())
number_of_bins = max(sizes)
number_of_bins = round(number_of_bins / np.ceil(number_of_bins/50))
plt.cla()
n, bins, patches = plt.hist(sizes, number_of_bins, weights=counts, log=True)
plt.ylim(ymin=0.9)
plt.xlim(xmin=1)
plt.xlabel('Size of insertion' if insertion_not_deletion else 'Size of deletion')
plt.ylabel('Count')
plt.tick_params(which='both', direction='out')
plt.title('Insertion size distribution' if insertion_not_deletion else 'Deletion size distribution')
plt.grid(True)
plt.savefig(output_file)
# Get nucleotide distribution
def nucleotide_distribution(alignments, ignore_case=False, include_gaps=True):
max_index = 0
distribution = defaultdict(Counter)
for a in alignments:
for position in alignment_iterator(a, ignore_case, include_gaps):
distribution[position.reference_index][position.query_nucleotide] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [distribution[i] for i in range(max_index)]
def save_nucleotide_map(alignments, output, ignore_case=True, include_gaps=True):
nucleotides = nucleotide_distribution(alignments, ignore_case, include_gaps)
width = len(nucleotides)
keys = set()
for distribution_at_base in nucleotides:
keys.update(set(distribution_at_base.keys()))
keys = sorted(list(keys), key=lambda x: "ZZZ" if x == "-" else x)
nucleotide_count_array = np.zeros((len(keys), width), dtype=np.uint32)
for i, key in enumerate(keys):
for j, counts in enumerate(nucleotides):
nucleotide_count_array[i, j] = counts[key]
cum_sum = nucleotide_count_array.cumsum(axis=0)
height = cum_sum[-1,].max()
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
for i, key in enumerate(keys):
start = 0 if i == 0 else cum_sum[i - 1, x]
end = cum_sum[i, x]
data_matrix[start:end, x, 0:3] = FIXED_COLOR_TABLE[key]
img = to_image(data_matrix[::-1,], ruler_underneath=True)
img.save(output)
# Get coverage map
def coverage_map(alignments, include_gaps=False):
max_index = 0
coverage = Counter()
for a in alignments:
for position in alignment_iterator(a, True, include_gaps):
coverage[position.reference_index] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [coverage[i] for i in range(max_index)]
def save_coverage_map(alignments, output):
coverage_with_gaps = coverage_map(alignments, True)
coverage_without_gaps = coverage_map(alignments, False)
width = len(coverage_with_gaps)
height = max(coverage_with_gaps)
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
y1 = coverage_without_gaps[x]
y2 = coverage_with_gaps[x]
data_matrix[0:y1, x, 0:3] = 0
data_matrix[y1:y2, x, 0:3] = 127
img = to_image(data_matrix[::-1], add_ruler=True, ruler_underneath=True)
img.save(output)
def save_alignment_map(coords, output_file, sort_key=sum, crop=True, no_ruler=False):
if crop:
minimum = min(coords, key=lambda x: x[0])[0]
else:
minimum = 0
maximum = max(coords, key=lambda x: x[1])[1]
dimensions = (len(coords), maximum - minimum)
data_matrix = np.full((dimensions[0], dimensions[1] + 1), 255, dtype=np.uint8)
if sort_key is not None:
coords.sort(key=sort_key)
is_multiple_alignment = len(coords[0]) > 3 and type(coords[0][3]) == list
# Greyscale over the bounds (or black if not multiple alignment)
for i, coord in enumerate(coords):
start = coord[0]
end = coord[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = LIGHT_GRAY if is_multiple_alignment else 0
# Black over the subalignments, if any
if is_multiple_alignment:
for i, coord in enumerate(coords):
for subalignment in coord[3]:
start = subalignment[0]
end = subalignment[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = 0
img = to_image(data_matrix, not no_ruler, offset=minimum)
img.save(output_file)
def to_image(data_matrix, add_ruler=True, ruler_underneath = False, offset=1):
maximum = offset + data_matrix.shape[1]
if add_ruler:
shape = list(data_matrix.shape)
shape[0] = 12 # Number of rows
ruler_matrix = np.full(shape, 255, dtype=data_matrix.dtype)
# tens ticks
ruler_matrix[0 if ruler_underneath else 11, 10-(offset%10)::10] = 0
# 50s ticks
ruler_matrix[1 if ruler_underneath else 10, 50-(offset%50)::50] = 0
if ruler_underneath:
img = toimage(np.vstack([data_matrix, ruler_matrix]))
else:
img = toimage(np.vstack([ruler_matrix, data_matrix]))
draw = ImageDraw.Draw(img)
# Draw the "100", "200", ... labels along the ruler
for i in range((offset//100) + 1, maximum // 100 + 1):
centering = (6 * (int(np.log10(i)) + 3) - 1) // 2
draw.text((i * 100 - centering - offset, (data_matrix.shape[0] + 2) if ruler_underneath else 0), str(i) + "00", fill="black")
else:
img = toimage(data_matrix)
return img
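# --- Editor's addition: illustrative driver for the plotting helpers above ---
# Not part of the original script. It assumes `alignments` is a list of Alignment
# objects built elsewhere from an alignment report; the output file names are
# hypothetical.
def example_reports(alignments):
    save_expected_error_rates(alignments, "expected_error_rates.png")
    save_mismatch_rates(alignments, "mismatch_rates.png")
    save_coverage_map(alignments, "coverage_map.png")
    save_nucleotide_map(alignments, "nucleotide_map.png")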
| mit | -6,908,880,567,246,453,000 | 37.075 | 131 | 0.680893 | false |
sunrenjie/py-windows-tools | py_windows_tools/windows/events.py | 1 | 3139 | # -*- coding: utf-8 -*-
import re
from oslo_log import log as logging
from py_windows_tools.utilities import misc
LOG = logging.getLogger(__name__)
class WindowsEvents(object):
@staticmethod
def get_command_get_events(category, n):
return ['powershell', 'Get-EventLog %s -newest %d' % (category, n)]
@staticmethod
def get_command_get_parsed_events(category, num_events=None):
if num_events:
return ['powershell', 'Get-EventLog %s -newest %d | Format-List' % (category, num_events)]
else:
return ['powershell', 'Get-EventLog %s | Format-List' % category]
@staticmethod
def get_command_clear_events(category):
return ['powershell', 'Clear-EventLog %s' % category]
@classmethod
def clear_events(cls, category):
cmd = cls.get_command_clear_events(category)
for l in misc.create_process_and_yield_output_lines(cmd):
l = l.rstrip()
if len(l) > 0:
LOG.debug(l)
@staticmethod
def search_string_for_ip_address(s):
search = re.search('[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', s)
if search:
ip = search.group(0)
if ip != '0.0.0.0':
return ip
return None
@classmethod
def yield_lines_from_event_log_file(cls, f):
with open(f, 'r') as h:
for l in h:
yield misc.decode_string_with_unknown_encoding(l)
@classmethod
def yield_login_failure_ips(cls, num_events=None, data_source=None):
"""
Yield the source IP address (string) of each failed logon found in the data source.
:param num_events: number of newest Security events to scan; the whole log
is scanned when None.
:param data_source: a generator that emits one Windows event log line
upon every request; defaults to the Windows
event log system.
:return: yields one IP address string per failed logon event.
"""
if not data_source:
cmd = cls.get_command_get_parsed_events("Security", num_events)
data_source = misc.create_process_and_yield_output_lines(cmd)
within = False
for l in data_source:
if within:
if re.search('^TimeGenerated', l):
within = False
elif re.search(u'源网络地址', l): # TODO: ugly hacking
ip = cls.search_string_for_ip_address(l)
if ip:
yield ip
elif re.search(u'帐户登录失败。', l):
within = True
continue
@classmethod
def get_significant_login_failure_ips_by_count(cls, num_events, num_failures):
addr2count = {}
for ip in cls.yield_login_failure_ips(num_events):
if ip in addr2count:
addr2count[ip] += 1
else:
addr2count[ip] = 1
LOG.debug("login error statistics {IP => count} are: %s" % str(addr2count))
ips = set()
for a, c in addr2count.iteritems():
if c > num_failures:
ips.add(a)
LOG.debug("significant login error IPs are: %s" % ','.join(sorted(ips)))
return ips
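# --- Editor's addition: illustrative use of the class above ---
# Not part of the original module. It must run on a Windows host with PowerShell
# and enough privilege to read the Security event log; the thresholds below are
# hypothetical.
def example_block_candidates():
    # IPs with more than 5 failed logons among the newest 1000 Security events
    return WindowsEvents.get_significant_login_failure_ips_by_count(
        num_events=1000, num_failures=5)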
| apache-2.0 | -7,706,443,120,105,495,000 | 34 | 102 | 0.547673 | false |
dnjohnstone/hyperspy | hyperspy/tests/utils/test_stack.py | 1 | 4213 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy import utils
from hyperspy.signal import BaseSignal
class TestUtilsStack:
def setup_method(self, method):
s = BaseSignal(np.random.random((3, 2, 5)))
s.axes_manager.set_signal_dimension(1)
s.axes_manager[0].name = "x"
s.axes_manager[1].name = "y"
s.axes_manager[2].name = "E"
s.axes_manager[2].scale = 0.5
s.metadata.General.title = 'test'
self.signal = s
def test_stack_default(self):
s = self.signal
s1 = s.deepcopy() + 1
s2 = s.deepcopy() * 4
test_axis = s.axes_manager[0].index_in_array
result_signal = utils.stack([s, s1, s2])
result_list = result_signal.split()
assert test_axis == s.axes_manager[0].index_in_array
assert len(result_list) == 3
np.testing.assert_array_almost_equal(
result_list[0].data, result_signal.inav[:, :, 0].data)
def test_stack_number_of_parts(self):
s = self.signal
s1 = s.deepcopy() + 1
s2 = s.deepcopy() * 4
test_axis = s.axes_manager[0].index_in_array
result_signal = utils.stack([s, s1, s2])
result_list = result_signal.split(number_of_parts=3)
assert test_axis == s.axes_manager[0].index_in_array
assert len(result_list) == 3
np.testing.assert_array_almost_equal(
result_list[0].data, result_signal.inav[:, :, 0].data)
def test_stack_of_stack(self):
s = self.signal
s1 = utils.stack([s] * 2)
s2 = utils.stack([s1] * 3)
s3 = s2.split()[0]
s4 = s3.split()[0]
np.testing.assert_array_almost_equal(s4.data, s.data)
assert not hasattr(s4.original_metadata, 'stack_elements')
assert s4.metadata.General.title == 'test'
def test_stack_not_default(self):
s = self.signal
s1 = s.inav[:, :-1] + 1
s2 = s.inav[:, ::2] * 4
result_signal = utils.stack([s, s1, s2], axis=1)
axis_size = s.axes_manager[1].size
axs1 = s1.axes_manager[1].size
axs2 = s2.axes_manager[1].size
result_list = result_signal.split()
assert len(result_list) == 3
for rs in [result_signal, utils.stack([s, s1, s2], axis='y')]:
np.testing.assert_array_almost_equal(
result_list[0].data, rs.inav[:, :axis_size].data)
np.testing.assert_array_almost_equal(
s.data, rs.inav[:, :axis_size].data)
np.testing.assert_array_almost_equal(
s1.data, rs.inav[:, axis_size:axis_size + axs1].data)
np.testing.assert_array_almost_equal(
s2.data, rs.inav[:, axis_size + axs1:].data)
def test_stack_bigger_than_ten(self):
s = self.signal
list_s = [s] * 12
list_s.append(s.deepcopy() * 3)
list_s[-1].metadata.General.title = 'test'
s1 = utils.stack(list_s)
res = s1.split()
np.testing.assert_array_almost_equal(list_s[-1].data, res[-1].data)
assert res[-1].metadata.General.title == 'test'
def test_stack_broadcast_number(self):
s = self.signal
rs = utils.stack([5, s])
np.testing.assert_array_equal(
rs.inav[..., 0].data, 5 * np.ones((3, 2, 5)))
def test_stack_broadcast_number_not_default(self):
s = self.signal
rs = utils.stack([5, s], axis='E')
np.testing.assert_array_equal(rs.isig[0].data, 5 * np.ones((3, 2)))
| gpl-3.0 | 5,465,673,633,688,604,000 | 37.3 | 75 | 0.59625 | false |
bram85/topydo | topydo/commands/ListCommand.py | 1 | 10992 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
from topydo.lib.Config import config
from topydo.lib.ExpressionCommand import ExpressionCommand
from topydo.lib.Filter import HiddenTagFilter, InstanceFilter
from topydo.lib.ListFormat import ListFormatError
from topydo.lib.prettyprinters.Format import PrettyPrinterFormatFilter
from topydo.lib.printers.PrettyPrinter import pretty_printer_factory
from topydo.lib.Sorter import Sorter
from topydo.lib.TodoListBase import InvalidTodoException
from topydo.lib.Utils import get_terminal_size
from topydo.lib.View import View
class ListCommand(ExpressionCommand):
def __init__(self, p_args, p_todolist, #pragma: no branch
p_out=lambda a: None,
p_err=lambda a: None,
p_prompt=lambda a: None):
super().__init__(
p_args, p_todolist, p_out, p_err, p_prompt)
self.printer = None
self.sort_expression = config().sort_string()
self.group_expression = config().group_string()
self.show_all = False
self.ids = None
self.format = config().list_format()
def _poke_icalendar(self):
"""
Attempts to import the icalendar package. Returns True if it
succeeds, otherwise False.
"""
try:
import icalendar as _
except ImportError: # pragma: no cover
self.error("icalendar package is not installed.")
return False
return True
def _process_flags(self):
opts, args = self.getopt('f:F:g:i:n:Ns:x')
for opt, value in opts:
if opt == '-x':
self.show_all = True
elif opt == '-s':
self.sort_expression = value
elif opt == '-f':
if value == 'json':
from topydo.lib.printers.Json import JsonPrinter
self.printer = JsonPrinter()
elif value == 'ical':
if self._poke_icalendar():
from topydo.lib.printers.Ical import IcalPrinter
self.printer = IcalPrinter(self.todolist)
elif value == 'dot':
from topydo.lib.printers.Dot import DotPrinter
self.printer = DotPrinter(self.todolist)
# a graph without dependencies is not so useful, hence
# show all
self.show_all = True
else:
self.printer = None
elif opt == '-F':
self.format = value
elif opt == '-g':
self.group_expression = value
elif opt == '-N':
# 2 lines are assumed to be taken up by printing the next prompt
# display at least one item
self.limit = ListCommand._N_lines()
elif opt == '-n':
try:
self.limit = int(value)
except ValueError:
pass # use default value in configuration
elif opt == '-i':
self.ids = value.split(',')
# when a user requests a specific ID, it should always be shown
self.show_all = True
self.args = args
def _filters(self):
"""
Additional filters to:
- select particular todo items given with the -i flag,
        - hide appropriately tagged items in the absence of the -x flag.
"""
filters = super()._filters()
if self.ids:
def get_todo(p_id):
"""
Safely obtains a todo item given the user-supplied ID.
Returns None if an invalid ID was entered.
"""
try:
return self.todolist.todo(p_id)
except InvalidTodoException:
return None
todos = [get_todo(i) for i in self.ids]
filters.append(InstanceFilter(todos))
if not self.show_all:
filters.append(HiddenTagFilter())
return filters
def _print(self):
"""
Prints the todos in the right format.
Defaults to normal text output (with possible colors and other pretty
printing). If a format was specified on the commandline, this format is
sent to the output.
"""
if self.printer is None:
# create a standard printer with some filters
indent = config().list_indent()
final_format = ' ' * indent + self.format
filters = []
filters.append(PrettyPrinterFormatFilter(self.todolist, final_format))
self.printer = pretty_printer_factory(self.todolist, filters)
try:
if self.group_expression:
self.out(self.printer.print_groups(self._view().groups))
else:
self.out(self.printer.print_list(self._view().todos))
except ListFormatError:
self.error('Error while parsing format string (list_format config'
' option or -F)')
def _view(self):
sorter = Sorter(self.sort_expression, self.group_expression)
filters = self._filters()
return View(sorter, filters, self.todolist)
@staticmethod
def _N_lines():
''' Determine how many lines to print, such that the number of items
        displayed will fit on the terminal (i.e. one 'screen-ful' of items).
        This looks at the prompt environment variable, and tries to determine
        how many lines it takes up.
        On Windows, it does this by looking for the '$_' sequence, which indicates
        a new line, in the environment variable PROMPT.
        Otherwise, it looks for a newline ('\n') in the environment variable
PS1.
'''
lines_in_prompt = 1 # prompt is assumed to take up one line, even
# without any newlines in it
if "win32" in sys.platform:
lines_in_prompt += 1 # Windows will typically print a free line after
# the program output
a = re.findall(r'\$_', os.getenv('PROMPT', ''))
lines_in_prompt += len(a)
else:
a = re.findall('\\n', os.getenv('PS1', ''))
lines_in_prompt += len(a)
n_lines = get_terminal_size().lines - lines_in_prompt
# print a minimum of one item
n_lines = max(n_lines, 1)
return n_lines
def execute(self):
if not super().execute():
return False
try:
self._process_flags()
except SyntaxError: # pragma: no cover
# importing icalendar failed, most likely due to Python 3.2
self.error("icalendar is not supported in this Python version.")
return False
self._print()
return True
def usage(self):
return """Synopsis: ls [-x] [-s <SORT EXPRESSION>]
[-g <GROUP EXPRESSION>] [-f <OUTPUT FORMAT>] [-F <FORMAT STRING>]
[-i <NUMBER 1>[,<NUMBER 2> ...]] [-N | -n <INTEGER>] [EXPRESSION]"""
def help(self):
return """\
Lists all relevant todos. A todo is relevant when:
* has not been completed yet,
* the start date (if present) has passed, and
* there are no subitems that need to be completed.
When an EXPRESSION is given, only the todos matching that EXPRESSION are shown.
-f : Specify the OUTPUT format, being 'text' (default), 'dot', 'ical' or
'json'.
* 'text' - Text output with colors and indentation if applicable.
* 'dot' - Prints a dependency graph for the selected items in GraphViz
Dot format.
* 'ical' - iCalendar (RFC 2445). Is not supported in Python 3.2. Be aware
that this is not a read-only operation, todo items may obtain
an 'ical' tag with a unique ID. Completed todo items may be
archived.
* 'json' - Javascript Object Notation (JSON)
-F : Specify the format of the text ('text' format), which may contain
     placeholders that are expanded if the todo has such an attribute. If such an
     attribute does not exist, the placeholder expands to an empty string.
%c: Absolute creation date.
%C: Relative creation date.
%d: Absolute due date.
%D: Relative due date.
%h: Relative due and start date (due in 3 days, started 3 days ago)
%H: Like %h with creation date.
%i: Todo number.
%I: Todo number padded with spaces (always 3 characters wide).
%k: List of tags separated by spaces (excluding hidden tags).
%K: List of all tags separated by spaces.
%l: Line number.
%L: Line number padded with spaces (always 3 characters wide).
%p: Priority.
%P: Priority or placeholder space if no priority.
%s: Todo text.
%S: Todo text, truncated such that an item fits on one line.
        %t: Absolute start date.
        %T: Relative start date.
%u: Todo's text-based ID.
%U: Todo's text-based ID padded with spaces.
%x: 'x' followed by absolute completion date.
%X: 'x' followed by relative completion date.
\%: Literal percent sign.
Conditional characters can be added with blocks surrounded by curly
        braces; they will only appear when a placeholder expands to a value.
E.g. %{(}p{)} will print '(C)' when the todo item has priority C, or ''
(empty string) when an item has no priority set.
A tab character serves as a marker to start right alignment.
-g : Group items according to a GROUP EXPRESSION. A group expression is similar
to a sort expression. Defaults to the group expression in the
configuration.
-i : Comma separated list of todo IDs to print.
-n : Number of items to display. Defaults to the value in the configuration.
-N : Limit number of items displayed such that they fit on the terminal.
-s : Sort the list according to a SORT EXPRESSION. Defaults to the sort
expression in the configuration.
-x : Show all todos (i.e. do not filter on dependencies, relevance, or hidden
status).\
"""
| gpl-3.0 | -670,478,974,875,376,300 | 37.704225 | 86 | 0.593705 | false |
andrewschaaf/pj-closure | js/goog/math.py | 1 | 3556 | #<pre>Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.</pre>
from goog.array import map, reduce
def randomInt(a):
return Math.floor(Math.random() * a)
def uniformRandom(a, b):
'sample from [a, b)'
return a + Math.random() * (b - a)
def clamp(value, min, max):
return Math.min(Math.max(value, min), max)
def modulo(a, b):
r = a % b;
# If r and b differ in sign, add b to wrap the result to the correct sign.
return r + b if (r * b < 0) else r
def lerp(a, b, x):
return a + x * (b - a)
def nearlyEquals(a, b, opt_tolerance):
return Math.abs(a - b) <= (opt_tolerance or 0.000001)
def standardAngle(angle):
return modulo(angle, 360)
def toRadians(angleDegrees):
return angleDegrees * Math.PI / 180
def toDegrees(angleRadians):
return angleRadians * 180 / Math.PI
def angleDx(degrees, radius):
return radius * Math.cos(toRadians(degrees))
def angleDy(degrees, radius):
return radius * Math.sin(toRadians(degrees))
def angle(x1, y1, x2, y2):
return standardAngle(toDegrees(Math.atan2(y2 - y1, x2 - x1)))
def angleDifference(startAngle, endAngle):
d = standardAngle(endAngle) - standardAngle(startAngle)
if (d > 180):
d = d - 360
elif (d <= -180):
d = 360 + d
return d
def sign(x):
return (0 if x == 0 else (
-1 if x < 0 else 1))
def longestCommonSubsequence(array1, array2, opt_compareFn, opt_collectorFn):
compare = opt_compareFn or (lambda a, b: a == b)
collect = opt_collectorFn or (lambda i1, i2: array1[i1])
length1 = array1.length;
length2 = array2.length;
arr = [];
for i in range(length1 + 1):
arr[i] = []
arr[i][0] = 0
for j in range(length2 + 1):
arr[0][j] = 0
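  # Fill the table: arr[i][j] holds the length of the LCS of array1[:i] and array2[:j].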
for i in range(1, length1 + 1):
    for j in range(1, length2 + 1):
if compare(array1[i - 1], array2[j - 1]):
arr[i][j] = arr[i - 1][j - 1] + 1
else:
arr[i][j] = Math.max(arr[i - 1][j], arr[i][j - 1])
# Backtracking
result = [];
i = length1
j = length2
while i > 0 and j > 0:
if compare(array1[i - 1], array2[j - 1]):
result.unshift(collect(i - 1, j - 1))
i -= 1
j -= 1
else:
if arr[i - 1][j] > arr[i][j - 1]:
i -= 1
else:
j -= 1
return result
def sum(var_args):
return reduce(
arguments,
lambda sum, value: sum + value,
0)
def average(var_args):
return sum.apply(None, arguments) / arguments.length
def standardDeviation(var_args):
sampleSize = arguments.length
if sampleSize < 2:
return 0
mean = average.apply(None, arguments)
variance = (
sum.apply(
None,
map(
arguments,
lambda val: Math.pow(val - mean, 2))) /
(sampleSize - 1))
return Math.sqrt(variance)
def isInt(num):
return isFinite(num) and num % 1 == 0
def isFiniteNumber(num):
return isFinite(num) and not isNaN(num)
| apache-2.0 | 244,956,752,805,644,600 | 20.950617 | 77 | 0.603487 | false |
schalkneethling/snippets-service | snippets/base/cache.py | 1 | 1854 | # FROM https://raw.githubusercontent.com/mozilla/bedrock/master/bedrock/base/cache.py
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.core.cache.backends.locmem import LocMemCache
class SimpleDictCache(LocMemCache):
"""A local memory cache that doesn't pickle values.
Only for use with simple immutable data structures that can be
inserted into a dict.
"""
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
if self._has_expired(key):
self._set(key, value, timeout)
return True
return False
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
value = None
with self._lock.reader():
if not self._has_expired(key):
value = self._cache[key]
if value is not None:
return value
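        # Cache miss or expired entry: evict any stale data under the writer lock, then return the default.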
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._set(key, value, timeout)
def incr(self, key, delta=1, version=None):
value = self.get(key, version=version)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
with self._lock.writer():
self._cache[key] = new_value
return new_value
| mpl-2.0 | -7,616,568,562,176,174,000 | 33.333333 | 85 | 0.591154 | false |
juniortada/signxml | setup.py | 1 | 1193 | #!/usr/bin/env python
import os, glob
from setuptools import setup, find_packages
install_requires = [line.rstrip() for line in open(os.path.join(os.path.dirname(__file__), "requirements.txt"))]
setup(
name='signxml',
version='0.4.2',
url='https://github.com/kislyuk/signxml',
license='Apache Software License',
author='Andrey Kislyuk',
author_email='[email protected]',
description='Python XML Signature library',
long_description=open('README.rst').read(),
install_requires=install_requires,
packages = find_packages(exclude=['test']),
platforms=['MacOS X', 'Posix'],
package_data={'signxml': ['schemas/*.xsd']},
zip_safe=False,
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| apache-2.0 | 3,744,242,557,129,585,700 | 34.088235 | 112 | 0.636211 | false |
FRBs/FRB | frb/surveys/panstarrs.py | 1 | 11891 | """
Slurp data from Pan-STARRS catalog using the MAST API.
A lot of this code has been directly taken from
http://ps1images.stsci.edu/ps1_dr2_api.html
"""
import numpy as np
from astropy import units as u,utils as astroutils
from astropy.io import fits
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table
from ..galaxies.defs import PanSTARRS_bands
from .images import grab_from_url
import warnings
import requests
try:
from astroquery.vizier import Vizier
except ImportError:
warnings.warn("Warning: You need to install astroquery to use the survey tools...")
from frb.surveys import surveycoord,catalog_utils,images
from IPython import embed
#TODO: It's potentially viable to use the same code for other
#catalogs in the VizieR database. Maybe a generalization wouldn't
#be too bad in the future.
# Define the data model for Pan-STARRS data
photom = {}
photom['Pan-STARRS'] = {}
for band in PanSTARRS_bands:
# Pre 180301 paper
#photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}'.format(band)] = '{:s}PSFmag'.format(band.lower())
#photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}_err'.format(band)] = '{:s}PSFmagErr'.format(band.lower())
photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}'.format(band)] = '{:s}KronMag'.format(band.lower())
photom["Pan-STARRS"]["Pan-STARRS"+'_{:s}_err'.format(band)] = '{:s}KronMagErr'.format(band.lower())
photom["Pan-STARRS"]["Pan-STARRS_ID"] = 'objID'
photom["Pan-STARRS"]['ra'] = 'raStack'
photom["Pan-STARRS"]['dec'] = 'decStack'
photom["Pan-STARRS"]["Pan-STARRS_field"] = 'field'
# Define the default set of query fields
# See: https://outerspace.stsci.edu/display/PANSTARRS/PS1+StackObjectView+table+fields
# for additional Fields
_DEFAULT_query_fields = ['objID','raStack','decStack','objInfoFlag','qualityFlag',
'rKronRad']#, 'rPSFMag', 'rKronMag']
_DEFAULT_query_fields +=['{:s}PSFmag'.format(band) for band in PanSTARRS_bands]
_DEFAULT_query_fields +=['{:s}PSFmagErr'.format(band) for band in PanSTARRS_bands]
_DEFAULT_query_fields +=['{:s}KronMag'.format(band) for band in PanSTARRS_bands]
_DEFAULT_query_fields +=['{:s}KronMagErr'.format(band) for band in PanSTARRS_bands]
class Pan_STARRS_Survey(surveycoord.SurveyCoord):
"""
    A class to query the Pan-STARRS catalogs hosted by MAST
    for photometry of sources around a given coordinate.
    Inherits from SurveyCoord.
"""
def __init__(self,coord,radius,**kwargs):
surveycoord.SurveyCoord.__init__(self,coord,radius,**kwargs)
self.Survey = "Pan_STARRS"
def get_catalog(self,query_fields=None,release="dr2",
table="stack",print_query=False,
use_psf=False):
"""
        Query the Pan-STARRS catalog (via the MAST API) for
        photometry.
Args:
query_fields: list, optional
A list of query fields to
get in addition to the
default fields.
release: str, optional
"dr1" or "dr2" (default: "dr2").
Data release version.
table: str, optional
"mean","stack" or "detection"
(default: "stack"). The data table to
search within.
use_psf: bool, optional
If True, use PSFmag instead of KronMag
Returns:
catalog: astropy.table.Table
Contains all query results
"""
        assert self.radius <= 0.5*u.deg, "Cone searches have a maximum radius"
#Validate table and release input
_check_legal(table,release)
url = "https://catalogs.mast.stsci.edu/api/v0.1/panstarrs/{:s}/{:s}.csv".format(release,table)
if query_fields is None:
query_fields = _DEFAULT_query_fields
else:
query_fields = _DEFAULT_query_fields+query_fields
#Validate columns
_check_columns(query_fields,table,release)
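        # Cone-search parameters for the MAST catalogs API: position, radius in degrees, and the columns to return.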
data = {}
data['ra'] = self.coord.ra.value
data['dec'] = self.coord.dec.value
data['radius'] = self.radius.to(u.deg).value
data['columns'] = query_fields
if print_query:
print(url)
ret = requests.get(url,params=data)
ret.raise_for_status()
if len(ret.text)==0:
self.catalog = Table()
self.catalog.meta['radius'] = self.radius
self.catalog.meta['survey'] = self.survey
# Validate
self.validate_catalog()
return self.catalog.copy()
photom_catalog = Table.read(ret.text,format="ascii.csv")
pdict = photom['Pan-STARRS'].copy()
# Allow for PSF
if use_psf:
for band in PanSTARRS_bands:
pdict["Pan-STARRS"+'_{:s}'.format(band)] = '{:s}PSFmag'.format(band.lower())
pdict["Pan-STARRS"+'_{:s}_err'.format(band)] = '{:s}PSFmagErr'.format(band.lower())
photom_catalog = catalog_utils.clean_cat(photom_catalog,pdict)
#Remove bad positions because Pan-STARRS apparently decided
#to flag some positions with large negative numbers. Why even keep
#them?
#import pdb; pdb.set_trace()
bad_ra = (photom_catalog['ra']<0)+(photom_catalog['ra']>360)
bad_dec = (photom_catalog['dec']<-90)+(photom_catalog['dec']>90)
bad_pos = bad_ra+bad_dec # bad_ra OR bad_dec
photom_catalog = photom_catalog[~bad_pos]
#
self.catalog = catalog_utils.sort_by_separation(photom_catalog, self.coord,
radec=('ra','dec'), add_sep=True)
# Meta
self.catalog.meta['radius'] = self.radius
self.catalog.meta['survey'] = self.survey
#Validate
self.validate_catalog()
#Return
return self.catalog.copy()
def get_cutout(self,imsize=30*u.arcsec,filt="irg",output_size=None):
"""
Grab a color cutout (PNG) from Pan-STARRS
Args:
imsize (Quantity): Angular size of image desired
filt (str): A string with the three filters to be used
output_size (int): Output image size in pixels. Defaults
to the original cutout size.
Returns:
PNG image, None (None for the header).
"""
assert len(filt)==3, "Need three filters for a cutout."
#Sort filters from red to blue
filt = filt.lower() #Just in case the user is cheeky about the filter case.
reffilt = "yzirg"
idx = np.argsort([reffilt.find(f) for f in filt])
newfilt = ""
for i in idx:
newfilt += filt[i]
#Get image url
url = _get_url(self.coord,imsize=imsize,filt=newfilt,output_size=output_size,color=True,imgformat='png')
self.cutout = images.grab_from_url(url)
self.cutout_size = imsize
        return self.cutout.copy(), None
def get_image(self,imsize=30*u.arcsec,filt="i",timeout=120):
"""
Grab a fits image from Pan-STARRS in a
specific band.
Args:
imsize (Quantity): Angular size of the image desired
filt (str): One of 'g','r','i','z','y' (default: 'i')
            timeout (int): Number of seconds before the query times out (default: 120 s)
Returns:
hdu: fits header data unit for the downloaded image
"""
assert len(filt)==1 and filt in "grizy", "Filter name must be one of 'g','r','i','z','y'"
url = _get_url(self.coord,imsize=imsize,filt=filt,imgformat='fits')[0]
imagedat = fits.open(astroutils.data.download_file(url,cache=True,show_progress=False,timeout=timeout))[0]
return imagedat
def _get_url(coord,imsize=30*u.arcsec,filt="i",output_size=None,imgformat="fits",color=False):
"""
Returns the url corresponding to the requested image cutout
Args:
coord (astropy SkyCoord): Center of the search area.
imsize (astropy Angle): Length and breadth of the search area.
filt (str): 'g','r','i','z','y'
output_size (int): display image size (length) in pixels
imgformat (str): "fits","png" or "jpg"
"""
assert imgformat in ['jpg','png','fits'], "Image file can be only in the formats 'jpg', 'png' and 'fits'."
if color:
assert len(filt)==3,"Three filters are necessary for a color image"
assert imgformat in ['jpg','png'], "Color image not available in fits format"
pixsize = int(imsize.to(u.arcsec).value/0.25) #0.25 arcsec per pixel
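    # Two-step lookup: ps1filenames.py lists the stacked FITS files covering this position, then fitscut.cgi extracts the cutout from them.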
service = "https://ps1images.stsci.edu/cgi-bin/ps1filenames.py"
filetaburl = ("{:s}?ra={:f}&dec={:f}&size={:d}&format=fits"
"&filters={:s}").format(service,coord.ra.value,
coord.dec.value, pixsize,filt)
file_extensions = Table.read(filetaburl, format='ascii')['filename']
url = "https://ps1images.stsci.edu/cgi-bin/fitscut.cgi?ra={:f}&dec={:f}&size={:d}&format={:s}".format(coord.ra.value,coord.dec.value,
pixsize,imgformat)
if output_size:
url += "&output_size={}".format(output_size)
if color:
cols = ['red','green','blue']
for col,extension in zip(cols,file_extensions):
url += "&{}={}".format(col,extension)
else:
urlbase = url + "&red="
url = []
for extensions in file_extensions:
url.append(urlbase+extensions)
return url
def _check_columns(columns,table,release):
"""
Checks if the requested columns are present in the
table from which data is to be pulled. Raises an error
if those columns aren't found.
Args:
columns (list of str): column names to retrieve
table (str): "mean","stack" or "detection"
release (str): "dr1" or "dr2"
"""
dcols = {}
for col in _ps1metadata(table,release)['name']:
dcols[col.lower()] = 1
badcols = []
for col in columns:
if col.lower().strip() not in dcols:
badcols.append(col)
if badcols:
raise ValueError('Some columns not found in table: {}'.format(', '.join(badcols)))
def _check_legal(table,release):
"""
Checks if this combination of table and release is acceptable
    Raises a ValueError exception if there is a problem.
Taken from http://ps1images.stsci.edu/ps1_dr2_api.html
Args:
table (str): "mean","stack" or "detection"
release (str): "dr1" or "dr2"
"""
releaselist = ("dr1", "dr2")
if release not in releaselist:
raise ValueError("Bad value for release (must be one of {})".format(', '.join(releaselist)))
if release=="dr1":
tablelist = ("mean", "stack")
else:
tablelist = ("mean", "stack", "detection")
if table not in tablelist:
raise ValueError("Bad value for table (for {} must be one of {})".format(release, ", ".join(tablelist)))
def _ps1metadata(table="stack",release="dr2",
baseurl="https://catalogs.mast.stsci.edu/api/v0.1/panstarrs"):
"""Return metadata for the specified catalog and table
Args:
table (string): mean, stack, or detection
release (string): dr1 or dr2
baseurl: base URL for the request
Returns an astropy table with columns name, type, description
"""
_check_legal(table,release)
url = "{baseurl}/{release}/{table}/metadata".format(**locals())
r = requests.get(url)
r.raise_for_status()
v = r.json()
# convert to astropy table
tab = Table(rows=[(x['name'],x['type'],x['description']) for x in v],
names=('name','type','description'))
return tab
| bsd-3-clause | -4,824,569,952,313,100,000 | 38.769231 | 137 | 0.600454 | false |
jledbetter/openhatch | mysite/customs/bugimporters/bugzilla.py | 1 | 14860 | # This file is part of OpenHatch.
# Copyright (C) 2010, 2011 Jack Grigg
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import lxml.etree
import twisted.web.error
import twisted.web.http
import urlparse
from mysite.base.decorators import cached_property
import mysite.base.helpers
from mysite.customs.bugimporters.base import BugImporter
import mysite.search.models
class BugzillaBugImporter(BugImporter):
def __init__(self, *args, **kwargs):
# Create a list to store bug ids obtained from queries.
self.bug_ids = []
# Call the parent __init__.
super(BugzillaBugImporter, self).__init__(*args, **kwargs)
def process_queries(self, queries):
# Add all the queries to the waiting list.
for query in queries:
# Get the query URL.
query_url = query.get_query_url()
# Get the query type and set the callback.
query_type = query.query_type
if query_type == 'xml':
callback = self.handle_query_html
else:
callback = self.handle_tracking_bug_xml
# Add the query URL and callback.
self.add_url_to_waiting_list(
url=query_url,
callback=callback)
# Update query.last_polled and save it.
query.last_polled = datetime.datetime.utcnow()
query.save()
# URLs are now all prepped, so start pushing them onto the reactor.
self.push_urls_onto_reactor()
def handle_query_html(self, query_html_string):
# Turn the string into an HTML tree that can be parsed to find the list
# of bugs hidden in the 'XML' form.
query_html = lxml.etree.HTML(query_html_string)
# Find all form inputs at the level we want.
# This amounts to around three forms.
query_form_inputs = query_html.xpath('/html/body/div/table/tr/td/form/input')
# Extract from this the inputs corresponding to 'ctype' fields.
ctype_inputs = [i for i in query_form_inputs if 'ctype' in i.values()]
# Limit this to inputs with 'ctype=xml'.
ctype_xml = [i for i in ctype_inputs if 'xml' in i.values()]
if ctype_xml:
# Get the 'XML' form.
xml_form = ctype_xml[0].getparent()
# Get all its children.
xml_inputs = xml_form.getchildren()
# Extract from this all bug id inputs.
bug_id_inputs = [i for i in xml_inputs if 'id' in i.values()]
# Convert this to a list of bug ids.
bug_id_list = [int(i.get('value')) for i in bug_id_inputs]
# Add them to self.bug_ids.
self.bug_ids.extend(bug_id_list)
def handle_tracking_bug_xml(self, tracking_bug_xml_string):
# Turn the string into an XML tree.
tracking_bug_xml = lxml.etree.XML(tracking_bug_xml_string)
# Find all the bugs that this tracking bug depends on.
depends = tracking_bug_xml.findall('bug/dependson')
# Add them to self.bug_ids.
self.bug_ids.extend([int(depend.text) for depend in depends])
def prepare_bug_urls(self):
        # Pull bug_ids out of the internal storage. This is done in case the
# list is simultaneously being written to, in which case just copying
# the entire thing followed by deleting the contents could lead to
# lost IDs.
bug_id_list = []
while self.bug_ids:
bug_id_list.append(self.bug_ids.pop())
# Convert the obtained bug ids to URLs.
bug_url_list = [urlparse.urljoin(self.tm.get_base_url(),
"show_bug.cgi?id=%d" % bug_id) for bug_id in bug_id_list]
# Get the sub-list of URLs that are fresh.
fresh_bug_urls = mysite.search.models.Bug.all_bugs.filter(
canonical_bug_link__in = bug_url_list,
last_polled__lt = datetime.datetime.now() - datetime.timedelta(days = 1)
).values_list('canonical_bug_link', flat=True)
        # Remove the fresh URLs to be left with only stale or new URLs.
for bug_url in fresh_bug_urls:
bug_url_list.remove(bug_url)
# Put the bug list in the form required for process_bugs.
# The second entry of the tuple is None as Bugzilla doesn't supply data
# in the queries above (although it does support grabbing data for
        # multiple bugs at once, when all the bug ids are known).
bug_list = [(bug_url, None) for bug_url in bug_url_list]
# And now go on to process the bug list
self.process_bugs(bug_list)
def process_bugs(self, bug_list):
# If there are no bug URLs, finish now.
if not bug_list:
self.determine_if_finished()
return
# Convert the bug URLs into bug ids.
bug_id_list = []
for bug_url, _ in bug_list:
base, num = bug_url.rsplit('=', 1)
bug_id = int(num)
bug_id_list.append(bug_id)
# Create a single URL to fetch all the bug data.
big_url = urlparse.urljoin(
self.tm.get_base_url(),
'show_bug.cgi?ctype=xml&excludefield=attachmentdata')
for bug_id in bug_id_list:
big_url += '&id=%d' % bug_id
# Fetch the bug data.
self.add_url_to_waiting_list(
url=big_url,
callback=self.handle_bug_xml,
c_args={},
errback=self.errback_bug_xml,
e_args={'bug_id_list': bug_id_list})
# URLs are now all prepped, so start pushing them onto the reactor.
self.push_urls_onto_reactor()
def errback_bug_xml(self, failure, bug_id_list):
# Check if the failure was related to the size of the request.
size_related_errors = [
twisted.web.http.REQUEST_ENTITY_TOO_LARGE,
twisted.web.http.REQUEST_TIMEOUT,
twisted.web.http.REQUEST_URI_TOO_LONG
]
if failure.check(twisted.web.error.Error) and failure.value.status in size_related_errors:
big_url_base = urlparse.urljoin(
self.tm.get_base_url(),
'show_bug.cgi?ctype=xml&excludefield=attachmentdata')
# Split bug_id_list into pieces, and turn each piece into a URL.
# Note that (floor division)+1 is used to ensure that for
# odd-numbered lists we don't end up with one bug id left over.
split_bug_id_list = []
num_ids = len(bug_id_list)
step = (num_ids//2)+1
for i in xrange(0, num_ids, step):
bug_id_list_fragment = bug_id_list[i:i+step]
# Check the fragment actually has bug ids in it.
if not bug_id_list_fragment:
# This is our recursive end-point.
continue
# Create the URL for the fragment of bug ids.
big_url = big_url_base
for bug_id in bug_id_list_fragment:
big_url += '&id=%d' % bug_id
# Fetch the reduced bug data.
self.add_url_to_waiting_list(
url=big_url,
callback=self.handle_bug_xml,
c_args={},
errback=self.errback_bug_xml,
e_args={'bug_id_list': bug_id_list_fragment})
else:
# Pass the Failure on.
return failure
def handle_bug_xml(self, bug_list_xml_string):
# Turn the string into an XML tree.
bug_list_xml = lxml.etree.XML(bug_list_xml_string)
for bug_xml in bug_list_xml.xpath('bug'):
# Create a BugzillaBugParser with the XML data.
bbp = BugzillaBugParser(bug_xml)
# Get the parsed data dict from the BugzillaBugParser.
data = bbp.get_parsed_data_dict(self.tm)
# Get or create a Bug object to put the parsed data in.
try:
bug = mysite.search.models.Bug.all_bugs.get(
canonical_bug_link=bbp.bug_url)
except mysite.search.models.Bug.DoesNotExist:
bug = mysite.search.models.Bug(canonical_bug_link=bbp.bug_url)
# Fill the Bug
for key in data:
value = data[key]
setattr(bug, key, value)
# Save the project onto it
# Project name is generated from the bug_project_name_format property
# of the TrackerModel.
project_from_name, _ = mysite.search.models.Project.objects.get_or_create(
name=self.generate_bug_project_name(bbp))
# Manually save() the Project to ensure that if it was created then it has
# a display_name.
project_from_name.save()
bug.project = project_from_name
# Store the tracker that generated the Bug, update last_polled and save it!
bug.tracker = self.tm
bug.last_polled = datetime.datetime.utcnow()
bug.save()
def generate_bug_project_name(self, bbp):
return self.tm.bug_project_name_format.format(
tracker_name=self.tm.tracker_name,
product=bbp.product,
component=bbp.component)
def determine_if_finished(self):
# If we got here then there are no more URLs in the waiting list.
# So if self.bug_ids is also empty then we are done.
if self.bug_ids:
self.prepare_bug_urls()
else:
self.finish_import()
class BugzillaBugParser:
@staticmethod
def get_tag_text_from_xml(xml_doc, tag_name, index = 0):
"""Given an object representing <bug><tag>text</tag></bug>,
and tag_name = 'tag', returns 'text'."""
tags = xml_doc.xpath(tag_name)
try:
return tags[index].text
except IndexError:
return ''
def __init__(self, bug_xml):
self.bug_xml = bug_xml
self.bug_id = self._bug_id_from_bug_data()
self.bug_url = None # This gets filled in the data parser.
def _bug_id_from_bug_data(self):
return int(self.get_tag_text_from_xml(self.bug_xml, 'bug_id'))
@cached_property
def product(self):
return self.get_tag_text_from_xml(self.bug_xml, 'product')
@cached_property
def component(self):
return self.get_tag_text_from_xml(self.bug_xml, 'component')
@staticmethod
def _who_tag_to_username_and_realname(who_tag):
username = who_tag.text
realname = who_tag.attrib.get('name', '')
return username, realname
@staticmethod
def bugzilla_count_people_involved(xml_doc):
"""Strategy: Create a set of all the listed text values
inside a <who ...>(text)</who> tag
Return the length of said set."""
everyone = [tag.text for tag in xml_doc.xpath('.//who')]
return len(set(everyone))
@staticmethod
def bugzilla_date_to_datetime(date_string):
return mysite.base.helpers.string2naive_datetime(date_string)
def get_parsed_data_dict(self, tm):
# Generate the bug_url.
self.bug_url = urlparse.urljoin(
tm.get_base_url(),
'show_bug.cgi?id=%d' % self.bug_id)
xml_data = self.bug_xml
date_reported_text = self.get_tag_text_from_xml(xml_data, 'creation_ts')
last_touched_text = self.get_tag_text_from_xml(xml_data, 'delta_ts')
u, r = self._who_tag_to_username_and_realname(xml_data.xpath('.//reporter')[0])
status = self.get_tag_text_from_xml(xml_data, 'bug_status')
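        # Note that ASSIGNED bugs are also flagged as closed-looking, not just resolved/closed ones.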
looks_closed = status in ('RESOLVED', 'WONTFIX', 'CLOSED', 'ASSIGNED')
ret_dict = {
'title': self.get_tag_text_from_xml(xml_data, 'short_desc'),
'description': (self.get_tag_text_from_xml(xml_data, 'long_desc/thetext') or
'(Empty description)'),
'status': status,
'importance': self.get_tag_text_from_xml(xml_data, 'bug_severity'),
'people_involved': self.bugzilla_count_people_involved(xml_data),
'date_reported': self.bugzilla_date_to_datetime(date_reported_text),
'last_touched': self.bugzilla_date_to_datetime(last_touched_text),
'submitter_username': u,
'submitter_realname': r,
'canonical_bug_link': self.bug_url,
'looks_closed': looks_closed
}
keywords_text = self.get_tag_text_from_xml(xml_data, 'keywords')
keywords = map(lambda s: s.strip(),
keywords_text.split(','))
# Check for the bitesized keyword
if tm.bitesized_type:
ret_dict['bite_size_tag_name'] = tm.bitesized_text
b_list = tm.bitesized_text.split(',')
if tm.bitesized_type == 'key':
ret_dict['good_for_newcomers'] = any(b in keywords for b in b_list)
elif tm.bitesized_type == 'wboard':
whiteboard_text = self.get_tag_text_from_xml(xml_data, 'status_whiteboard')
ret_dict['good_for_newcomers'] = any(b in whiteboard_text for b in b_list)
else:
ret_dict['good_for_newcomers'] = False
else:
ret_dict['good_for_newcomers'] = False
# Check whether this is a documentation bug.
if tm.documentation_type:
d_list = tm.documentation_text.split(',')
if tm.documentation_type == 'key':
ret_dict['concerns_just_documentation'] = any(d in keywords for d in d_list)
elif tm.documentation_type == 'comp':
ret_dict['concerns_just_documentation'] = any(d == self.component for d in d_list)
elif tm.documentation_type == 'prod':
ret_dict['concerns_just_documentation'] = any(d == self.product for d in d_list)
else:
ret_dict['concerns_just_documentation'] = False
else:
ret_dict['concerns_just_documentation'] = False
# And pass ret_dict on.
return ret_dict
| agpl-3.0 | -8,360,372,221,718,884,000 | 41.824207 | 98 | 0.584859 | false |
quattor/aquilon | lib/aquilon/worker/formats/network_device.py | 1 | 6711 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010,2011,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NetworkDevice formatter."""
from collections import defaultdict
from operator import attrgetter
from aquilon.aqdb.model import NetworkDevice
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.hardware_entity import HardwareEntityFormatter
from aquilon.exceptions_ import ProtocolError
class NetworkDeviceFormatter(HardwareEntityFormatter):
def header_raw(self, device, details, indent="", embedded=True,
indirect_attrs=True):
details.append(indent + " Switch Type: %s" % device.switch_type)
def format_raw(self, device, indent="", embedded=True,
indirect_attrs=True):
details = [super(NetworkDeviceFormatter, self).format_raw(device, indent)]
for slot in device.chassis_slot:
details.append(indent + " {0:c}: {0!s}".format(slot.chassis))
details.append(indent + " Slot: %d" % slot.slot_number)
ports = defaultdict(list)
for om in device.observed_macs:
ports[om.port].append(om)
for port in sorted(ports):
# Show most recent data first, otherwise sort by MAC address. sort()
# is stable so we can call it multiple times
ports[port].sort(key=attrgetter('mac_address'))
ports[port].sort(key=attrgetter('last_seen'), reverse=True)
details.append(indent + " Port: %s" % port)
for om in ports[port]:
details.append(indent + " MAC: %s, created: %s, last seen: %s" %
(om.mac_address, om.creation_date, om.last_seen))
for pg in device.port_groups:
details.append(indent + " VLAN %d: %s" % (pg.network_tag,
pg.network.ip))
details.append(indent + " Created: %s" % pg.creation_date)
if device.host:
details.append(self.redirect_raw_host_details(device.host))
return "\n".join(details)
def csv_fields(self, device):
base_details = [device.fqdn,
device.primary_ip,
device.switch_type,
device.location.rack.name if device.location.rack else None,
device.location.building.name,
device.model.vendor.name,
device.model.name,
device.serial_no]
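        # Yield one CSV row per interface; a device with no interfaces still gets one row with empty interface fields.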
if not device.interfaces:
yield base_details + [None, None]
else:
for interface in device.interfaces:
yield base_details + [interface.name, interface.mac]
def fill_proto(self, device, skeleton, embedded=True,
indirect_attrs=True):
skeleton.primary_name = str(device.primary_name)
if indirect_attrs:
self._fill_hardware_proto(device, skeleton.hardware)
self._fill_system_proto(device.host, skeleton.system)
def _fill_hardware_proto(self, hwent, skeleton, embedded=True,
indirect_attrs=True):
skeleton.hardware_type = skeleton.NETWORK_DEVICE
skeleton.label = hwent.label
if hwent.serial_no:
skeleton.serial_no = hwent.serial_no
self.redirect_proto(hwent.model, skeleton.model, indirect_attrs=False)
self.redirect_proto(hwent.location, skeleton.location, indirect_attrs=False)
if indirect_attrs:
for iface in sorted(hwent.interfaces, key=attrgetter('name')):
int_msg = skeleton.interfaces.add()
int_msg.device = iface.name
self.redirect_proto(iface, int_msg)
self._fill_address_assignment_proto(iface, int_msg.address_assignments)
def _fill_address_assignment_proto(self, iface, skeleton, embedded=True,
indirect_attrs=True):
for addr in iface.assignments:
addr_msg = skeleton.add()
if addr.assignment_type == 'standard':
addr_msg.assignment_type = addr_msg.STANDARD
elif addr.assignment_type == 'shared':
addr_msg.assignment_type = addr_msg.SHARED
else:
raise ProtocolError("Unknown address assignmment type %s." %
addr.assignment_type)
if addr.label:
addr_msg.label = addr.label
addr_msg.ip = str(addr.ip)
addr_msg.fqdn.extend([str(fqdn) for fqdn in addr.fqdns])
for dns_record in addr.dns_records:
if dns_record.alias_cnt:
addr_msg.aliases.extend([str(a.fqdn) for a in
dns_record.all_aliases])
if hasattr(addr, "priority"):
addr_msg.priority = addr.priority
def _fill_system_proto(self, host, skeleton, embedded=True,
indirect_attrs=True):
self.redirect_proto(host.branch, skeleton.domain)
skeleton.status = host.status.name
self.redirect_proto(host.personality_stage, skeleton.personality)
self.redirect_proto(host.operating_system, skeleton.operating_system)
if host.cluster and not embedded:
skeleton.cluster = host.cluster.name
if host.resholder:
self.redirect_proto(host.resholder.resources, skeleton.resources)
self.redirect_proto(host.services_used, skeleton.services_used,
indirect_attrs=False)
self.redirect_proto([srv.service_instance for srv in host.services_provided],
skeleton.services_provided, indirect_attrs=False)
skeleton.owner_eonid = host.effective_owner_grn.eon_id
for grn_rec in host.grns:
map = skeleton.eonid_maps.add()
map.target = grn_rec.target
map.eonid = grn_rec.eon_id
ObjectFormatter.handlers[NetworkDevice] = NetworkDeviceFormatter()
| apache-2.0 | 1,171,513,508,783,954,400 | 42.577922 | 87 | 0.606318 | false |
jdodds/pyrana | pyrana/plugins/pidginstatus.py | 1 | 1213 | import dbus
from feather import Plugin
class PidginStatus(Plugin):
listeners = set(['songstart', 'songpause', 'songresume'])
messengers = set()
def songstart(self, payload):
#hacky.
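        # Assumes the payload is a file path laid out as .../artist/album/track.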
parts = payload.split('/')
artist = parts[-3]
album = parts[-2]
song = parts[-1]
self.song_msg = "%s (%s): %s" % (artist, album, song)
self.update_status(self.song_msg)
def songpause(self, payload=None):
self.update_status("Paused")
def songresume(self, payload=None):
self.update_status(self.song_msg)
def update_status(self, msg):
bus = dbus.SessionBus()
if "im.pidgin.purple.PurpleService" in bus.list_names():
purple = bus.get_object("im.pidgin.purple.PurpleService",
"/im/pidgin/purple/PurpleObject",
"im.pidgin.purple.PurpleInterface")
current = purple.PurpleSavedstatusGetType(
purple.PurpleSavedstatusGetCurrent())
status = purple.PurpleSavedstatusNew("", current)
purple.PurpleSavedstatusSetMessage(status, msg)
purple.PurpleSavedstatusActivate(status)
| bsd-3-clause | 7,194,651,118,975,871,000 | 31.783784 | 71 | 0.591096 | false |
leesdolphin/rentme | api/trademe/enums.py | 1 | 1262 | import enum
def named_enum(name, item):
if isinstance(item, str):
item = item.split(' ')
item = list(map(str.strip, item))
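    # Build an Enum whose member values are the member names themselves.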
return enum.Enum(name, dict(zip(item, item)), module=__name__)
@enum.unique
class AreaOfBusiness(enum.Enum):
All = 0
Marketplace = 1
Property = 2
Motors = 3
Jobs = 4
Services = 5
SearchSortOrder = named_enum('SearchSortOrder',
'Default FeaturedFirst SuperGridFeaturedFirst '
'TitleAsc ExpiryAsc ExpiryDesc PriceAsc PriceDesc '
'BidsMost BuyNowAsc BuyNowDesc ReviewsDesc '
'HighestSalary LowestSalary LowestKilometres '
'HighestKilometres NewestVehicle OldestVehicle '
'BestMatch LargestDiscount')
PhotoSize = named_enum('PhotoSize',
'Thumbnail List Medium Gallery Large FullSize')
PropertyType = named_enum('PropertyType', 'Apartment CarPark House Townhouse Unit')
AllowsPickups = enum.Enum('AllowsPickups', 'None Allow Demand Forbid', start=0)
GeographicLocationAccuracy = enum.Enum('GeographicLocationAccuracy',
'None Address Suburb Street', start=0)
| agpl-3.0 | -7,150,790,870,608,253,000 | 37.242424 | 83 | 0.606181 | false |
pareidolic/bharati-braille | bottle.py | 1 | 128654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2012, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.11.rc1'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, urllib, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
try: from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3,0,0)
py25 = py < (2,6,0)
py31 = (3,1,0) <= py < (3,2,0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
json_loads = json_lds
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# File uploads (which are implemented as empty FieldStorage instances...)
# have a negative truth value. That makes no sense, here is a fix.
class FieldStorage(cgi.FieldStorage):
def __nonzero__(self): return bool(self.list or self.file)
if py3k: __bool__ = __nonzero__
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been built """
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
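    # The rule syntax below matches both the old ":name" wildcards (optionally
    # followed by "#regex#") and the new "<name:filter:config>" wildcards,
    # taking backslash-escaping into account.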
#: Sorry for the mess. It works. Trust me.
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def __init__(self, strict=False):
self.rules = {} # A {rule: Rule} mapping
self.builder = {} # A rule/name->build_info mapping
self.static = {} # Cache for static routes: {path: {method: target}}
self.dynamic = [] # Cache for dynamic routes. See _compile()
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {'re': self.re_filter, 'int': self.int_filter,
'float': self.float_filter, 'path': self.path_filter}
def re_filter(self, conf):
return conf or self.default_pattern, None, None
def int_filter(self, conf):
return r'-?\d+', int, lambda x: str(int(x))
def float_filter(self, conf):
return r'-?[\d.]+', float, lambda x: str(float(x))
def path_filter(self, conf):
return r'.+?', None, None
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
def parse_rule(self, rule):
''' Parses a rule into a (name, filter, conf) token stream. If mode is
None, name contains a static rule part. '''
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix: yield prefix, None, None
name, filtr, conf = g[1:4] if not g[2] is None else g[4:7]
if not filtr: filtr = self.default_filter
yield name, filtr, conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new route or replace the target for an existing route. '''
if rule in self.rules:
self.rules[rule][method] = target
if name: self.builder[name] = self.builder[rule]
return
target = self.rules[rule] = {method: target}
# Build pattern and other structures for dynamic routes
anons = 0 # Number of anonymous wildcards
pattern = '' # Regular expression pattern
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self.parse_rule(rule):
if mode:
is_static = False
mask, in_filter, out_filter = self.filters[mode](conf)
if key:
pattern += '(?P<%s>%s)' % (key, mask)
else:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons; anons += 1
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static[self.build(rule)] = target
return
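        # Rewrite capturing groups as non-capturing ones so that several rules can
        # share one combined regex; match.lastindex then tells which rule matched.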
def fpat_sub(m):
return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)
try:
re_match = re.compile('^(%s)$' % pattern).match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
def match(path):
""" Return an url-argument dictionary. """
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
try:
combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
self.dynamic[-1][1].append((match, target))
except (AssertionError, IndexError): # AssertionError: Too many groups
self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
[(match, target)]))
return match
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}
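        # Static routes are matched by exact path first; everything else falls back to the combined dynamic regexes.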
if path in self.static:
targets = self.static[path]
else:
for combined, rules in self.dynamic:
match = combined.match(path)
if not match: continue
getargs, targets = rules[match.lastindex - 1]
urlargs = getargs(path) if getargs else {}
break
if not targets:
raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
method = environ['REQUEST_METHOD'].upper()
if method in targets:
return targets[method], urlargs
if method == 'HEAD' and 'GET' in targets:
return targets['GET'], urlargs
if 'ANY' in targets:
return targets['ANY'], urlargs
allowed = [verb for verb in targets if verb != 'ANY']
if 'GET' in allowed and 'HEAD' not in allowed:
allowed.append('HEAD')
raise HTTPError(405, "Method not allowed.",
header=[('Allow',",".join(allowed))])
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict(config)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.")
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.')
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def __repr__(self):
return '<%s %r %r>' % (self.method, self.rule, self.callback)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
        #: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
        #: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config.autojson = autojson
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.hooks = HooksPlugin()
self.install(self.hooks)
if self.config.autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
prefix, app = app, prefix
depr('Parameter order of Bottle.mount() changed.') # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
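        # The wrapper calls the mounted app with a recording start_response and
        # re-emits the captured status, headers and body through the parent app.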
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = BaseResponse([], 200)
def start_response(status, header):
rs.status = status
for name, value in header: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
body = itertools.chain(rs.body, body)
return HTTPResponse(body, rs.status_code, rs.headers)
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.hooks.trigger('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. Three hooks
are currently implemented:
- before_request: Executed once before each request
- after_request: Executed once after each request
- app_reset: Called whenever :meth:`reset` is called.
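
            Example (a minimal sketch; ``app`` is assumed to be an existing
            :class:`Bottle` instance)::

                @app.hook('before_request')
                def log_path():
                    print(request.path)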
"""
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
the result. :exc:`HTTPResponse` exceptions are caught and returned.
If :attr:`Bottle.catchall` is true, other exceptions are caught as
well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = next(out)
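            # Skip leading empty chunks so the first non-empty item decides the type.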
while not first:
first = next(out)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'\
% type(first)))
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers)
return [tob(err)]
def __call__(self, environ, start_response):
        ''' Each instance of :class:`Bottle` is a WSGI application. '''
return self.wsgi(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 1024000
    #: Maximum number of GET or POST parameters per request
MAX_PARAMS = 100
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
cookies = list(cookies.values())[:self.MAX_PARAMS]
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters",
            but they are not to be confused with "URL wildcards", which are
            provided by the :class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs[:self.MAX_PARAMS]:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The values are instances of
:class:`cgi.FieldStorage`. The most important attributes are:
filename
The filename, if specified; otherwise None; this is the client
side filename, *not* the file name on which it is stored (that's
a temporary file you don't deal with)
file
The file(-like) object from which you can read the data.
value
The value as a *string*; for file uploads, this transparently
reads the file every time you request the value. Do not do this
on big files.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
and 0 < self.content_length < self.MEMFILE_MAX:
return json_loads(self.body.read(self.MEMFILE_MAX))
return None
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, self.MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
maxlen = max(0, min(self.content_length, self.MEMFILE_MAX))
pairs = _parse_qsl(tonat(self.body.read(maxlen), 'latin1'))
for key, value in pairs[:self.MAX_PARAMS]:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='ISO-8859-1',
newline='\n')
elif py3k:
args['encoding'] = 'ISO-8859-1'
data = FieldStorage(**args)
for item in (data.list or [])[:self.MAX_PARAMS]:
post[item.name] = item if item.filename else item.value
return post
@property
def COOKIES(self):
''' Alias for :attr:`cookies` (deprecated). '''
depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
return self.cookies
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
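
            Example (a small illustration; the paths are hypothetical)::

                # SCRIPT_NAME='/prefix', PATH_INFO='/app/page'
                request.path_shift(1)
                # SCRIPT_NAME='/prefix/app', PATH_INFO='/page'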
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
        ''' The request body length as an integer. The client is responsible
            for setting this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
        ''' True if the request was triggered by an XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only works
            if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, **headers):
self._cookies = None
self._headers = {'Content-Type': [self.default_content_type]}
self.body = body
self.status = status or self.default_status
if headers:
for name, value in headers.items():
self[name] = value
def copy(self):
''' Returns a copy of self. '''
copy = Response()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
def wsgiheader(self):
depr('The wsgiheader method is deprecated. See headerlist.') #0.10
return self.headerlist
@property
def headerlist(self):
        ''' WSGI-conformant list of (header, value) tuples. '''
out = []
headers = self._headers.items()
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for name, vals in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
@property
def charset(self):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. This should not be used directly.
See :meth:`set_cookie`. """
depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
if not self._cookies:
self._cookies = SimpleCookie()
return self._cookies
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this
              cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
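
            Example (a minimal sketch; the cookie name, value and secret are
            hypothetical)::

                response.set_cookie('account', 'alice', secret='s3cr3t',
                                    max_age=3600, path='/')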
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
#: Thread-local storage for :class:`LocalRequest` and :class:`LocalResponse`
#: attributes.
_lctx = threading.local()
def local_property(name):
def fget(self):
try:
return getattr(_lctx, name)
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): setattr(_lctx, name, value)
def fdel(self): delattr(_lctx, name)
return property(fget, fset, fdel,
'Thread-local property stored in :data:`_lctx.%s`' % name)
class LocalRequest(BaseRequest):
''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). '''
bind = BaseRequest.__init__
environ = local_property('request_environ')
class LocalResponse(BaseResponse):
''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
'''
bind = BaseResponse.__init__
_status_line = local_property('response_status_line')
_status_code = local_property('response_status_code')
_cookies = local_property('response_cookies')
_headers = local_property('response_headers')
body = local_property('response_body')
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, header=None, **headers):
if header or 'output' in headers:
depr('Call signature changed (for the better)')
if header: headers.update(header)
if 'output' in headers: body = headers.pop('output')
super(HTTPResponse, self).__init__(body, status, **headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
def _output(self, value=None):
depr('Use HTTPResponse.body instead of HTTPResponse.output')
if value is None: return self.body
self.body = value
output = property(_output, _output, doc='Alias for .body')
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None, header=None, **headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, header, **headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, route):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
                # Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
return rv
return wrapper
class HooksPlugin(object):
name = 'hooks'
api = 2
_names = 'before_request', 'after_request', 'app_reset'
def __init__(self):
self.hooks = dict((name, []) for name in self._names)
self.app = None
def _empty(self):
return not (self.hooks['before_request'] or self.hooks['after_request'])
def setup(self, app):
self.app = app
def add(self, name, func):
''' Attach a callback to a hook. '''
was_empty = self._empty()
self.hooks.setdefault(name, []).append(func)
if self.app and was_empty and not self._empty(): self.app.reset()
def remove(self, name, func):
''' Remove a callback from a hook. '''
was_empty = self._empty()
if name in self.hooks and func in self.hooks[name]:
self.hooks[name].remove(func)
if self.app and not was_empty and self._empty(): self.app.reset()
def trigger(self, name, *a, **ka):
''' Trigger a hook and return a list of results. '''
hooks = self.hooks[name]
if ka.pop('reversed', False): hooks = hooks[::-1]
return [hook(*a, **ka) for hook in hooks]
def apply(self, callback, route):
if self._empty(): return callback
def wrapper(*a, **ka):
self.trigger('before_request')
rv = callback(*a, **ka)
self.trigger('after_request', reversed=True)
return rv
return wrapper
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str) and 'template_opts' in route.config:
depr('The `template_opts` parameter is deprecated.') #0.9
return view(conf, **route.config['template_opts'])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname, modname = fullname.rsplit('.', 1)
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
packname, modname = fullname.rsplit('.', 1)
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
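
        Example (a small illustration)::

            d = MultiDict(a=1)
            d['a'] = 2        # appends a second value for 'a'
            d['a']            # -> 2 (the newest value)
            d.getall('a')     # -> [1, 2]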
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
               into a specific type. Exceptions are suppressed and result in
               the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
s = s.encode('latin1')
if isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
return s
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
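
        Example (a small illustration; the header value is hypothetical)::

            headers = WSGIHeaderDict(request.environ)
            headers['User-Agent']   # -> value of environ['HTTP_USER_AGENT']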
'''
#: List of keys that do not have a 'HTTP_' prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-subclass with some extras: You can access keys like attributes.
Uppercase attributes create new ConfigDicts and act as name-spaces.
Other missing attributes return None. Calling a ConfigDict updates its
values and returns itself.
>>> cfg = ConfigDict()
>>> cfg.Namespace.value = 5
>>> cfg.OtherNamespace(a=1, b=2)
>>> cfg
{'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}}
'''
def __getattr__(self, key):
if key not in self and key[0].isupper():
self[key] = ConfigDict()
return self.get(key)
def __setattr__(self, key, value):
if hasattr(dict, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], ConfigDict):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self: del self[key]
def __call__(self, *a, **ka):
for key, value in dict(*a, **ka).items(): setattr(self, key, value)
return self
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
        return self.opener(fname, mode=mode, *args, **kwargs)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if code is None:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
location = urljoin(request.url, url)
raise HTTPResponse("", status=code, header=dict(Location=location))
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 206, 304, 403, 404 or 416. Set Content-Type, Content-Encoding,
Content-Length and Last-Modified header. Obey If-Modified-Since header
and HEAD requests.
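
        Example (a minimal sketch; the route and directory are hypothetical)::

            @route('/download/:filename')
            def download(filename):
                return static_file(filename, root='/srv/files', download=True)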
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
header = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if mimetype: header['Content-Type'] = mimetype
if encoding: header['Content-Encoding'] = encoding
elif mimetype:
header['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
header['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
header['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
header['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, header=header)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
header["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
header["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
header["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, header=header, status=206)
return HTTPResponse(body, header=header)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
DEBUG = bool(mode)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
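# Example (illustrative): parse_auth('Basic dXNlcjpwYXNz') returns ('user', 'pass');
# any header that is not valid basic authentication yields None.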
def parse_range_header(header, maxlen=0):
''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
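# Example (illustrative): for a 1000 byte resource,
#   list(parse_range_header('bytes=0-499,500-', 1000)) == [(0, 500), (500, 1000)]
# while an unsatisfiable range such as 'bytes=2000-' yields no tuples at all.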
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
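# Example (illustrative): a signed round trip with a shared secret key.
#   token = cookie_encode({'user': 'bob'}, 's3cret')
#   cookie_decode(token, 's3cret')   -> {'user': 'bob'}
#   cookie_decode(token, 'wrong')    -> None (signature mismatch)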
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
.replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
''' Escape and quote a string to be used as an HTTP attribute.'''
return '"%s"' % html_escape(string).replace('\n','%#10;')\
.replace('\r',' ').replace('\t','	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/:x/:y'
c(x, y=5) -> '/c/:x' and '/c/:x/:y'
d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
"""
import inspect # Expensive module. Only import if necessary.
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
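# Example (illustrative): shifting one fragment between PATH_INFO and SCRIPT_NAME.
#   path_shift('/app', '/blog/123')      -> ('/app/blog', '/123')
#   path_shift('/app/blog', '/123', -1)  -> ('/app', '/blog/123')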
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
depr('Use route wildcard filters instead.')
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kargs):
for key, value in vkargs.items():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(*args, **kargs)
return wrapper
return decorator
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return HTTPError(401, text)
return func(*a, **ka)
return wrapper
return decorator
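# Example (illustrative): protecting a route with HTTP basic authentication.
#   def check(user, password):
#       return user == 'admin' and password == 'secret'
#   @route('/admin')
#   @auth_basic(check)
#   def admin_panel():
#       return 'restricted content'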
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **config):
self.options = config
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(_lctx, local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.get('fast'): wsgi = pywsgi
log = None if self.quiet else 'default'
wsgi.WSGIServer((self.host, self.port), handler, log=log).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
try:
wsgi.server(listen((self.host, self.port)), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
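# Example (illustrative): any key above can be passed as the ``server`` argument
# of run(), e.g. run(app, server='cherrypy', host='0.0.0.0', port=8000);
# a ServerAdapter subclass may also be passed directly.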
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
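# Example (illustrative):
#   load('os.path')                        -> the os.path module
#   load('os.path:basename')               -> the basename function
#   load('os.path:basename("/a/b.txt")')   -> 'b.txt'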
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporarily added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=False, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
_debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
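# Example (illustrative, not part of the original module): a minimal application
# started with the auto-reloader enabled.
#   if __name__ == '__main__':
#       run(host='localhost', port=8080, reloader=True, debug=True)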
class FileCheckerThread(threading.Thread):
''' Interrupt the main thread as soon as a changed module file is detected,
the lockfile gets deleted or the lockfile gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (*args)
or directly, as keywords (**kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
''' Deprecated, do not use. '''
def prepare(self, **options):
depr('The SimpleTAL template handler is deprecated'\
' and will be removed in 0.12')
from simpletal import simpleTAL
if self.source:
self.tpl = simpleTAL.compileHTMLTemplate(self.source)
else:
with open(self.filename, 'rb') as fp:
self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))
def render(self, *args, **kwargs):
from simpletal import simpleTALES
for dictarg in args: kwargs.update(dictarg)
context = simpleTALES.Context()
for k,v in self.defaults.items():
context.addGlobal(k, v)
for k,v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
return output.getvalue()
class SimpleTemplate(BaseTemplate):
blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
'with', 'def', 'class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
@lazy_attribute
def re_pytokens(cls):
''' This matches comments and all kinds of quoted strings but does
NOT match comments (#...) within quoted strings. (trust me) '''
return re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code: return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
multiline = dedent = oneline = False
template = self.source or open(self.filename, 'rb').read()
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'): yield 'RAW', part[1:]
else: yield 'CMD', part
else: yield 'TXT', part
def flush(): # Flush the ptrbuffer
if not ptrbuffer: return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT': cline += repr(value)
elif token == 'RAW': cline += '_str(%s)' % value
elif token == 'CMD': cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = touni(line, self.encoding)
sline = line.lstrip()
if lineno <= 2:
m = re.match(r"%\s*#.*coding[:=]\s*([-\w.]+)", sline)
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if sline and sline[0] == '%' and sline[:2] != '%%':
line = line.split('%',1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush() # You are actually reading this? Good luck, it's a mess :)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, kwargs)
def execute(self, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
env = self.defaults.copy()
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'_include': self.subtemplate, '_str': self._str,
'_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__})
env.update(kwargs)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
rargs['_base'] = _stdout[:] #copy stdout
del _stdout[:] # clear stdout
return self.subtemplate(subtpl,_stdout,rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
for dictarg in args: kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
def template(*args, **kwargs):
'''
Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
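# Example (illustrative): both forms render the same result.
#   template('Hello {{name}}!', name='World')           # inline template source
#   template('hello_template', {'name': 'World'})       # looked up in TEMPLATE_PATH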
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
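# Example (illustrative): the decorated callback only supplies template variables.
#   @route('/hello/<name>')
#   @view('hello_template')
#   def hello(name):
#       return dict(name=name)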
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host:
host, port = host.rsplit(':', 1)
run(args[0], host=host, port=port, server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
| agpl-3.0 | 24,366,193,587,758,924 | 38.671292 | 103 | 0.582197 | false |
moccu/django-markymark | tests/test_fields.py | 1 | 1065 | from markymark.fields import MarkdownField, MarkdownFormField
from markymark.widgets import MarkdownTextarea
class CustomMarkdownTextarea(MarkdownTextarea):
pass
def test_markdownfield_formfield():
field = MarkdownField()
form_field = field.formfield()
assert isinstance(form_field, MarkdownFormField)
assert isinstance(form_field.widget, MarkdownTextarea)
def test_markdownfield_formfield_no_override():
field = MarkdownField()
form_field = field.formfield(widget=CustomMarkdownTextarea)
assert isinstance(form_field, MarkdownFormField)
assert isinstance(form_field.widget, CustomMarkdownTextarea)
def test_markdownfield_widget_instance():
field = MarkdownField()
widget_instance = MarkdownTextarea(attrs={'rows': 30, 'autofocus': True})
form_field = field.formfield(widget=widget_instance)
assert isinstance(form_field, MarkdownFormField)
assert isinstance(form_field.widget, MarkdownTextarea)
assert form_field.widget.attrs['rows'] == 30
assert form_field.widget.attrs['autofocus'] is True
| mit | -8,218,850,503,655,239,000 | 34.5 | 77 | 0.767136 | false |
zseder/hunmisc | hunmisc/liblinear/filter_problem.py | 1 | 2772 | import sys
def filter_fs(problem_f_handler, needed_features_list, orig_num_fname,
needed_labels_list, orig_num_labelname, filtered_name):
a = open('{0}.problem'.format(filtered_name), 'w')
orig_new_nums = {}
new_num_fname = {}
orig_new_labelnums = {}
new_num_labelname = {}
max_new_value = 0
max_new_labelvalue = -1
needed_feats = set(needed_features_list)
needed_labels = set(needed_labels_list)
for l in problem_f_handler:
data = l.strip().split(' ')
label_index = str(data[0])
if label_index in needed_labels:
if label_index not in orig_new_labelnums:
max_new_labelvalue += 1
orig_new_labelnums[label_index] = max_new_labelvalue
new_num_labelname[max_new_labelvalue] =\
orig_num_labelname[label_index]
needed_data = []
for d in data[1:]:
index, value = d.split(':')
if index in needed_feats:
if index not in orig_new_nums:
max_new_value += 1
orig_new_nums[index] = max_new_value
new_num_fname[max_new_value] = orig_num_fname[index]
needed_data.append('{0}:{1}'.format(orig_new_nums[index],
value))
needed_data.sort(key=lambda x:int(x.split(':')[0]))
a.write('{0} {1}\n'.format(orig_new_labelnums\
[label_index], ' '.join(needed_data)))
a.close()
b = open('{0}.featureNumbers'.format(filtered_name), 'w')
for i in new_num_fname:
b.write('{0}\t{1}\n'.format(new_num_fname[i], i))
b.close()
c = open('{0}.labelNumbers'.format(filtered_name), 'w')
for i in new_num_labelname:
c.write('{0}\t{1}\n'.format(new_num_labelname[i], i))
c.close()
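# Example call (illustrative, with assumed file names and mappings): keep only the
# listed feature/label indices and write the reduced problem as "small.problem":
#   filter_fs(open('full.problem'), ['3', '7'], orig_num_fname,
#             ['0', '1'], orig_num_labelname, 'small')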
def main():
orig_name = sys.argv[1]
problem_file = '{0}.problem'.format(orig_name)
#nums_of_needed_features_file = sys.argv[2]
orig_feature_name_nums_file = '{0}.featureNumbers'.format(orig_name)
orig_label_nums_file = '{0}.labelNumbers'.format(orig_name)
name_of_resulting = sys.argv[2]
filter_fs(open(problem_file), [ l.strip().split('\t')[1] for l in
open(orig_feature_name_nums_file).readlines()],\
dict([(l.strip().split('\t')[1],l.strip().split('\t')[0])
for l in open(orig_feature_name_nums_file)]), [ str(0), str(9), str(11)], #needed_label_nums
dict([(l.strip().split('\t')[1],l.strip().split('\t')[0])
for l in open(orig_label_nums_file)]), name_of_resulting)
if __name__ == "__main__":
main()
| gpl-3.0 | -7,254,434,017,910,495,000 | 36.459459 | 103 | 0.527778 | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/ad_group_asset_service/client.py | 1 | 23429 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.enums.types import asset_field_type
from google.ads.googleads.v8.enums.types import asset_link_status
from google.ads.googleads.v8.resources.types import ad_group_asset
from google.ads.googleads.v8.services.types import ad_group_asset_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AdGroupAssetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdGroupAssetServiceGrpcTransport
class AdGroupAssetServiceClientMeta(type):
"""Metaclass for the AdGroupAssetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[AdGroupAssetServiceTransport]]
_transport_registry['grpc'] = AdGroupAssetServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[AdGroupAssetServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdGroupAssetServiceClient(metaclass=AdGroupAssetServiceClientMeta):
"""Service to manage ad group assets."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdGroupAssetServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdGroupAssetServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_group_path(customer_id: str,ad_group_id: str,) -> str:
"""Return a fully-qualified ad_group string."""
return "customers/{customer_id}/adGroups/{ad_group_id}".format(customer_id=customer_id, ad_group_id=ad_group_id, )
@staticmethod
def parse_ad_group_path(path: str) -> Dict[str,str]:
"""Parse a ad_group path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def ad_group_asset_path(customer_id: str,ad_group_id: str,asset_id: str,field_type: str,) -> str:
"""Return a fully-qualified ad_group_asset string."""
return "customers/{customer_id}/adGroupAssets/{ad_group_id}~{asset_id}~{field_type}".format(customer_id=customer_id, ad_group_id=ad_group_id, asset_id=asset_id, field_type=field_type, )
@staticmethod
def parse_ad_group_asset_path(path: str) -> Dict[str,str]:
"""Parse a ad_group_asset path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/adGroupAssets/(?P<ad_group_id>.+?)~(?P<asset_id>.+?)~(?P<field_type>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def asset_path(customer_id: str,asset_id: str,) -> str:
"""Return a fully-qualified asset string."""
return "customers/{customer_id}/assets/{asset_id}".format(customer_id=customer_id, asset_id=asset_id, )
@staticmethod
def parse_asset_path(path: str) -> Dict[str,str]:
"""Parse a asset path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/assets/(?P<asset_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdGroupAssetServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad group asset service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdGroupAssetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdGroupAssetServiceTransport):
# transport is a AdGroupAssetServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdGroupAssetServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_group_asset(self,
request: ad_group_asset_service.GetAdGroupAssetRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_asset.AdGroupAsset:
r"""Returns the requested ad group asset in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAdGroupAssetRequest`):
The request object. Request message for
[AdGroupAssetService.GetAdGroupAsset][google.ads.googleads.v8.services.AdGroupAssetService.GetAdGroupAsset].
resource_name (:class:`str`):
Required. The resource name of the ad
group asset to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AdGroupAsset:
A link between an ad group and an
asset.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_asset_service.GetAdGroupAssetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_asset_service.GetAdGroupAssetRequest):
request = ad_group_asset_service.GetAdGroupAssetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ad_group_asset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def mutate_ad_group_assets(self,
request: ad_group_asset_service.MutateAdGroupAssetsRequest = None,
*,
customer_id: str = None,
operations: Sequence[ad_group_asset_service.AdGroupAssetOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_asset_service.MutateAdGroupAssetsResponse:
r"""Creates, updates, or removes ad group assets. Operation statuses
are returned.
List of thrown errors: `AssetLinkError <>`__
`AuthenticationError <>`__ `AuthorizationError <>`__
`ContextError <>`__ `FieldError <>`__ `HeaderError <>`__
`InternalError <>`__ `MutateError <>`__
`NotAllowlistedError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.MutateAdGroupAssetsRequest`):
The request object. Request message for
[AdGroupAssetService.MutateAdGroupAssets][google.ads.googleads.v8.services.AdGroupAssetService.MutateAdGroupAssets].
customer_id (:class:`str`):
Required. The ID of the customer
whose ad group assets are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v8.services.types.AdGroupAssetOperation]`):
Required. The list of operations to
perform on individual ad group assets.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateAdGroupAssetsResponse:
Response message for an ad group
asset mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_asset_service.MutateAdGroupAssetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_asset_service.MutateAdGroupAssetsRequest):
request = ad_group_asset_service.MutateAdGroupAssetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_ad_group_assets]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
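# Illustrative usage sketch (not part of the generated client). The customer,
# ad group, and asset IDs below are placeholders, and `credentials` is assumed
# to be a valid google.auth credentials object:
#
#   client = AdGroupAssetServiceClient(credentials=credentials)
#   operation = ad_group_asset_service.AdGroupAssetOperation()
#   operation.create.ad_group = "customers/1234567890/adGroups/111"
#   operation.create.asset = "customers/1234567890/assets/222"
#   response = client.mutate_ad_group_assets(
#       customer_id="1234567890", operations=[operation])
#   print(response.results[0].resource_name)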
__all__ = (
'AdGroupAssetServiceClient',
)
| apache-2.0 | 3,013,399,569,031,883,300 | 44.142582 | 193 | 0.622733 | false |
ilogue/niprov | niprov/filesystem.py | 1 | 1532 | import os
import datetime
class Filesystem(object):
"""Wrapper of filesystem access functionality such as that implemented by
the os package in the standard library.
"""
def __init__(self):
self.open = open
def fileExists(self, path):
return os.path.isfile(path)
def walk(self, path):
return os.walk(path)
def readlines(self, path):
with open(path) as fhandle:
lines = fhandle.read().splitlines()
return lines
def read(self, path):
"""Read the contents of a textfile.
Args:
path: Path to the file to read.
Returns:
str: Contents of the file
Raises:
IOError: [Errno 2] No such file or directory: 'xyz'
"""
with open(path) as fhandle:
contents = fhandle.read()
return contents
def write(self, path, content):
"""Write string content to a textfile.
Args:
path: Path to the file to read.
content (str): What to fill the file with
"""
with open(path, 'w') as fhandle:
fhandle.write(content)
def getsize(self, path):
return os.path.getsize(path)
def getctime(self, path):
"""Get the creation time for the file at path.
Args:
path: Path to the file to read.
Returns:
datetime: Time when the file was last changed
"""
return datetime.datetime.fromtimestamp(os.path.getctime(path))
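# Example usage (illustrative only; the path below is hypothetical):
#
#   fs = Filesystem()
#   fs.write('/tmp/example.txt', 'hello')
#   if fs.fileExists('/tmp/example.txt'):
#       print(fs.read('/tmp/example.txt'))
#       print(fs.getctime('/tmp/example.txt'))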
| bsd-3-clause | 3,807,397,935,507,642,000 | 23.31746 | 78 | 0.569191 | false |
etkirsch/legends-of-erukar | config/world/regions/BarlenRegion.py | 1 | 3367 | import erukar
from erukar.system.engine import EnvironmentProfile, OverlandSector, Sector, Region, Location, Chunk, EconomicProfile
def create():
barlen = Region()
barlen.name = "Greater Barlen Region"
    barlen.description = "A fertile area, known best for its vast barley and wheat fields. The seat of this region is a large town known as Barlen, whose economy consists mostly of agriculture taxes and exports of the barley harvest."
barlen.add_sector(create_barlen_outskirts)
barlen.add_sector(create_razorwoods_camp)
barlen.add_sector(create_izeth_terrace)
barlen.add_sector(create_izeth_citadel_1f)
barlen.sector_limits = acceptable_bounds()
barlen.sector_template = create_sector_template(barlen)
return barlen
def acceptable_bounds():
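    # Each entry looks like a cube-style hex-grid coordinate: the three
    # components of every tuple sum to zero. Sectors may only be placed at
    # these offsets (assigned to barlen.sector_limits in create() above).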
return [
(0, 0, 0),
(2, -2, 0),
(2, -3, 1),
(1, -1, 0),
(1, -2, 1),
(1, -3, 2),
(0, -1, 1),
(0, -2, 2),
(0, -3, 3),
(-1, 0, 1)
]
def create_barlen_outskirts(region):
def econ_seed(sector):
econ = EconomicProfile()
econ.demand[erukar.IurwoodLumber] = 2000
econ.supply[erukar.IurwoodLumber] = 100
econ.demand[erukar.AshLumber] = 1000
econ.supply[erukar.AshLumber] = 100
return econ
sector = create_sector_template(region, econ_seed)
sector.name = 'Barlen Town Outskirts'
sector.environment_profile = EnvironmentProfile.CityOutdoors()
sector.set_coordinates((0,0,0))
town = Location(sector)
town.is_named = True
town.name = 'Barlen Town Outskirts'
town.dungeon_file_name = 'BarlenOutskirts'
sector.locations.add(town)
return sector
def create_razorwoods_camp(region):
def econ_seed(sector):
econ = EconomicProfile()
econ.demand[erukar.IurwoodLumber] = 10
econ.supply[erukar.IurwoodLumber] = 5000
econ.demand[erukar.AshLumber] = 10
econ.supply[erukar.AshLumber] = 5000
return econ
sector = create_sector_template(region, econ_seed)
sector.name = 'Feriden Razorwoods Camp'
sector.set_coordinates((0,-3,3))
sector.environment_profile = EnvironmentProfile.SnowyWoodlands()
camp = Location(sector)
camp.is_named = True
camp.name = 'Feriden Razorwoods Camp'
camp.dungeon_file_name = 'RazorwoodsCamp'
sector.locations.add(camp)
return sector
def create_izeth_terrace(region):
sector = create_sector_template(region)
sector.name = 'Izeth Citadel Terrace'
sector.set_coordinates((0,-2,2))
sector.environment_profile = EnvironmentProfile.SnowyWoodlands()
terrace = Location(sector)
terrace.is_named = True
terrace.name = 'Izeth Citadel Terrace'
terrace.chunks = [Chunk()]
sector.locations.add(terrace)
return sector
def create_izeth_citadel_1f(region):
sector = Sector(region)
sector.name = 'Izeth Citadel 1F'
sector.set_coordinates("IzethCitadel1F")
citadel_1f = Location(sector)
citadel_1f.is_named = True
citadel_1f.name = 'Izeth Citadel 1F'
citadel_1f.dungeon_file_name = 'IzethCitadel1F'
sector.locations.add(citadel_1f)
return sector
def create_sector_template(region=None, econ_seed_fn=None):
sector = OverlandSector(region, econ_seed_fn)
sector.environment_profile = EnvironmentProfile.SnowyWoodlands()
return sector
| agpl-3.0 | -5,317,180,016,940,680,000 | 31.68932 | 233 | 0.674785 | false |
luk156/brick | documenti_acquisto/models.py | 1 | 3097 | from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
# Create your models here.
from django import forms
from suit.widgets import SuitDateWidget
from django.contrib.admin import widgets
from ore.models import *
# Create your models here.
class Fornitore(models.Model):
rag = models.TextField('Ragione Sociale', max_length=50)
mail = models.EmailField('E-Mail', blank=True, null=True)
telefono = models.IntegerField('Telefono principale', blank=True, null=True)
indirizzo = models.TextField('Indirizzo', max_length=100, blank=True, null=True)
def __unicode__(self):
return u'%s' % (self.rag)
class Meta:
verbose_name = "Fornitore"
verbose_name_plural = "Fornitori"
class Articolo(models.Model):
descrizione = models.TextField('Descrizione', max_length=50)
class Meta:
verbose_name = "Articolo"
verbose_name_plural = "Articoli"
def __unicode__(self):
return u'%s' % (self.descrizione)
class Documento_acquisto(models.Model):
data_emissione = models.DateField('Data di emissione')
fornitore = models.ForeignKey(Fornitore, related_name='fornitore_ddt')
class Meta:
abstract = False
def importo(self):
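        # Document total: sum of importo() (quantita * prezzo_unitario) over
        # all related line items (Bene).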
i = 0
for b in self.documento_bene.all():
i = i + b.importo()
return i
class Bene(models.Model):
articolo = models.ForeignKey(Articolo, related_name='articolo_bene')
quantita = models.DecimalField('Quantita', max_digits=8, decimal_places=2)
prezzo_unitario = models.DecimalField('Prezzo unitario', max_digits=8, decimal_places=2)
documento = models.ForeignKey('Documento_acquisto', related_name='documento_bene')
cantiere = models.ForeignKey(Cantiere, related_name='cantiere_bene')
class Meta:
verbose_name = "Bene"
verbose_name_plural = "Beni"
def importo(self):
return self.quantita * self.prezzo_unitario
def __unicode__(self):
        return u'%s x %s' % (self.articolo, self.quantita)
class Documento_trasporto(Documento_acquisto):
convertito = models.BooleanField(default=False)
class Meta:
verbose_name = 'Documento di trasporto'
verbose_name_plural = 'Documenti di trasporto'
def __unicode__(self):
return u'%s (%s)' % (self.fornitore,self.data_emissione)
class Documento_trasportoForm(forms.ModelForm):
#data = forms.DateField(widget=widgets.AdminDateWidget)
class Meta:
model = Documento_trasporto
exclude = ['convertito']
widgets = {
'data_emissione': SuitDateWidget,
}
class Fattura_acquisto(Documento_acquisto):
data_scadenza = models.DateField('Data di scadenza')
class Meta:
verbose_name = 'Fattura di acquisto'
verbose_name_plural = 'Fatture di acquisto'
class Fattura_acquistoForm(forms.ModelForm):
#data = forms.DateField(widget=widgets.AdminDateWidget)
class Meta:
model = Fattura_acquisto
widgets = {
'data_emissione': SuitDateWidget,
'data_scadenza': SuitDateWidget,
} | agpl-3.0 | -7,517,543,585,246,214,000 | 33.422222 | 92 | 0.671295 | false |
wooga/airflow | tests/providers/google/cloud/operators/test_translate_speech_system.py | 1 | 1653 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from airflow.providers.google.cloud.example_dags.example_translate_speech import BUCKET_NAME
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_GCS_KEY)
class GCPTextToSpeechExampleDagSystemTest(GoogleSystemTest):
@provide_gcp_context(GCP_GCS_KEY)
def setUp(self):
super().setUp()
self.create_gcs_bucket(BUCKET_NAME)
@provide_gcp_context(GCP_GCS_KEY)
def tearDown(self):
self.delete_gcs_bucket(BUCKET_NAME)
super().tearDown()
@provide_gcp_context(GCP_GCS_KEY)
def test_run_example_dag_gcp_translate_speech(self):
self.run_dag("example_gcp_translate_speech", CLOUD_DAG_FOLDER)
| apache-2.0 | 1,552,072,888,841,843,200 | 38.357143 | 103 | 0.754386 | false |
ldesousa/PyWPS | tests/test_describe.py | 1 | 14235 | ##################################################################
# Copyright 2016 OSGeo Foundation, #
# represented by PyWPS Project Steering Committee, #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import unittest
from collections import namedtuple
from pywps import Process, Service, LiteralInput, ComplexInput, BoundingBoxInput
from pywps import LiteralOutput, ComplexOutput, BoundingBoxOutput
from pywps import E, WPS, OWS, OGCTYPE, Format, NAMESPACES, OGCUNIT
from pywps.inout.literaltypes import LITERAL_DATA_TYPES
from pywps.app.basic import xpath_ns
from pywps.app.Common import Metadata
from pywps.inout.formats import Format
from pywps.inout.literaltypes import AllowedValue
from pywps.validator.allowed_value import ALLOWEDVALUETYPE
from pywps.exceptions import InvalidParameterValue
from pywps.exceptions import MissingParameterValue
from pywps.tests import assert_pywps_version, client_for
ProcessDescription = namedtuple('ProcessDescription', ['identifier', 'inputs', 'metadata'])
def get_data_type(el):
if el.text in LITERAL_DATA_TYPES:
return el.text
raise RuntimeError("Can't parse data type")
def get_describe_result(resp):
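    # Parse a DescribeProcess response: for every ProcessDescription element,
    # collect its identifier, a simplified view of its inputs (literal or
    # complex), and the titles of its ows:Metadata elements.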
assert resp.status_code == 200
assert resp.headers['Content-Type'] == 'text/xml'
result = []
for desc_el in resp.xpath('/wps:ProcessDescriptions/ProcessDescription'):
[identifier_el] = xpath_ns(desc_el, './ows:Identifier')
inputs = []
metadata = []
for metadata_el in xpath_ns(desc_el, './ows:Metadata'):
metadata.append(metadata_el.attrib['{http://www.w3.org/1999/xlink}title'])
for input_el in xpath_ns(desc_el, './DataInputs/Input'):
[input_identifier_el] = xpath_ns(input_el, './ows:Identifier')
input_identifier = input_identifier_el.text
literal_data_el_list = xpath_ns(input_el, './LiteralData')
complex_data_el_list = xpath_ns(input_el, './ComplexData')
if literal_data_el_list:
[literal_data_el] = literal_data_el_list
[data_type_el] = xpath_ns(literal_data_el, './ows:DataType')
data_type = get_data_type(data_type_el)
inputs.append((input_identifier, 'literal', data_type))
elif complex_data_el_list:
[complex_data_el] = complex_data_el_list
formats = []
for format_el in xpath_ns(complex_data_el,
'./Supported/Format'):
[mimetype_el] = xpath_ns(format_el, './ows:MimeType')
formats.append({'mime_type': mimetype_el.text})
inputs.append((input_identifier, 'complex', formats))
else:
raise RuntimeError("Can't parse input description")
result.append(ProcessDescription(identifier_el.text, inputs, metadata))
return result
class DescribeProcessTest(unittest.TestCase):
def setUp(self):
def hello(request):
pass
def ping(request):
pass
processes = [
Process(hello, 'hello', 'Process Hello', metadata=[
Metadata('hello metadata', 'http://example.org/hello',
role='http://www.opengis.net/spec/wps/2.0/def/process/description/documentation')]),
Process(ping, 'ping', 'Process Ping', metadata=[Metadata('ping metadata', 'http://example.org/ping')]),
]
self.client = client_for(Service(processes=processes))
def test_get_request_all_args(self):
resp = self.client.get('?Request=DescribeProcess&service=wps&version=1.0.0&identifier=all')
identifiers = [desc.identifier for desc in get_describe_result(resp)]
metadata = [desc.metadata for desc in get_describe_result(resp)]
assert 'ping' in identifiers
assert 'hello' in identifiers
assert_pywps_version(resp)
assert 'hello metadata' in [item for sublist in metadata for item in sublist]
def test_get_request_zero_args(self):
with self.assertRaises(MissingParameterValue) as e:
resp = self.client.get('?Request=DescribeProcess&version=1.0.0&service=wps')
assert resp.status_code == 400 # bad request, identifier is missing
def test_get_request_nonexisting_process_args(self):
with self.assertRaises(InvalidParameterValue) as e:
resp = self.client.get('?Request=DescribeProcess&version=1.0.0&service=wps&identifier=NONEXISTINGPROCESS')
assert resp.status_code == 400
def test_post_request_zero_args(self):
request_doc = WPS.DescribeProcess()
resp = self.client.post_xml(doc=request_doc)
assert resp.status_code == 400
def test_get_one_arg(self):
resp = self.client.get('?service=wps&version=1.0.0&Request=DescribeProcess&identifier=hello')
assert [pr.identifier for pr in get_describe_result(resp)] == ['hello']
def test_post_one_arg(self):
request_doc = WPS.DescribeProcess(
OWS.Identifier('hello'),
version='1.0.0'
)
resp = self.client.post_xml(doc=request_doc)
assert [pr.identifier for pr in get_describe_result(resp)] == ['hello']
def test_get_two_args(self):
resp = self.client.get('?Request=DescribeProcess'
'&service=wps'
'&version=1.0.0'
'&identifier=hello,ping')
result = get_describe_result(resp)
assert [pr.identifier for pr in result] == ['hello', 'ping']
def test_post_two_args(self):
request_doc = WPS.DescribeProcess(
OWS.Identifier('hello'),
OWS.Identifier('ping'),
version='1.0.0'
)
resp = self.client.post_xml(doc=request_doc)
result = get_describe_result(resp)
assert [pr.identifier for pr in result] == ['hello', 'ping']
class DescribeProcessInputTest(unittest.TestCase):
def describe_process(self, process):
client = client_for(Service(processes=[process]))
resp = client.get('?service=wps&version=1.0.0&Request=DescribeProcess&identifier=%s'
% process.identifier)
[result] = get_describe_result(resp)
return result
def test_one_literal_string_input(self):
def hello(request):
pass
hello_process = Process(
hello,
'hello',
'Process Hello',
inputs=[LiteralInput('the_name', 'Input name')],
metadata=[
Metadata('process metadata 1', 'http://example.org/1'),
Metadata('process metadata 2', 'http://example.org/2')]
)
result = self.describe_process(hello_process)
assert result.inputs == [('the_name', 'literal', 'integer')]
assert result.metadata == ['process metadata 1', 'process metadata 2']
def test_one_literal_integer_input(self):
def hello(request):
pass
hello_process = Process(hello, 'hello',
'Process Hello',
inputs=[LiteralInput('the_number',
'Input number',
data_type='positiveInteger')])
result = self.describe_process(hello_process)
assert result.inputs == [('the_number', 'literal', 'positiveInteger')]
class InputDescriptionTest(unittest.TestCase):
def test_literal_integer_input(self):
literal = LiteralInput('foo', 'Literal foo', data_type='positiveInteger', keywords=['kw1', 'kw2'], uoms=['metre'])
doc = literal.describe_xml()
self.assertEqual(doc.tag, E.Input().tag)
[identifier_el] = xpath_ns(doc, './ows:Identifier')
self.assertEqual(identifier_el.text, 'foo')
kws = xpath_ns(doc, './ows:Keywords/ows:Keyword')
self.assertEqual(len(kws), 2)
[type_el] = xpath_ns(doc, './LiteralData/ows:DataType')
self.assertEqual(type_el.text, 'positiveInteger')
self.assertEqual(type_el.attrib['{%s}reference' % NAMESPACES['ows']],
OGCTYPE['positiveInteger'])
anyvalue = xpath_ns(doc, './LiteralData/ows:AnyValue')
self.assertEqual(len(anyvalue), 1)
def test_literal_allowed_values_input(self):
"""Test all around allowed_values
"""
literal = LiteralInput(
'foo',
'Foo',
data_type='integer',
uoms=['metre'],
allowed_values=(
1, 2, (5, 10), (12, 4, 24),
AllowedValue(
allowed_type=ALLOWEDVALUETYPE.RANGE,
minval=30,
maxval=33,
range_closure='closed-open')
)
)
doc = literal.describe_xml()
allowed_values = xpath_ns(doc, './LiteralData/ows:AllowedValues')
self.assertEqual(len(allowed_values), 1)
allowed_value = allowed_values[0]
values = xpath_ns(allowed_value, './ows:Value')
ranges = xpath_ns(allowed_value, './ows:Range')
self.assertEqual(len(values), 2)
self.assertEqual(len(ranges), 3)
def test_complex_input_identifier(self):
complex_in = ComplexInput('foo', 'Complex foo', keywords=['kw1', 'kw2'], supported_formats=[Format('bar/baz')])
doc = complex_in.describe_xml()
self.assertEqual(doc.tag, E.Input().tag)
[identifier_el] = xpath_ns(doc, './ows:Identifier')
self.assertEqual(identifier_el.text, 'foo')
kws = xpath_ns(doc, './ows:Keywords/ows:Keyword')
self.assertEqual(len(kws), 2)
def test_complex_input_default_and_supported(self):
complex_in = ComplexInput(
'foo',
'Complex foo',
supported_formats=[
Format('a/b'),
Format('c/d')
]
)
doc = complex_in.describe_xml()
[default_format] = xpath_ns(doc, './ComplexData/Default/Format')
[default_mime_el] = xpath_ns(default_format, './MimeType')
self.assertEqual(default_mime_el.text, 'a/b')
supported_mime_types = []
for supported_el in xpath_ns(doc, './ComplexData/Supported/Format'):
[mime_el] = xpath_ns(supported_el, './MimeType')
supported_mime_types.append(mime_el.text)
self.assertEqual(supported_mime_types, ['a/b', 'c/d'])
def test_bbox_input(self):
bbox = BoundingBoxInput('bbox', 'BBox foo', keywords=['kw1', 'kw2'],
crss=["EPSG:4326", "EPSG:3035"])
doc = bbox.describe_xml()
[inpt] = xpath_ns(doc, '/Input')
[default_crs] = xpath_ns(doc, './BoundingBoxData/Default/CRS')
supported = xpath_ns(doc, './BoundingBoxData/Supported/CRS')
self.assertEqual(inpt.attrib['minOccurs'], '1')
self.assertEqual(default_crs.text, 'EPSG:4326')
self.assertEqual(len(supported), 2)
kws = xpath_ns(doc, './ows:Keywords/ows:Keyword')
self.assertEqual(len(kws), 2)
class OutputDescriptionTest(unittest.TestCase):
def test_literal_output(self):
literal = LiteralOutput('literal', 'Literal foo', abstract='Description', keywords=['kw1', 'kw2'], uoms=['metre'])
doc = literal.describe_xml()
[output] = xpath_ns(doc, '/Output')
[identifier] = xpath_ns(doc, '/Output/ows:Identifier')
[abstract] = xpath_ns(doc, '/Output/ows:Abstract')
[keywords] = xpath_ns(doc, '/Output/ows:Keywords')
kws = xpath_ns(keywords, './ows:Keyword')
[data_type] = xpath_ns(doc, '/Output/LiteralOutput/ows:DataType')
[uoms] = xpath_ns(doc, '/Output/LiteralOutput/UOMs')
[default_uom] = xpath_ns(uoms, './Default/ows:UOM')
supported_uoms = xpath_ns(uoms, './Supported/ows:UOM')
assert output is not None
assert identifier.text == 'literal'
assert abstract.text == 'Description'
assert keywords is not None
assert len(kws) == 2
assert data_type.attrib['{%s}reference' % NAMESPACES['ows']] == OGCTYPE['string']
assert uoms is not None
assert default_uom.text == 'metre'
assert default_uom.attrib['{%s}reference' % NAMESPACES['ows']] == OGCUNIT['metre']
assert len(supported_uoms) == 1
def test_complex_output(self):
complexo = ComplexOutput('complex', 'Complex foo', [Format('GML')], keywords=['kw1', 'kw2'])
doc = complexo.describe_xml()
[outpt] = xpath_ns(doc, '/Output')
[default] = xpath_ns(doc, '/Output/ComplexOutput/Default/Format/MimeType')
supported = xpath_ns(doc,
'/Output/ComplexOutput/Supported/Format/MimeType')
assert default.text == 'application/gml+xml'
assert len(supported) == 1
[keywords] = xpath_ns(doc, '/Output/ows:Keywords')
kws = xpath_ns(keywords, './ows:Keyword')
assert keywords is not None
assert len(kws) == 2
def test_bbox_output(self):
bbox = BoundingBoxOutput('bbox', 'BBox foo', keywords=['kw1', 'kw2'],
crss=["EPSG:4326"])
doc = bbox.describe_xml()
[outpt] = xpath_ns(doc, '/Output')
[default_crs] = xpath_ns(doc, './BoundingBoxOutput/Default/CRS')
supported = xpath_ns(doc, './BoundingBoxOutput/Supported/CRS')
assert default_crs.text == 'EPSG:4326'
assert len(supported) == 1
[keywords] = xpath_ns(doc, '/Output/ows:Keywords')
kws = xpath_ns(keywords, './ows:Keyword')
assert keywords is not None
assert len(kws) == 2
def load_tests(loader=None, tests=None, pattern=None):
if not loader:
loader = unittest.TestLoader()
suite_list = [
loader.loadTestsFromTestCase(DescribeProcessTest),
loader.loadTestsFromTestCase(DescribeProcessInputTest),
        loader.loadTestsFromTestCase(InputDescriptionTest),
        loader.loadTestsFromTestCase(OutputDescriptionTest),
    ]
return unittest.TestSuite(suite_list)
| mit | -5,498,443,039,569,499,000 | 42.53211 | 122 | 0.591781 | false |
beeftornado/sentry | tests/sentry/web/frontend/test_msteams_extension.py | 1 | 1768 | from __future__ import absolute_import
from django.core.signing import SignatureExpired
from sentry.models import OrganizationMember
from sentry.testutils import TestCase
from sentry.web.frontend.msteams_extension_configuration import MsTeamsExtensionConfigurationView
from sentry.utils.compat.mock import patch
from sentry.utils.signing import sign
class MsTeamsExtensionConfigurationTest(TestCase):
def hit_configure(self, params):
self.login_as(self.user)
org = self.create_organization()
OrganizationMember.objects.create(user=self.user, organization=org, role="admin")
path = u"/extensions/msteams/configure/"
return self.client.get(path, params)
def test_map_params(self):
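        # sign() produces the signed blob the Teams configuration flow passes
        # around; map_params_to_state should unsign it back into the original
        # params dict.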
config_view = MsTeamsExtensionConfigurationView()
data = {"my_param": "test"}
signed_data = sign(**data)
params = {"signed_params": signed_data}
assert data == config_view.map_params_to_state(params)
@patch("sentry.web.frontend.msteams_extension_configuration.unsign")
def test_expired_signature(self, mock_unsign):
with self.feature({"organizations:integrations-alert-rule": True}):
mock_unsign.side_effect = SignatureExpired()
resp = self.hit_configure({"signed_params": "test"})
assert b"Installation link expired" in resp.content
def test_no_team_plan_feature_flag(self):
with self.feature(
{
"organizations:integrations-alert-rule": False,
"organizations:integrations-chat-unfurl": False,
}
):
resp = self.hit_configure({"signed_params": "test"})
assert resp.status_code == 302
assert "/extensions/msteams/link/" in resp.url
| bsd-3-clause | -5,759,949,768,597,155 | 40.116279 | 97 | 0.675339 | false |
xmendez/wfuzz | src/wfuzz/plugins/payloads/hexrange.py | 1 | 1661 | from wfuzz.externals.moduleman.plugin import moduleman_plugin
from wfuzz.plugin_api.base import BasePayload
from wfuzz.exception import FuzzExceptBadOptions
from wfuzz.fuzzobjects import FuzzWordType
@moduleman_plugin
class hexrange(BasePayload):
name = "hexrange"
author = (
"Carlos del Ojo",
"Christian Martorella",
"Adapted to newer versions Xavi Mendez (@xmendez)",
)
version = "0.1"
description = ()
summary = "Returns each hex number of the given hex range."
category = ["default"]
priority = 99
parameters = (
("range", "", True, "Range of hex numbers to generate in the form of 00-ff."),
)
default_parameter = "range"
def __init__(self, params):
BasePayload.__init__(self, params)
try:
ran = self.params["range"].split("-")
self.minimum = int(ran[0], 16)
self.maximum = int(ran[1], 16)
self.__count = self.maximum - self.minimum + 1
self.current = self.minimum
self.lgth = max(
len(ran[0]), len(ran[1]), len(hex(self.maximum).replace("0x", ""))
)
except ValueError:
raise FuzzExceptBadOptions('Bad range format (eg. "0-ffa")')
def count(self):
return self.__count
def get_type(self):
return FuzzWordType.WORD
def get_next(self):
if self.current > self.maximum:
raise StopIteration
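        # Zero-pad the hex representation to the width of the widest range
        # bound, so e.g. the range "0-ff" yields 00, 01, ..., ff.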
pl = "%" + str(self.lgth) + "s"
num = hex(self.current).replace("0x", "")
pl = pl % (num)
payl = pl.replace(" ", "0")
self.current += 1
return payl
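# Example invocation from the wfuzz command line (target URL is illustrative):
#
#   wfuzz -z hexrange,00-ff http://example.com/page.php?id=FUZZ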
| gpl-2.0 | 7,504,221,438,334,975,000 | 27.152542 | 86 | 0.571945 | false |
jdmonaco/vmo-feedback-model | src/remapping/simulate.py | 1 | 5749 | # encoding: utf-8
"""
simulate.py -- Simulate double rotation experiments using VMOModel
Exported namespace: VMOExperiment
Created by Joe Monaco on 2010-02-03.
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
# Library imports
from IPython.kernel.client import MapTask
import numpy as np
import os
# Package imports
from ..core.analysis import BaseAnalysis
from ..double_rotation import VMODoubleRotation
from ..session import VMOSession
# Directory constants
RAT_DIR = "Rat%02d"
def run_session(model_dict, save_dir, get_clone=False):
"""Run a session as part of a double-rotation experiment
"""
success = False
try:
model = VMODoubleRotation(**model_dict)
model.advance()
data = VMOSession(model)
VMOSession.save_session_list([data], save_dir)
except:
raise
else:
success = True
if get_clone:
success = model.clone_dict()
return success
class VMOExperiment(BaseAnalysis):
"""
Run double-rotation experiments using the VMODoubleRotation model class
Convenience methods:
run_mismatch_analyses -- Run mismatch analysis on each simulated mismatch
angle followed by a remapping trends analysis. All data is saved to
the analysis data directory.
"""
label = "Cue Experiment"
def collect_data(self, rats=1, mismatch=None, **kwargs):
"""Run the simulations and collect results data
Simulated experimental data is saved in per-rat directories containing
MIS_XXX.tar.gz archive files of VMOSession objects.
Keyword arguments:
rats -- number of experiments to run with different random networks
mismatch -- list of mismatch angles (in degrees; don't include 0 for
standard session, this is done automatically)
Additional keywords are passed in as model parameters.
"""
# Set the mismatch angles and convert to radians
if mismatch is None:
mismatch = [45, 90, 135, 180]
self.results['mismatch'] = mismatch
self.results['rats'] = rats
mismatch = [(np.pi/180) * angle for angle in mismatch]
# Set up parameter dictionary
pdict = dict(
N_theta=1000,
N_outputs=500,
C_W=0.05,
cue_std=np.pi/24
)
pdict.update(kwargs)
# Set up IPython engines
mec = self.get_multiengine_client()
tc = self.get_task_client()
mec.execute('from vmo_feedback import VMODoubleRotation, VMOSession')
mec.clear_queue()
tc.clear()
# Run STD sessions and save model states
mis_args = []
for rat in xrange(rats):
# Run the standard session to get network, fixed points
self.out('Running standard session for rat %d...'%rat)
if rats == 1:
rat_dir = self.datadir
else:
rat_dir = os.path.join(self.datadir, RAT_DIR%rat)
pdict.update(mismatch=[0])
clone_dict = run_session(pdict, rat_dir, get_clone=True)
if not clone_dict:
self.out('STD session failed', error=True)
continue
mis_args.append((rat_dir, clone_dict))
# Farm out mismatch sessions to task controller
self.out('Now task-farming the mismatch sessions...')
for rat in xrange(rats):
rat_dir, clone_dict = mis_args[rat]
tasks = []
for angle in mismatch:
clone_dict.update(mismatch=[angle])
tasks.append(
tc.run(
MapTask(run_session,
args=(clone_dict, rat_dir),
kwargs={'get_clone':False})))
tc.barrier(tasks)
success = np.all([tc.get_task_result(t_id) for t_id in tasks])
tc.clear()
if success:
self.out('Successfully completed mismatch sessions!')
else:
self.out('Error(s) detected during mismatch sessions',
error=True)
# Good-bye
self.out('All done!')
def run_mismatch_analyses(self):
"""Perform MismatchAnalysis for each mismatch angle in this experiment
and then perform MismatchTrends on those results, saving all the data
and figures in this experiment's data directory.
"""
if not self.finished:
self.out('Analysis has not been completed yet!', error=True)
return
from mismatch import MismatchAnalysis
from trends import MismatchTrends
from pylab import close
self.out('Running mismatch analysis for each angle...')
mismatch = self.results['mismatch']
MA_list = []
for angle in mismatch:
MA_dir = os.path.join(self.datadir, 'mismatch_%03d'%angle)
MA = MismatchAnalysis(desc='mismatch', datadir=MA_dir)
MA(load_dir=self.datadir, mismatch=angle)
MA.view()
MA.save_plots()
close('all')
MA_list.append(MA)
self.out('Running mismatch trends analysis across angles...')
trends_dir = os.path.join(self.datadir, 'trends')
trends = MismatchTrends(desc='experiment', datadir=trends_dir)
trends(*MA_list)
trends.save_data()
trends.view()
trends.save_plots()
close('all')
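# Illustrative usage sketch (assumes an IPython parallel cluster is available,
# as required by get_task_client/get_multiengine_client; the datadir path is
# hypothetical, and calling the instance is presumed to invoke collect_data
# via BaseAnalysis.__call__, as done for MismatchAnalysis above):
#
#   expt = VMOExperiment(desc='double_rotation', datadir='/path/to/analysis')
#   expt(rats=4, mismatch=[45, 90, 135, 180])
#   expt.run_mismatch_analyses()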
| mit | -745,938,006,877,985,200 | 32.823529 | 80 | 0.589494 | false |
DeltaOS/deltaos-theme | theme_deltaos/__openerp__.py | 1 | 1289 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015-Trust-Code (<http://www.trustcode.com.br>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Deltaos Theme',
'category': 'Theme/Corporate',
'summary': 'Delta WebSite Theme',
'version': '1.0',
'description': """Delta Open Projects""",
'author': 'Raphael',
'depends': ['website'],
'data': [
'views/header.xml',
],
'application': True,
}
| gpl-3.0 | 1,249,369,750,968,677,000 | 36.911765 | 78 | 0.583398 | false |
jlyonsmith/pytools | Test/test_buckle.py | 1 | 2894 | import os
import subprocess
def writeFile(fileName, contents):
with open(fileName, "w") as f:
f.write(contents)
os.makedirs('Scratch', exist_ok=True)
os.chdir('Scratch')
writeFile('test_buckle.resx', '''<?xml version="1.0" encoding="utf-8"?>
<root>
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="String" xml:space="preserve">
<value>String</value>
</data>
<data name="StringWithArgs" xml:space="preserve">
<value>String {0} {1}</value>
</data>
</root>''')
# Create .resources and .cs files
subprocess.call(
("/usr/local/bin/python3", "../../buckle.py",
"test_buckle.resx", "-o", "test_buckle.cs", "-r", "test_buckle.resources",
"-n", "ToolBelt", "-c", "TestBuckleResources", "-b", "SillyBaseName"))
os.chdir('..')
| mit | -112,656,978,342,174,060 | 33.047059 | 139 | 0.707326 | false |
Xeralux/tensorflow | tensorflow/compiler/tests/binary_ops_test.py | 1 | 50582 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
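    # Run the binary operator on two placeholders inside the XLA test scope,
    # feed `a` and `b`, and compare the result against `expected` (defaulting
    # to approximate, dtype-aware equality).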
with self.test_session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllCloseAccordingToType
equality_test(result, expected, rtol=1e-3)
def _testSymmetricBinary(self, op, a, b, expected, equality_test=None):
self._testBinary(op, a, b, expected, equality_test)
self._testBinary(op, b, a, expected, equality_test)
def ListsAreClose(self, result, expected, rtol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllCloseAccordingToType(result[i], expected[i], rtol)
def testFloatOps(self):
for dtype in self.float_types:
if dtype == dtypes.bfloat16.as_numpy_dtype:
a = -1.01
b = 4.1
else:
a = -1.001
b = 4.01
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3, -1.5, -8, 44], dtype=dtype),
np.array([2, -2, 7, -4, 0], dtype=dtype),
expected=np.array(
[1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype))
self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))
self._testBinary(
math_ops.pow,
np.array([1, 2], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([10, 4], dtype=dtype),
np.array([2, 3], dtype=dtype),
expected=np.array([100, 64], dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(2),
np.array([3, 4], dtype=dtype),
expected=np.array([8, 16], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([[2], [3]], dtype=dtype),
dtype(4),
expected=np.array([[16], [81]], dtype=dtype))
self._testBinary(
math_ops.atan2,
np.array([0, np.sqrt(2), 1, np.sqrt(2), 0], dtype),
np.array([1, np.sqrt(2), 0, -np.sqrt(2), -1], dtype),
expected=np.array(
[0, np.pi / 4, np.pi / 2, np.pi * 3 / 4, np.pi], dtype=dtype))
self._testBinary(
gen_math_ops.reciprocal_grad,
np.array([4, -3, -2, 1], dtype=dtype),
np.array([5, -6, 7, -8], dtype=dtype),
expected=np.array([-80, 54, -28, 8], dtype=dtype))
self._testBinary(
gen_math_ops.sigmoid_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-60, -36, -14, 0], dtype=dtype))
self._testBinary(
gen_math_ops.rsqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-160, -81, -28, -4], dtype=dtype))
self._testBinary(
gen_math_ops.sqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))
self._testBinary(
gen_nn_ops.softplus_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))
self._testBinary(
gen_nn_ops.softsign_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[0.11111111, 0.06122449, 0.03125, 0.01234568], dtype=dtype))
self._testBinary(
gen_math_ops.tanh_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-75, -48, -21, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.elu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
self._testBinary(
gen_nn_ops.selu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
expected=np.array(
[1.158099340847, 2.7161986816948, 4.67429802254,
4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))
self._testBinary(
gen_nn_ops.relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
self._testBinary(
gen_nn_ops.relu6_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
np.array(
[0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.softmax_cross_entropy_with_logits,
np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),
np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
expected=[
np.array([1.44019, 2.44019], dtype=dtype),
np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
[-0.367941, -0.212856, 0.036883, 0.543914]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
self._testBinary(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits,
np.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
[0.9, 1.0, 1.1, 1.2]], dtype=dtype),
np.array([2, 1, 7], dtype=np.int32),
expected=[
np.array([1.342536, 1.442536, np.nan], dtype=dtype),
np.array([[0.213838, 0.236328, -0.738817, 0.288651],
[0.213838, -0.763672, 0.261183, 0.288651],
[np.nan, np.nan, np.nan, np.nan]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
def testIntOps(self):
for dtype in self.int_types:
self._testBinary(
gen_math_ops.truncate_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_and,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b0, 0b101, 0b1000], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_or,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b101, 0b1001], dtype=dtype))
lhs = np.array([0, 5, 3, 14], dtype=dtype)
rhs = np.array([5, 0, 7, 11], dtype=dtype)
self._testBinary(
bitwise_ops.left_shift, lhs, rhs,
expected=np.left_shift(lhs, rhs))
self._testBinary(
bitwise_ops.right_shift, lhs, rhs,
expected=np.right_shift(lhs, rhs))
if dtype in [np.int8, np.int16, np.int32, np.int64]:
lhs = np.array([-1, -5, -3, -14, -2], dtype=dtype)
rhs = np.array([5, 0, 1, 11, 36], dtype=dtype)
# HLO has saturating shift behavior.
bits = np.ceil(
np.log(np.iinfo(dtype).max - np.iinfo(dtype).min) / np.log(2))
expected = [
np.right_shift(l, r) if r < bits else np.sign(l)
for l, r in zip(lhs, rhs)
]
self._testBinary(bitwise_ops.right_shift, lhs, rhs, expected=expected)
def testNumericOps(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([11, 22], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([6, 7], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[8], [9]], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
expected=np.array(
[1 << 32, 1 << 36, 1 << 32, 1 << 36], dtype=np.int64))
self._testBinary(
math_ops.subtract,
np.array([1, 2, 100], dtype=dtype),
np.array([10, 20, -1], dtype=dtype),
expected=np.array([-9, -18, 101], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([4, 3], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[-6], [-5]], dtype=dtype))
if dtype not in self.complex_types: # min/max not supported for complex
self._testBinary(
math_ops.maximum,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([10, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[10], [7]], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._testBinary(
math_ops.minimum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([1, 5], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[7], [2]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([10, 40], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 100], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[70], [14]], dtype=dtype))
# Complex support for squared_difference is incidental, see b/68205550
if dtype not in self.complex_types:
self._testBinary(
math_ops.squared_difference,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([81, 324], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([16, 9], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[36], [25]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[3, 1], [5, 3]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1, 2], [3, 4]]]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
def testComplexOps(self):
for dtype in self.complex_types:
ctypes = {np.complex64: np.float32}
self._testBinary(
math_ops.complex,
np.array([[[[-1, 2], [2, 0]]]], dtype=ctypes[dtype]),
np.array([[[[2, -3], [0, 4]]]], dtype=ctypes[dtype]),
expected=np.array([[[[-1 + 2j, 2 - 3j], [2, 4j]]]], dtype=dtype))
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array(
[[[[-1 + 2j, 2.00009999 - 3j], [2 - 3j, 3 + 4.01j]]]],
dtype=dtype),
np.array(
[[[[-1.001 + 2j, 2 - 3j], [2 - 3.00009j, 3 + 4j]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3j, -1.5j, -8, 2 + 3j, 2 + 4j], dtype=dtype),
np.array([2, -2, 7j, -4j, 4 - 6j, 1 + 2j], dtype=dtype),
expected=np.array(
[1.5, -1.5j, -0.2142857, -2j, (2 + 3j) / (4 - 6j), 2],
dtype=dtype))
# Test inf/nan scenarios.
self._testBinary(
gen_math_ops.real_div,
np.array([4 + 3j, 4, 3j, -4, -4j, 2 - 3j], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0], dtype=dtype),
expected=np.array(
[
dtype(1 + 1j) / 0,
dtype(1) / 0,
dtype(1j) / 0,
dtype(-1) / 0,
dtype(-1j) / 0,
dtype(1 - 1j) / 0
],
dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(3 + 2j),
dtype(4 - 5j),
expected=np.power(dtype(3 + 2j), dtype(4 - 5j)))
self._testBinary( # empty rhs
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary( # to zero power
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[1, 2], dtype=dtype),
expected=np.ones(shape=[1, 2], dtype=dtype))
lhs = np.array([1 - 2j, 4 + 3j, 2 - 3j, 3, 2j, 1, 4], dtype=dtype)
rhs = np.array([2, 3j, 3 + 4j, 2 + 3j, 3 - 2j, 2, 3 + 3j], dtype=dtype)
scalar = dtype(2 + 2j)
self._testBinary(math_ops.pow, lhs, rhs, expected=np.power(lhs, rhs))
self._testBinary(
math_ops.pow, scalar, rhs, expected=np.power(scalar, rhs))
self._testBinary(math_ops.pow, lhs, scalar, np.power(lhs, scalar))
lhs = np.array([4 + 2j, -3 - 1j, 2j, 1], dtype=dtype)
rhs = np.array([5, -6j, 7 - 3j, -8j], dtype=dtype)
self._testBinary(
gen_math_ops.reciprocal_grad, lhs, rhs, expected=-rhs * lhs * lhs)
self._testBinary(
gen_math_ops.sigmoid_grad, lhs, rhs, expected=rhs * lhs * (1 - lhs))
self._testBinary(
gen_math_ops.rsqrt_grad, lhs, rhs, expected=lhs**3 * rhs / -2)
self._testBinary(
gen_math_ops.sqrt_grad, lhs, rhs, expected=rhs / (2 * lhs))
self._testBinary(
gen_math_ops.tanh_grad, lhs, rhs, expected=rhs * (1 - lhs * lhs))
def testComplexMath(self):
for dtype in self.complex_types:
self._testBinary(
math_ops.add,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([11 - 1j, 22 + 24j], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([6 - 5j, 7 - 3j], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[8 + 3j], [9 + 6j]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([-9 + 7j, -18 - 10j], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([4 - 9j, 3 - 11j], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[-6 - 7j], [-5 - 4j]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array(
[(1 + 3j) * (10 - 4j), (2 + 7j) * (20 + 17j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array(
[(5 - 7j) * (1 + 2j), (5 - 7j) * (2 + 4j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array(
[[(7 + 5j) * (1 - 2j)], [(7 + 5j) * (2 + 1j)]], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([8 - 1j, 2 + 16j], dtype=dtype),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(8 - 1j) / (2 + 4j), (2 + 16j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(1 + 2j),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(1 + 2j) / (2 + 4j), (1 + 2j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([2 + 4j, 4 - 8j], dtype=dtype),
dtype(1 + 2j),
expected=np.array(
[(2 + 4j) / (1 + 2j), (4 - 8j) / (1 + 2j)], dtype=dtype))
# TODO(b/68205550): math_ops.squared_difference shouldn't be supported.
self._testBinary(
nn_ops.bias_add,
np.array([[1 + 2j, 2 + 7j], [3 - 5j, 4 + 2j]], dtype=dtype),
np.array([2 + 6j, -1 - 3j], dtype=dtype),
expected=np.array([[3 + 8j, 1 + 4j], [5 + 1j, 3 - 1j]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1 + 4j, 2 - 1j], [3 + 7j, 4]]]], dtype=dtype),
np.array([2 + 1j, -1 + 2j], dtype=dtype),
expected=np.array(
[[[[3 + 5j, 1 + 1j], [5 + 8j, 3 + 2j]]]], dtype=dtype))
def _testDivision(self, dtype):
"""Test cases for division operators."""
self._testBinary(
math_ops.div,
np.array([10, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 10], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(40),
np.array([2, 20], dtype=dtype),
expected=np.array([20, 2], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([[10], [4]], dtype=dtype),
dtype(2),
expected=np.array([[5], [2]], dtype=dtype))
if dtype not in self.complex_types: # floordiv unsupported for complex.
self._testBinary(
gen_math_ops.floor_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -2, -1, -5, 2], dtype=dtype))
def testIntDivision(self):
for dtype in self.int_types:
self._testDivision(dtype)
def testFloatDivision(self):
for dtype in self.float_types | self.complex_types:
self._testDivision(dtype)
def _testRemainder(self, dtype):
"""Test cases for remainder operators."""
self._testBinary(
gen_math_ops.floor_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, -1, 6, 0], dtype=dtype))
self._testBinary(
gen_math_ops.truncate_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, 1, -1, 0], dtype=dtype))
def testIntRemainder(self):
for dtype in self.int_types:
self._testRemainder(dtype)
def testFloatRemainder(self):
for dtype in self.float_types:
self._testRemainder(dtype)
def testLogicalOps(self):
self._testBinary(
math_ops.logical_and,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[False, False], [False, True]], dtype=np.bool))
self._testBinary(
math_ops.logical_or,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[True, True], [False, True]], dtype=np.bool))
def testComparisons(self):
self._testBinary(
math_ops.equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [False]], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [True]], dtype=np.bool))
for greater_op in [math_ops.greater, (lambda x, y: x > y)]:
self._testBinary(
greater_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
greater_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
greater_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [False]], dtype=np.bool))
for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:
self._testBinary(
greater_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [True], [False]], dtype=np.bool))
for less_op in [math_ops.less, (lambda x, y: x < y)]:
self._testBinary(
less_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
less_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
less_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [False], [True]], dtype=np.bool))
self._testBinary(
less_op,
np.array([[10], [7], [2], [-1]], dtype=np.int64),
np.int64(7),
expected=np.array([[False], [False], [True], [True]], dtype=np.bool))
for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
self._testBinary(
less_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
less_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
less_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [True]], dtype=np.bool))
def testS64Comparisons(self):
for op in [(lambda x, y: x < y), (lambda x, y: x <= y),
(lambda x, y: x >= y), (lambda x, y: x > y)]:
lhs = np.array(
[
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000000),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x7ffffffefff00010),
np.int64(0x7ffffffefff00010),
np.int64(-1),
np.int64(-1)
],
dtype=np.int64)
rhs = np.array(
[
np.int64(0x000000007FFFFFFE),
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0001),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000001),
np.int64(0x0000000100000002),
np.int64(0x0000000100000003),
np.int64(0x0000000200000001),
np.int64(0x0000000200000002),
np.int64(0x0000000200000003),
np.int64(0x0000000300000001),
np.int64(0x0000000300000002),
np.int64(0x0000000300000003),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(-2),
np.int64(-1)
],
dtype=np.int64)
expected = np.array([op(l, r) for l, r in zip(lhs, rhs)], dtype=np.bool)
self._testBinary(op, lhs, rhs, expected=expected)
def testBroadcasting(self):
"""Tests broadcasting behavior of an operator."""
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array(3, dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([13, 23], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([10, 20], dtype=dtype),
np.array(4, dtype=dtype),
expected=np.array([14, 24], dtype=dtype))
# [1,3] x [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([[10, 20, 30]], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
# [3] * [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([10, 20, 30], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
def testFill(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.fill,
np.array([], dtype=np.int32),
dtype(-42),
expected=dtype(-42))
self._testBinary(
array_ops.fill,
np.array([1, 2], dtype=np.int32),
dtype(7),
expected=np.array([[7, 7]], dtype=dtype))
self._testBinary(
array_ops.fill,
np.array([3, 2], dtype=np.int32),
dtype(50),
expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))
# Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.
def _testMatMul(self, op):
for dtype in self.float_types:
self._testBinary(
op,
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
self._testBinary(
op,
np.array([[100, 10, 0.5]], dtype=dtype),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
expected=np.array([[123, 354]], dtype=dtype))
self._testBinary(
op,
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([[100], [10]], dtype=dtype),
expected=np.array([[130], [250], [680]], dtype=dtype))
self._testBinary(
op,
np.array([[1000, 100], [10, 1]], dtype=dtype),
np.array([[1, 2], [3, 4]], dtype=dtype),
expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))
self._testBinary(
op,
np.array([], dtype=dtype).reshape((2, 0)),
np.array([], dtype=dtype).reshape((0, 3)),
expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))
def testMatMul(self):
self._testMatMul(math_ops.matmul)
# TODO(phawkins): failing on GPU, no registered kernel.
def DISABLED_testSparseMatMul(self):
# Binary wrappers for sparse_matmul with different hints
def SparseMatmulWrapperTF(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True)
def SparseMatmulWrapperFT(a, b):
return math_ops.sparse_matmul(a, b, b_is_sparse=True)
def SparseMatmulWrapperTT(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)
self._testMatMul(math_ops.sparse_matmul)
self._testMatMul(SparseMatmulWrapperTF)
self._testMatMul(SparseMatmulWrapperFT)
self._testMatMul(SparseMatmulWrapperTT)
def testBatchMatMul(self):
# Same tests as for tf.matmul above.
self._testMatMul(math_ops.matmul)
# Tests with batches of matrices.
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]]], dtype=np.float32),
np.array([[[8]]], dtype=np.float32),
expected=np.array([[[-2]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]], [[4]]], dtype=np.float32),
np.array([[[8]], [[2]]], dtype=np.float32),
expected=np.array([[[-2]], [[8]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array(
[[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
[[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
dtype=np.float32),
np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],
[[55, 66], [77, 88]]]],
dtype=np.float32),
expected=np.array(
[[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
[[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((2, 2, 0)),
np.array([], dtype=np.float32).reshape((2, 0, 3)),
expected=np.array(
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((0, 2, 4)),
np.array([], dtype=np.float32).reshape((0, 4, 3)),
expected=np.array([], dtype=np.float32).reshape(0, 2, 3))
# Regression test for b/31472796.
if hasattr(np, "matmul"):
x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
x, x,
expected=np.matmul(x, x.transpose([0, 1, 3, 2])))
def testExpandDims(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.expand_dims,
dtype(7),
np.int32(0),
expected=np.array([7], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([42], dtype=dtype),
np.int32(0),
expected=np.array([[42]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([], dtype=dtype),
np.int32(0),
expected=np.array([[]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(0),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(1),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(2),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(3),
expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.pad,
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[1, 2], [2, 1]], dtype=np.int32),
expected=np.array(
[[0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0],
[0, 0, 4, 5, 6, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
dtype=dtype))
self._testBinary(
lambda x, y: array_ops.pad(x, y, constant_values=7),
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[0, 3], [2, 1]], dtype=np.int32),
expected=np.array(
[[7, 7, 1, 2, 3, 7],
[7, 7, 4, 5, 6, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7]],
dtype=dtype))
def testMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "REFLECT")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
1,
1,
], [2, 2]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1], #
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9]
],
dtype=dtype),
np.array([[2, 2], [0, 0]], dtype=np.int32),
expected=np.array(
[
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype),
np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),
expected=np.array(
[
[
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
],
[
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
]
],
dtype=dtype))
def testReshape(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.reshape,
np.array([], dtype=dtype),
np.array([0, 4], dtype=np.int32),
expected=np.zeros(shape=[0, 4], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 6], dtype=np.int32),
expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([6, -1], dtype=np.int32),
expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, -1], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
def testSplit(self):
for dtype in self.numeric_types:
for axis in [0, -3]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1], [2]]], dtype=dtype),
np.array([[[3], [4]]], dtype=dtype),
np.array([[[5], [6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
for axis in [1, -2]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1]], [[3]], [[5]]], dtype=dtype),
np.array([[[2]], [[4]], [[6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def splitvOp(x, y): # pylint: disable=invalid-name
return array_ops.split(value=y, num_or_size_splits=[2, 3], axis=x)
for axis in [1, -1]:
self._testBinary(
splitvOp,
np.int32(axis),
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=dtype),
expected=[
np.array([[0, 1], [5, 6]], dtype=dtype),
np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def testTile(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.tile,
np.array([[6]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[6, 6]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1], [2]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[1, 1], [2, 2]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array(
[[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2]], dtype=dtype),
np.array([3, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[1, 2],
[1, 2]],
dtype=dtype))
def testTranspose(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1, 3], [2, 4]], dtype=dtype))
def testCross(self):
for dtype in self.float_types:
self._testBinary(
gen_math_ops.cross,
np.zeros((4, 3), dtype=dtype),
np.zeros((4, 3), dtype=dtype),
expected=np.zeros((4, 3), dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype),
expected=np.array([-3, 6, -3], dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),
np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),
expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))
def testBroadcastArgs(self):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 1, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 1, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Incompatible shapes"):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1, 2, 3], dtype=np.int32),
np.array([4, 5, 6], dtype=np.int32),
expected=None)
def testMatrixSetDiag(self):
for dtype in self.numeric_types:
# Square
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
dtype=dtype),
np.array([1.0, 2.0, 3.0], dtype=dtype),
expected=np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0], [1.0, 1.0, 3.0]],
dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0], [2.0, 0.0, 6.0]]],
dtype=dtype),
np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]], dtype=dtype),
expected=np.array(
[[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0], [1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0], [2.0, 0.0, -6.0]]],
dtype=dtype))
# Rectangular
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]], dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]], dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]], dtype=dtype),
np.array([[-1.0, -2.0], [-4.0, -5.0]],
dtype=dtype),
expected=np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]],
dtype=dtype))
if __name__ == "__main__":
googletest.main()
| apache-2.0 | 7,264,468,469,406,065,000 | 36.917541 | 80 | 0.492211 | false |
cczhu/baseband | baseband/mark4/base.py | 1 | 21350 | # Licensed under the GPLv3 - see LICENSE
import numpy as np
from astropy.utils import lazyproperty
import astropy.units as u
from ..vlbi_base.base import (make_opener, VLBIFileBase, VLBIFileReaderBase,
VLBIStreamBase, VLBIStreamReaderBase,
VLBIStreamWriterBase)
from .header import Mark4Header
from .payload import Mark4Payload
from .frame import Mark4Frame
from .file_info import Mark4FileReaderInfo
__all__ = ['Mark4FileReader', 'Mark4FileWriter',
'Mark4StreamBase', 'Mark4StreamReader', 'Mark4StreamWriter',
'open']
# Look-up table for the number of bits in a byte.
nbits = ((np.arange(256)[:, np.newaxis] >> np.arange(8) & 1)
.sum(1).astype(np.int16))
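# For example, nbits[0b10110001] == 4 (illustrative value, not from any file):
# each entry holds the number of set bits of its index, so a byte of raw data
# can be scored against the all-ones sync pattern, or the all-zeros word that
# precedes it, with a single table lookup.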
class Mark4FileReader(VLBIFileReaderBase):
"""Simple reader for Mark 4 files.
Wraps a binary filehandle, providing methods to help interpret the data,
such as `locate_frame`, `read_frame` and `get_frame_rate`.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw binary data file.
ntrack : int or None, optional.
Number of Mark 4 bitstreams. Can be determined automatically as
part of locating the first frame.
decade : int or None
Decade in which the observations were taken. Can instead pass an
approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the observation time. Used only
if ``decade`` is not given.
"""
def __init__(self, fh_raw, ntrack=None, decade=None, ref_time=None):
self.ntrack = ntrack
self.decade = decade
self.ref_time = ref_time
super().__init__(fh_raw)
def __repr__(self):
return ("{name}(fh_raw={s.fh_raw}, ntrack={s.ntrack}, "
"decade={s.decade}, ref_time={s.ref_time})"
.format(name=self.__class__.__name__, s=self))
info = Mark4FileReaderInfo()
def read_header(self):
"""Read a single header from the file.
Returns
-------
header : `~baseband.mark4.Mark4Header`
"""
return Mark4Header.fromfile(self, ntrack=self.ntrack,
decade=self.decade, ref_time=self.ref_time)
def read_frame(self, verify=True):
"""Read a single frame (header plus payload).
        Parameters
        ----------
        verify : bool, optional
            Whether to do basic checks of frame integrity. Default: `True`.
        Returns
        -------
        frame : `~baseband.mark4.Mark4Frame`
            With ``.header`` and ``.data`` properties that return the
            :class:`~baseband.mark4.Mark4Header` and data encoded in the frame,
            respectively.
"""
return Mark4Frame.fromfile(self.fh_raw, self.ntrack,
decade=self.decade, ref_time=self.ref_time,
verify=verify)
def get_frame_rate(self):
"""Determine the number of frames per second.
The frame rate is calculated from the time elapsed between the
first two frames, as inferred from their time stamps.
Returns
-------
frame_rate : `~astropy.units.Quantity`
Frames per second.
"""
with self.temporary_offset():
self.seek(0)
self.locate_frame()
header0 = self.read_header()
self.seek(header0.payload_nbytes, 1)
header1 = self.read_header()
            # The Mark 4 specification states that frame lengths range from
            # 1.25 ms to 160 ms.
tdelta = header1.fraction[0] - header0.fraction[0]
return np.round(1 / tdelta) * u.Hz
def locate_frame(self, forward=True, maximum=None):
"""Locate the frame nearest the current position.
The search is for the following pattern:
* 32*tracks bits set at offset bytes
* 1*tracks bits unset before offset
* 32*tracks bits set at offset+2500*tracks bytes
        This reflects the 'sync_pattern' of 0xffffffff for a given header and
        for the one a frame ahead, which is in word 2, plus the lsb of word 1,
        which is 'system_id'.
        If the file does not have ``ntrack`` set, it will be auto-determined.
Parameters
----------
forward : bool, optional
Whether to search forwards or backwards. Default: `True`.
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
offset : int or `None`
Byte offset of the next frame. `None` if the search was not
successful.
"""
fh = self.fh_raw
file_pos = fh.tell()
# Use initializer value (determines ntrack if not already given).
ntrack = self.ntrack
if ntrack is None:
fh.seek(0)
ntrack = self.determine_ntrack(maximum=maximum)
if ntrack is None:
raise ValueError("cannot determine ntrack automatically. "
"Try passing in an explicit value.")
if forward and fh.tell() >= file_pos:
return fh.tell()
fh.seek(file_pos)
nset = np.ones(32 * ntrack // 8, dtype=np.int16)
nunset = np.ones(ntrack // 8, dtype=np.int16)
frame_nbytes = ntrack * 2500
fh.seek(0, 2)
filesize = fh.tell()
if filesize < frame_nbytes:
fh.seek(file_pos)
return None
if maximum is None:
maximum = 2 * frame_nbytes
# Loop over chunks to try to find the frame marker.
step = frame_nbytes // 2
# Read a bit more at every step to ensure we don't miss a "split"
# header.
block = step + 160 * ntrack // 8
if forward:
iterate = range(max(min(file_pos, filesize - block), 0),
max(min(file_pos + maximum, filesize - block + 1),
1),
step)
else:
iterate = range(min(max(file_pos - step, 0), filesize - block),
min(max(file_pos - step - maximum - 1, -1),
filesize - block),
-step)
for frame in iterate:
fh.seek(frame)
data = np.frombuffer(fh.read(block), dtype=np.uint8)
assert len(data) == block
# Find header pattern.
databits1 = nbits[data]
nosync = np.convolve(databits1[len(nunset):] < 6, nset, 'valid')
nolow = np.convolve(databits1[:-len(nset)] > 1, nunset, 'valid')
wrong = nosync + nolow
possibilities = np.where(wrong == 0)[0]
# Check candidates by seeing whether there is a sync word
# a frame size ahead. (Note: loop can be empty.)
for possibility in possibilities[::1 if forward else -1]:
# Real start of possible header.
frame_start = frame + possibility - 63 * ntrack // 8
if (forward and frame_start < file_pos or
not forward and frame_start > file_pos):
continue
# Check there is a header following this.
check = frame_start + frame_nbytes
if check >= filesize - 32 * 2 * ntrack // 8 - len(nunset):
# But do before this one if we're beyond end of file.
check = frame_start - frame_nbytes
if check < 0: # Assume OK if only one frame fits in file.
if frame_start + frame_nbytes > filesize:
continue
else:
break
fh.seek(check + 32 * 2 * ntrack // 8)
check_data = np.frombuffer(fh.read(len(nunset)),
dtype=np.uint8)
databits2 = nbits[check_data]
if np.all(databits2 >= 6):
break # Got it!
else: # None of them worked, so do next block.
continue
fh.seek(frame_start)
return frame_start
fh.seek(file_pos)
return None
def determine_ntrack(self, maximum=None):
"""Determines the number of tracks, by seeking the next frame.
Uses `locate_frame` to look for the first occurrence of a frame from
the current position for all supported ``ntrack`` values. Returns the
first ``ntrack`` for which `locate_frame` is successful, setting
the file's ``ntrack`` property appropriately, and leaving the
file pointer at the start of the frame.
Parameters
----------
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
ntrack : int or None
Number of Mark 4 bitstreams. `None` if no frame was found.
"""
# Currently only 16, 32 and 64-track frames supported.
old_ntrack = self.ntrack
for ntrack in 16, 32, 64:
try:
self.ntrack = ntrack
if self.locate_frame(maximum=maximum) is not None:
return ntrack
except Exception:
self.ntrack = old_ntrack
raise
self.ntrack = old_ntrack
return None
def find_header(self, forward=True, maximum=None):
"""Find the nearest header from the current position.
If successful, the file pointer is left at the start of the header.
Parameters
----------
forward : bool, optional
Seek forward if `True` (default), backward if `False`.
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
header : :class:`~baseband.mark4.Mark4Header` or None
Retrieved Mark 4 header, or `None` if nothing found.
"""
offset = self.locate_frame(forward=forward)
if offset is None:
return None
header = self.read_header()
self.fh_raw.seek(offset)
return header
class Mark4FileWriter(VLBIFileBase):
"""Simple writer for Mark 4 files.
Adds `write_frame` method to the VLBI binary file wrapper.
"""
def write_frame(self, data, header=None, **kwargs):
"""Write a single frame (header plus payload).
Parameters
----------
data : `~numpy.ndarray` or `~baseband.mark4.Mark4Frame`
If an array, a header should be given, which will be used to
get the information needed to encode the array, and to construct
the Mark 4 frame.
header : `~baseband.mark4.Mark4Header`
            Can instead give keyword arguments to construct a header. Ignored
            if ``data`` is a :class:`~baseband.mark4.Mark4Frame` instance.
**kwargs :
If ``header`` is not given, these are used to initialize one.
"""
if not isinstance(data, Mark4Frame):
data = Mark4Frame.fromdata(data, header, **kwargs)
return data.tofile(self.fh_raw)
class Mark4StreamBase(VLBIStreamBase):
"""Base for Mark 4 streams."""
def __init__(self, fh_raw, header0, sample_rate=None, squeeze=True,
subset=(), fill_value=0., verify=True):
super().__init__(
fh_raw, header0=header0, sample_rate=sample_rate,
samples_per_frame=header0.samples_per_frame,
unsliced_shape=(header0.nchan,),
bps=header0.bps, complex_data=False, squeeze=squeeze,
subset=subset, fill_value=fill_value, verify=verify)
self._frame_rate = int(round((self.sample_rate /
self.samples_per_frame).to_value(u.Hz)))
class Mark4StreamReader(Mark4StreamBase, VLBIStreamReaderBase):
"""VLBI Mark 4 format reader.
Allows access to a Mark 4 file as a continuous series of samples. Parts
of the data stream replaced by header values are filled in.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw Mark 4 stream.
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each
channel is sampled. If `None`, will be inferred from scanning two
frames of the file.
ntrack : int or None, optional
Number of Mark 4 bitstreams. If `None` (default), will attempt to
automatically detect it by scanning the file.
decade : int or None
        Decade of the observation start time (e.g. ``2010`` for 2018), needed to
remove ambiguity in the Mark 4 time stamp. Can instead pass an
approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the start time of the observations.
Used only if ``decade`` is not given.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object, optional
Specific channels of the complete sample to decode (after possible
squeezing). If an empty tuple (default), all channels are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
verify : bool, optional
Whether to do basic checks of frame integrity when reading. The first
frame of the stream is always checked. Default: `True`.
"""
_sample_shape_maker = Mark4Payload._sample_shape_maker
def __init__(self, fh_raw, sample_rate=None, ntrack=None, decade=None,
ref_time=None, squeeze=True, subset=(), fill_value=0.,
verify=True):
if decade is None and ref_time is None:
raise TypeError("Mark 4 stream reader requires either decade or "
"ref_time to be passed in.")
# Get binary file reader.
fh_raw = Mark4FileReader(fh_raw, ntrack=ntrack, decade=decade,
ref_time=ref_time)
# Find first header, determining ntrack if needed.
header0 = fh_raw.find_header()
assert header0 is not None, (
"could not find a first frame using ntrack={}. Perhaps "
"try ntrack=None for auto-determination.".format(ntrack))
self._offset0 = fh_raw.tell()
super().__init__(
fh_raw, header0=header0, sample_rate=sample_rate,
squeeze=squeeze, subset=subset, fill_value=fill_value,
verify=verify)
# Use reference time in preference to decade so that a stream wrapping
# a decade will work.
self.fh_raw.decade = None
self.fh_raw.ref_time = self.start_time
@lazyproperty
def _last_header(self):
"""Last header of the file."""
last_header = super()._last_header
# Infer the decade, assuming the end of the file is no more than
# 4 years away from the start.
last_header.infer_decade(self.start_time)
return last_header
def _read_frame(self, index):
self.fh_raw.seek(self._offset0 + index * self.header0.frame_nbytes)
frame = self.fh_raw.read_frame(verify=self.verify)
# Set decoded value for invalid data.
frame.fill_value = self.fill_value
# TODO: add check that we got the right frame.
return frame
class Mark4StreamWriter(Mark4StreamBase, VLBIStreamWriterBase):
"""VLBI Mark 4 format writer.
Encodes and writes sequences of samples to file.
Parameters
----------
raw : filehandle
Which will write filled sets of frames to storage.
header0 : `~baseband.mark4.Mark4Header`
Header for the first frame, holding time information, etc. Can instead
give keyword arguments to construct a header (see ``**kwargs``).
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each
channel is sampled. Needed to calculate header timestamps.
squeeze : bool, optional
If `True` (default), `write` accepts squeezed arrays as input, and
adds any dimensions of length unity.
**kwargs
If no header is given, an attempt is made to construct one from these.
For a standard header, this would include the following.
--- Header keywords : (see :meth:`~baseband.mark4.Mark4Header.fromvalues`)
time : `~astropy.time.Time`
Start time of the file. Sets bcd-encoded unit year, day, hour, minute,
second in the header.
ntrack : int
Number of Mark 4 bitstreams (equal to number of channels times
``fanout`` times ``bps``)
bps : int
Bits per elementary sample.
fanout : int
Number of tracks over which a given channel is spread out.
"""
_sample_shape_maker = Mark4Payload._sample_shape_maker
def __init__(self, fh_raw, header0=None, sample_rate=None, squeeze=True,
**kwargs):
if header0 is None:
header0 = Mark4Header.fromvalues(**kwargs)
super().__init__(fh_raw=fh_raw, header0=header0,
sample_rate=sample_rate, squeeze=squeeze)
# Set up initial payload with right shape.
samples_per_payload = (
header0.samples_per_frame * header0.payload_nbytes //
header0.frame_nbytes)
self._payload = Mark4Payload.fromdata(
np.zeros((samples_per_payload, header0.nchan), np.float32),
header0)
def _make_frame(self, frame_index):
header = self.header0.copy()
header.update(time=self.start_time + frame_index /
self._frame_rate * u.s)
# Reuse payload.
return Mark4Frame(header, self._payload)
open = make_opener('Mark4', globals(), doc="""
--- For reading a stream : (see `~baseband.mark4.base.Mark4StreamReader`)
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each channel
is sampled. If not given, will be inferred from scanning two frames of
the file.
ntrack : int, optional
Number of Mark 4 bitstreams. If `None` (default), will attempt to
automatically detect it by scanning the file.
decade : int or None
    Decade of the observation start time (e.g. ``2010`` for 2018), needed to
remove ambiguity in the Mark 4 time stamp (default: `None`). Can instead
pass an approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the start time of the observations. Used
only if ``decade`` is not given.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object, optional
Specific channels of the complete sample to decode (after possible
squeezing). If an empty tuple (default), all channels are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
verify : bool, optional
Whether to do basic checks of frame integrity when reading. The first
frame of the stream is always checked. Default: `True`.
--- For writing a stream : (see `~baseband.mark4.base.Mark4StreamWriter`)
header0 : `~baseband.mark4.Mark4Header`
Header for the first frame, holding time information, etc. Can instead
give keyword arguments to construct a header (see ``**kwargs``).
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each channel
is sampled. Needed to calculate header timestamps.
squeeze : bool, optional
If `True` (default), writer accepts squeezed arrays as input, and adds
any dimensions of length unity.
file_size : int or None, optional
When writing to a sequence of files, the maximum size of one file in bytes.
If `None` (default), the file size is unlimited, and only the first
file will be written to.
**kwargs
If the header is not given, an attempt will be made to construct one
with any further keyword arguments. See
:class:`~baseband.mark4.base.Mark4StreamWriter`.
Returns
-------
Filehandle
:class:`~baseband.mark4.base.Mark4FileReader` or
:class:`~baseband.mark4.base.Mark4FileWriter` (binary), or
:class:`~baseband.mark4.base.Mark4StreamReader` or
:class:`~baseband.mark4.base.Mark4StreamWriter` (stream)
Notes
-----
Although it is not generally expected to be useful for Mark 4, like for
other formats one can also pass to ``name`` a list, tuple, or subclass of
`~baseband.helpers.sequentialfile.FileNameSequencer`. For writing to multiple
files, the ``file_size`` keyword must be passed or only the first file will be
written to. One may also pass in a `~baseband.helpers.sequentialfile` object
(opened in 'rb' mode for reading or 'w+b' for writing), though for typical use
cases it is practically identical to passing in a list or template.
""")
| gpl-3.0 | 7,541,743,570,628,958,000 | 39.131579 | 79 | 0.603419 | false |
phobson/statsmodels | statsmodels/regression/linear_model.py | 1 | 97462 | # TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR']
import numpy as np
import pandas as pd
from scipy.linalg import toeplitz
from scipy import stats
from scipy import optimize
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.tools import add_constant, chain_dot, pinv_extended
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
# need import in module instead of lazily to copy `__doc__`
from . import _prediction as pred
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape != (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape != (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T
return sigma, cholsigmainv
class RegressionModel(base.LikelihoodModel):
"""
Base class for linear regression models. Should not be directly called.
Intended for subclassing.
"""
def __init__(self, endog, exog, **kwargs):
super(RegressionModel, self).__init__(endog, exog, **kwargs)
self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])
def initialize(self):
self.wexog = self.whiten(self.exog)
self.wendog = self.whiten(self.endog)
# overwrite nobs from class Model:
self.nobs = float(self.wexog.shape[0])
self._df_model = None
self._df_resid = None
self.rank = None
@property
def df_model(self):
"""
The model degree of freedom, defined as the rank of the regressor
matrix minus 1 if a constant is included.
"""
if self._df_model is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_model = float(self.rank - self.k_constant)
return self._df_model
@df_model.setter
def df_model(self, value):
self._df_model = value
@property
def df_resid(self):
"""
The residual degree of freedom, defined as the number of observations
minus the rank of the regressor matrix.
"""
if self._df_resid is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_resid = self.nobs - self.rank
return self._df_resid
@df_resid.setter
def df_resid(self, value):
self._df_resid = value
def whiten(self, X):
raise NotImplementedError("Subclasses should implement.")
def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators
cov_kwds : list or None, optional
            See `linear_model.RegressionResults.get_robustcov_results` for a
            description of the required keywords for alternative covariance
            estimators.
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
Returns
-------
A RegressionResults class instance.
See Also
---------
regression.linear_model.RegressionResults
regression.linear_model.RegressionResults.get_robustcov_results
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization.
"""
if method == "pinv":
if ((not hasattr(self, 'pinv_wexog')) or
(not hasattr(self, 'normalized_cov_params')) or
(not hasattr(self, 'rank'))):
self.pinv_wexog, singular_values = pinv_extended(self.wexog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
# Cache these singular values for use later.
self.wexog_singular_values = singular_values
self.rank = np_matrix_rank(np.diag(singular_values))
beta = np.dot(self.pinv_wexog, self.wendog)
elif method == "qr":
if ((not hasattr(self, 'exog_Q')) or
(not hasattr(self, 'exog_R')) or
(not hasattr(self, 'normalized_cov_params')) or
(getattr(self, 'rank', None) is None)):
Q, R = np.linalg.qr(self.wexog)
self.exog_Q, self.exog_R = Q, R
self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
# Cache singular values from R.
self.wexog_singular_values = np.linalg.svd(R, 0, 0)
self.rank = np_matrix_rank(R)
else:
Q, R = self.exog_Q, self.exog_R
# used in ANOVA
self.effects = effects = np.dot(Q.T, self.wendog)
beta = np.linalg.solve(R, effects)
if self._df_model is None:
self._df_model = float(self.rank - self.k_constant)
if self._df_resid is None:
self.df_resid = self.nobs - self.rank
if isinstance(self, OLS):
lfit = OLSResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
else:
lfit = RegressionResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,
**kwargs)
return RegressionResultsWrapper(lfit)
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array-like
Parameters of a linear model
exog : array-like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model has not yet been fit, params is not optional.
"""
#JP: this doesn't look correct for GLMAR
#SS: it needs its own predict method
if exog is None:
exog = self.exog
return np.dot(exog, params)
def get_distribution(self, params, scale, exog=None, dist_class=None):
"""
Returns a random number generator for the predictive distribution.
Parameters
----------
params : array-like
The model parameters (regression coefficients).
scale : scalar
The variance parameter.
exog : array-like
The predictor variable matrix.
dist_class : class
A random number generator class. Must take 'loc' and
'scale' as arguments and return a random number generator
implementing an `rvs` method for simulating random values.
Defaults to Gaussian.
Returns a frozen random number generator object with mean and
variance determined by the fitted linear model. Use the
``rvs`` method to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions objects``,
the returned random number generator must be called with
``gen.rvs(n)`` where ``n`` is the number of observations in
the data set used to fit the model. If any other value is
used for ``n``, misleading results will be produced.
"""
fit = self.predict(params, exog)
if dist_class is None:
from scipy.stats.distributions import norm
dist_class = norm
gen = dist_class(loc=fit, scale=np.sqrt(scale))
return gen
class GLS(RegressionModel):
__doc__ = """
Generalized least squares model with a general covariance structure.
%(params)s
sigma : scalar or array
`sigma` is the weighting matrix of the covariance.
The default is None for no scaling. If `sigma` is a scalar, it is
assumed that `sigma` is an n x n diagonal matrix with the given
scalar, `sigma` as the value of each diagonal element. If `sigma`
is an n-length vector, then `sigma` is assumed to be a diagonal
matrix with the given `sigma` on the diagonal. This should be the
same as WLS.
%(extra_params)s
**Attributes**
pinv_wexog : array
`pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
cholsimgainv : array
The transpose of the Cholesky decomposition of the pseudoinverse.
df_model : float
        The model degrees of freedom: p - 1, where p is the number of
        regressors including the intercept.
df_resid : float
Number of observations n less the number of parameters p.
llf : float
The value of the likelihood function of the fitted model.
nobs : float
The number of observations n.
normalized_cov_params : array
p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
results : RegressionResults instance
A property that returns the RegressionResults class if fit.
sigma : array
`sigma` is the n x n covariance structure of the error terms.
wexog : array
Design matrix whitened by `cholsigmainv`
wendog : array
Response variable whitened by `cholsigmainv`
Notes
-----
If sigma is a function of the data making one of the regressors
a constant, then the current postestimation statistics will not be correct.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
>>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
>>> rho = res_fit.params
`rho` is a consistent estimator of the correlation of the residuals from
an OLS fit of the longley data. It is assumed that this is the true rho
of the AR process data.
>>> from scipy.linalg import toeplitz
>>> order = toeplitz(np.arange(16))
>>> sigma = rho**order
`sigma` is an n x n matrix of the autocorrelation structure of the
data.
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
>>> gls_results = gls_model.fit()
    >>> print(gls_results.summary())
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
**kwargs):
#TODO: add options igls, for iterative fgls if sigma is None
#TODO: default if sigma is none should be two-step GLS
sigma, cholsigmainv = _get_sigma(sigma, len(endog))
super(GLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, sigma=sigma,
cholsigmainv=cholsigmainv, **kwargs)
#store attribute names for data arrays
self._data_attr.extend(['sigma', 'cholsigmainv'])
def whiten(self, X):
"""
GLS whiten method.
Parameters
-----------
X : array-like
Data to be whitened.
Returns
-------
np.dot(cholsigmainv,X)
See Also
--------
regression.GLS
"""
X = np.asarray(X)
if self.sigma is None or self.sigma.shape == ():
return X
elif self.sigma.ndim == 1:
if X.ndim == 1:
return X * self.cholsigmainv
else:
return X * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, X)
def loglike(self, params):
"""
Returns the value of the Gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `endog`.
Parameters
----------
params : array-like
The parameter estimates
Returns
-------
loglike : float
The value of the log-likelihood function for a GLS Model.
Notes
-----
The log-likelihood function for the normal distribution is
.. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}\\log\\left(\\left|\\Sigma\\right|\\right)
Y and Y-hat are whitened.
"""
#TODO: combine this with OLS/WLS loglike and add _det_sigma argument
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(self.sigma):
#FIXME: robust-enough check? unneeded if _det_sigma gets defined
if self.sigma.ndim==2:
det = np.linalg.slogdet(self.sigma)
llf -= .5*det[1]
else:
llf -= 0.5*np.sum(np.log(self.sigma))
# with error covariance matrix
return llf
class WLS(RegressionModel):
__doc__ = """
A regression model with diagonal but non-identity covariance structure.
The weights are presumed to be (proportional to) the inverse of the
variance of the observations. That is, if the variables are to be
transformed by 1/sqrt(W) you must supply weights = 1/W.
%(params)s
weights : array-like, optional
1d array of weights. If you supply 1/W then the variables are pre-
multiplied by 1/sqrt(W). If no weights are supplied the default value
        is 1 and WLS results are the same as OLS.
%(extra_params)s
Attributes
----------
weights : array
The stored weights supplied as an argument.
See regression.GLS
Examples
---------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
>>> results = wls_model.fit()
>>> results.params
array([ 2.91666667, 0.0952381 ])
>>> results.tvalues
array([ 2.0652652 , 0.35684428])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
>>> print(results.f_test([0, 1]))
<F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>
Notes
-----
If the weights are a function of the data, then the post estimation
statistics such as fvalue and mse_model might not be correct, as the
package does not yet support no-constant regression.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
**kwargs):
weights = np.array(weights)
if weights.shape == ():
if (missing == 'drop' and 'missing_idx' in kwargs and
kwargs['missing_idx'] is not None):
# patsy may have truncated endog
weights = np.repeat(weights, len(kwargs['missing_idx']))
else:
weights = np.repeat(weights, len(endog))
# handle case that endog might be of len == 1
if len(weights) == 1:
weights = np.array([weights.squeeze()])
else:
weights = weights.squeeze()
super(WLS, self).__init__(endog, exog, missing=missing,
weights=weights, hasconst=hasconst, **kwargs)
nobs = self.exog.shape[0]
weights = self.weights
# Experimental normalization of weights
weights = weights / np.sum(weights) * nobs
if weights.size != nobs and weights.shape[0] != nobs:
raise ValueError('Weights must be scalar or same length as design')
def whiten(self, X):
"""
Whitener for WLS model, multiplies each column by sqrt(self.weights)
Parameters
----------
X : array-like
Data to be whitened
Returns
-------
sqrt(weights)*X
"""
        # print(self.weights.var())
X = np.asarray(X)
if X.ndim == 1:
return X * np.sqrt(self.weights)
elif X.ndim == 2:
return np.sqrt(self.weights)[:, None]*X
def loglike(self, params):
"""
Returns the value of the gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `Y`.
Parameters
----------
params : array-like
The parameter estimates.
Returns
-------
llf : float
The value of the log-likelihood function for a WLS Model.
Notes
--------
        .. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}W\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)+\\frac{1}{2}\\log\\left(\\left|W\\right|\\right)
where :math:`W` is a diagonal matrix
"""
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog,params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant
llf += 0.5 * np.sum(np.log(self.weights))
return llf
class OLS(WLS):
__doc__ = """
A simple ordinary least squares model.
%(params)s
%(extra_params)s
Attributes
----------
weights : scalar
Has an attribute weights = array(1.0) due to inheritance from WLS.
See Also
--------
GLS
Examples
--------
>>> import numpy as np
>>>
>>> import statsmodels.api as sm
>>>
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>>
>>> model = sm.OLS(Y,X)
>>> results = model.fit()
>>> results.params
array([ 2.14285714, 0.25 ])
>>> results.tvalues
array([ 1.87867287, 0.98019606])
    >>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2>
Notes
-----
No constant is added by the model unless you are using formulas.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
#TODO: change example to use datasets. This was the point of datasets!
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
super(OLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
if "weights" in self._init_keys:
self._init_keys.remove("weights")
def loglike(self, params, scale=None):
"""
The likelihood function for the OLS model.
Parameters
----------
params : array-like
The coefficients with which to estimate the log-likelihood.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The likelihood function evaluated at params.
"""
nobs2 = self.nobs / 2.0
nobs = float(self.nobs)
resid = self.endog - np.dot(self.exog, params)
if hasattr(self, 'offset'):
resid -= self.offset
ssr = np.sum(resid**2)
if scale is None:
# profile log likelihood
llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
else:
# log-likelihood
llf = -nobs2 * np.log(2 * np.pi * scale) - ssr / (2*scale)
return llf
def whiten(self, Y):
"""
OLS model whitener does nothing: returns Y.
"""
return Y
def score(self, params, scale=None):
"""
Evaluate the score function at a given point.
The score corresponds to the profile (concentrated)
log-likelihood in which the scale parameter has been profiled
out.
Parameters
----------
params : array-like
The parameter vector at which the score function is
computed.
scale : float or None
            If None, the score is computed for the profile (concentrated)
            log-likelihood (profiled over the scale parameter), else for the
            log-likelihood evaluated at the given scale value.
Returns
-------
The score vector.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
sdr = -self._wexog_x_wendog + xtxb
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T, params)
ssr += np.dot(params, xtxb)
return -self.nobs * sdr / ssr
else:
return -sdr / scale
def _setup_score_hess(self):
y = self.wendog
if hasattr(self, 'offset'):
y = y - self.offset
self._wendog_xprod = np.sum(y * y)
self._wexog_xprod = np.dot(self.wexog.T, self.wexog)
self._wexog_x_wendog = np.dot(self.wexog.T, y)
def hessian(self, params, scale=None):
"""
Evaluate the Hessian function at a given point.
Parameters
----------
params : array-like
The parameter vector at which the Hessian is computed.
scale : float or None
            If None, the Hessian is computed for the profile (concentrated)
            log-likelihood (profiled over the scale parameter), else for the
            log-likelihood evaluated at the given scale value.
Returns
-------
The Hessian matrix.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T, params)
ssr += np.dot(params, xtxb)
ssrp = -2*self._wexog_x_wendog + 2*xtxb
hm = self._wexog_xprod / ssr - np.outer(ssrp, ssrp) / ssr**2
return -self.nobs * hm / 2
else:
return -self._wexog_xprod / scale
def fit_regularized(self, method="elastic_net", alpha=0.,
start_params=None, profile_scale=False,
refit=False, **kwargs):
"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : string
Only the 'elastic_net' approach is currently implemented.
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
start_params : array-like
Starting values for ``params``.
profile_scale : bool
If True the penalized fit is computed using the profile
(concentrated) log-likelihood for the Gaussian model.
Otherwise the fit uses the residual sum of squares.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
Returns
-------
An array of coefficients, or a RegressionResults object of the
same type returned by ``fit``.
Notes
-----
The elastic net approach closely follows that implemented in
the glmnet package in R. The penalty is a combination of L1
and L2 penalties.
        The function that is minimized is:
        .. math:: 0.5*RSS/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where RSS is the usual regression sum of squares, n is the
sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
L1_wt : float
Must be in [0, 1]. The L1 penalty has weight L1_wt and the
L2 penalty has weight 1 - L1_wt.
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
References
----------
Friedman, Hastie, Tibshirani (2008). Regularization paths for
generalized linear models via coordinate descent. Journal of
Statistical Software 33(1), 1-22 Feb 2010.
"""
from statsmodels.base.elastic_net import fit_elasticnet
# In the future we could add support for other penalties, e.g. SCAD.
if method != "elastic_net":
raise ValueError("method for fit_regularied must be elastic_net")
# Set default parameters.
defaults = {"maxiter" : 50, "L1_wt" : 1, "cnvrg_tol" : 1e-10,
"zero_tol" : 1e-10}
defaults.update(kwargs)
# If a scale parameter is passed in, the non-profile
# likelihood (residual sum of squares divided by -2) is used,
# otherwise the profile likelihood is used.
if profile_scale:
loglike_kwds = {}
score_kwds = {}
hess_kwds = {}
else:
loglike_kwds = {"scale": 1}
score_kwds = {"scale": 1}
hess_kwds = {"scale": 1}
return fit_elasticnet(self, method=method,
alpha=alpha,
start_params=start_params,
loglike_kwds=loglike_kwds,
score_kwds=score_kwds,
hess_kwds=hess_kwds,
refit=refit,
**defaults)
class GLSAR(GLS):
__doc__ = """
A regression model with an AR(p) covariance structure.
%(params)s
rho : int
Order of the autoregressive covariance
%(extra_params)s
Examples
--------
>>> import statsmodels.api as sm
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> Y = [1,3,4,5,8,10,9]
>>> model = sm.GLSAR(Y, X, rho=2)
>>> for i in range(6):
... results = model.fit()
... print("AR coefficients: {0}".format(model.rho))
... rho, sigma = sm.regression.yule_walker(results.resid,
... order=model.order)
... model = sm.GLSAR(Y, X, rho)
...
AR coefficients: [ 0. 0.]
AR coefficients: [-0.52571491 -0.84496178]
AR coefficients: [-0.6104153 -0.86656458]
AR coefficients: [-0.60439494 -0.857867 ]
AR coefficients: [-0.6048218 -0.85846157]
AR coefficients: [-0.60479146 -0.85841922]
>>> results.params
array([-0.66661205, 1.60850853])
>>> results.tvalues
array([ -2.10304127, 21.8047269 ])
>>> print(results.t_test([1, 0]))
<T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>
Or, equivalently
>>> model2 = sm.GLSAR(Y, X, rho=2)
>>> res = model2.iterative_fit(maxiter=6)
>>> model2.rho
array([-0.60479146, -0.85841922])
Notes
-----
GLSAR is considered to be experimental.
The linear autoregressive process of order p--AR(p)--is defined as:
TODO
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs):
#this looks strange, interpreting rho as order if it is int
if isinstance(rho, np.int):
self.order = rho
self.rho = np.zeros(self.order, np.float64)
else:
self.rho = np.squeeze(np.asarray(rho))
if len(self.rho.shape) not in [0,1]:
raise ValueError("AR parameters must be a scalar or a vector")
if self.rho.shape == ():
self.rho.shape = (1,)
self.order = self.rho.shape[0]
if exog is None:
#JP this looks wrong, should be a regression on constant
#results for rho estimate now identical to yule-walker on y
#super(AR, self).__init__(endog, add_constant(endog))
super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)),
missing=missing, **kwargs)
else:
super(GLSAR, self).__init__(endog, exog, missing=missing,
**kwargs)
def iterative_fit(self, maxiter=3, rtol=1e-4, **kwds):
"""
Perform an iterative two-stage procedure to estimate a GLS model.
The model is assumed to have AR(p) errors, AR(p) parameters and
regression coefficients are estimated iteratively.
Parameters
----------
maxiter : integer, optional
the number of iterations
rtol : float, optional
Relative tolerance between estimated coefficients to stop the
estimation. Stops if
max(abs(last - current) / abs(last)) < rtol
"""
# TODO: update this after going through example.
converged = False
i = -1 # need to initialize for maxiter < 1 (skip loop)
history = {'params': [], 'rho':[self.rho]}
for i in range(maxiter - 1):
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit()
history['params'].append(results.params)
if i == 0:
last = results.params
else:
diff = np.max(np.abs(last - results.params) / np.abs(last))
if diff < rtol:
converged = True
break
last = results.params
self.rho, _ = yule_walker(results.resid,
order=self.order, df=None)
history['rho'].append(self.rho)
# why not another call to self.initialize
# Use kwarg to insert history
if not converged and maxiter > 0:
# maxiter <= 0 just does OLS
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
# if converged then this is a duplicate fit, because we didn't update rho
results = self.fit(history=history, **kwds)
results.iter = i + 1
# add last fit to history, not if duplicate fit
if not converged:
results.history['params'].append(results.params)
results.iter += 1
results.converged = converged
return results
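    # Editorial sketch (assumption): typical two-stage usage mirrors the class
    # docstring above.
    #
    #   >>> res = GLSAR(Y, X, rho=1).iterative_fit(maxiter=10, rtol=1e-5)
    #   >>> res.model.rho   # AR coefficients from the final Yule-Walker step
    #   >>> res.converged   # True if the parameter change fell below rtol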
def whiten(self, X):
"""
Whiten a series of columns according to an AR(p)
covariance structure. This drops initial p observations.
Parameters
----------
X : array-like
The data to be whitened,
Returns
-------
whitened array
"""
#TODO: notation for AR process
X = np.asarray(X, np.float64)
_X = X.copy()
#the following loops over the first axis, works for 1d and nd
for i in range(self.order):
_X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]
return _X[self.order:]
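    # Editorial note: for an AR(1) model with rho = [r], the loop above reduces
    # to X[1:] - r * X[:-1], i.e. the quasi-differenced series with the first
    # observation dropped; higher orders subtract further lags and drop p rows.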
def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
"""
Estimate AR(p) parameters from a sequence X using Yule-Walker equation.
Unbiased or maximum-likelihood estimator (mle)
See, for example:
http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
Parameters
----------
X : array-like
1d array
order : integer, optional
The order of the autoregressive process. Default is 1.
method : string, optional
Method can be "unbiased" or "mle" and this determines denominator in
estimate of autocorrelation function (ACF) at lag k. If "mle", the
denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
The default is unbiased.
df : integer, optional
Specifies the degrees of freedom. If `df` is supplied, then it is assumed
the X has `df` degrees of freedom rather than `n`. Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is False.
demean : bool
True, the mean is subtracted from `X` before estimation.
Returns
-------
rho
The autoregressive coefficients
sigma
TODO
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma = sm.regression.yule_walker(data.endog,
order=4, method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351
"""
#TODO: define R better, look back at notes and technical notes on YW.
#First link here is useful
#http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
method = str(method).lower()
if method not in ["unbiased", "mle"]:
raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
X = np.array(X, dtype=np.float64)
if demean:
        X -= X.mean() # automatically demeans X
n = df or X.shape[0]
if method == "unbiased": # this is df_resid ie., n - p
denom = lambda k: n - k
else:
denom = lambda k: n
if X.ndim > 1 and X.shape[1] != 1:
raise ValueError("expecting a vector to estimate AR parameters")
r = np.zeros(order+1, np.float64)
r[0] = (X**2).sum() / denom(0)
for k in range(1,order+1):
r[k] = (X[0:-k]*X[k:]).sum() / denom(k)
R = toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
sigmasq = r[0] - (r[1:]*rho).sum()
if inv==True:
return rho, np.sqrt(sigmasq), np.linalg.inv(R)
else:
return rho, np.sqrt(sigmasq)
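# Editorial restatement of the system solved above: with autocovariances
# r[0], ..., r[p], R = toeplitz(r[:p]) and the AR coefficients satisfy
# R @ rho = r[1:], while sigma**2 = r[0] - r[1:] @ rho.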
class RegressionResults(base.LikelihoodModelResults):
"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Returns
-------
**Attributes**
aic
Aikake's information criteria. For a model with a constant
:math:`-2llf + 2(df_model + 1)`. For a model without a constant
:math:`-2llf + 2(df_model)`.
bic
        Bayes' information criterion. For a model with a constant
:math:`-2llf + \log(n)(df_model+1)`. For a model without a constant
:math:`-2llf + \log(n)(df_model)`
bse
The standard errors of the parameter estimates.
pinv_wexog
See specific model class docstring
centered_tss
The total (weighted) sum of squares centered about the mean.
cov_HC0
Heteroscedasticity robust covariance matrix. See HC0_se below.
cov_HC1
Heteroscedasticity robust covariance matrix. See HC1_se below.
cov_HC2
Heteroscedasticity robust covariance matrix. See HC2_se below.
cov_HC3
Heteroscedasticity robust covariance matrix. See HC3_se below.
cov_type
Parameter covariance estimator used for standard errors and t-stats
df_model
        Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
ess
Explained sum of squares. If a constant is present, the centered
total sum of squares minus the sum of squared residuals. If there is
no constant, the uncentered total sum of squares is used.
fvalue
F-statistic of the fully specified model. Calculated as the mean
squared error of the model divided by the mean squared error of the
residuals.
f_pvalue
p-value of the F-statistic
fittedvalues
        The predicted values for the original (unwhitened) design.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
history
Estimation history for iterative estimators
HC0_se
White's (1980) heteroskedasticity robust standard errors.
Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1)
where e_i = resid[i]
HC0_se is a cached property.
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is just
resid**2.
HC1_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag(n/(n-p)*HC_0)
        HC1_se is a cached property.
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
n/(n-p)*resid**2.
HC2_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
        HC2_se is a cached property.
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii).
HC3_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
        HC3_se is a cached property.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii)^(2).
model
A pointer to the model instance that called fit() or results.
mse_model
        Mean squared error of the model. This is the explained sum of squares
divided by the model degrees of freedom.
mse_resid
Mean squared error of the residuals. The sum of squared residuals
divided by the residual degrees of freedom.
mse_total
Total mean squared error. Defined as the uncentered total sum of
squares divided by n the number of observations.
nobs
Number of observations n.
normalized_cov_params
See specific model class docstring
params
The linear coefficients that minimize the least squares criterion. This
is usually called Beta for the classical linear model.
pvalues
The two-tailed p values for the t-stats of the params.
resid
The residuals of the model.
resid_pearson
`wresid` normalized to have unit variance.
rsquared
R-squared of a model with an intercept. This is defined here as
1 - `ssr`/`centered_tss` if the constant is included in the model and
1 - `ssr`/`uncentered_tss` if the constant is omitted.
rsquared_adj
Adjusted R-squared. This is defined here as
1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included
and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included.
scale
A scale factor for the covariance matrix.
Default value is ssr/(n-p). Note that the square root of `scale` is
often called the standard error of the regression.
ssr
Sum of squared (whitened) residuals.
uncentered_tss
Uncentered sum of squares. Sum of the squared values of the
(whitened) endogenous response variable.
wresid
The residuals of the transformed/whitened regressand and regressor(s)
"""
_cache = {} # needs to be a class attribute for scale setter?
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
super(RegressionResults, self).__init__(model, params,
normalized_cov_params,
scale)
self._cache = resettable_cache()
if hasattr(model, 'wexog_singular_values'):
self._wexog_singular_values = model.wexog_singular_values
else:
self._wexog_singular_values = None
self.df_model = model.df_model
self.df_resid = model.df_resid
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
if use_t is None:
self.use_t = True # TODO: class default
else:
if cov_kwds is None:
cov_kwds = {}
if 'use_t' in cov_kwds:
# TODO: we want to get rid of 'use_t' in cov_kwds
use_t_2 = cov_kwds.pop('use_t')
if use_t is None:
use_t = use_t_2
# TODO: warn or not?
self.get_robustcov_results(cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
for key in kwargs:
setattr(self, key, kwargs[key])
def __str__(self):
        return str(self.summary())
def conf_int(self, alpha=.05, cols=None):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
return ci
@cache_readonly
def nobs(self):
return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
return self.model.wendog - self.model.predict(self.params,
self.model.wexog)
@cache_readonly
def resid(self):
return self.model.endog - self.model.predict(self.params,
self.model.exog)
#TODO: fix writable example
@cache_writable()
def scale(self):
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
wresid = self.wresid
return np.dot(wresid, wresid)
@cache_readonly
def centered_tss(self):
model = self.model
weights = getattr(model, 'weights', None)
if weights is not None:
return np.sum(weights*(model.endog - np.average(model.endog,
weights=weights))**2)
else: # this is probably broken for GLS
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
wendog = self.model.wendog
return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared)
@cache_readonly
def mse_model(self):
return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
@cache_readonly
def fvalue(self):
if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
            # TODO: What if model includes implicit constant, e.g. all dummies but no constant regressor?
            # TODO: Restate as LM test by projecting/orthogonalizing onto the constant?
if self.model.data.k_constant == 1:
# if constant is implicit, return nan see #2444
if const_idx is None:
return np.nan
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = ft.pvalue
return ft.fvalue
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid
@cache_readonly
def f_pvalue(self):
return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
self.k_constant))
@cache_readonly
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T, self.model.wexog))
return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest eigenvalue.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1])
#TODO: make these properties reset bse
def _HCCM(self, scale):
H = np.dot(self.model.pinv_wexog,
scale[:,None]*self.model.pinv_wexog.T)
return H
@cache_readonly
def cov_HC0(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0
@cache_readonly
def cov_HC1(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1
@cache_readonly
def cov_HC2(self):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2
@cache_readonly
def cov_HC3(self):
"""
See statsmodels.RegressionResults
"""
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale=(self.wresid/(1-h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3
@cache_readonly
def HC0_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC0))
@cache_readonly
def HC1_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC1))
@cache_readonly
def HC2_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC2))
@cache_readonly
def HC3_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC3))
@cache_readonly
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
An array wresid/sqrt(scale)
"""
if not hasattr(self, 'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# don't divide if scale is zero close to numerical precision
from warnings import warn
warn("All residuals are 0, cannot compute normed residuals.",
RuntimeWarning)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale)
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
        A model nests another model if the regressors in the smaller model are spanned
by the regressors in the larger model and the regressand is identical.
"""
if self.model.nobs != restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:,None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of regressors
return np.allclose(score_l2,0)
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""Use Lagrange Multiplier test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
demean : bool
            Flag indicating whether to demean the scores based on the residuals
            from the restricted model. If True, the covariance of the scores
            is used and the LM test is identical to the large sample version
of the LR test.
Returns
-------
lm_value : float
test statistic, chi2 distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
TODO: explain LM text
"""
import statsmodels.stats.sandwich_covariance as sw
from numpy.linalg import inv
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:,None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:,None]
demean = False
if demean:
scores = scores - scores.mean(0)[None,:]
        # Form matters here. If homoskedastic, this can be sigma^2 (X'X)^-1
        # If heteroskedastic, then the form below is fine
        # If HAC, then need to use HAC
        # If cluster, should use the cluster-robust form
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
XpX = np.dot(wexog.T,wexog) / n
Sinv = inv(sigma2 * XpX)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
Sinv = inv(np.dot(scores.T,scores) / n)
elif cov_type == 'HAC':
print("HAC")
maxlags = self.cov_kwds['maxlags']
Sinv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
#cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
Sinv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * chain_dot(s,Sinv,s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff
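    # Editorial usage sketch (names are illustrative, not from the source):
    #   lm, pval, df_diff = full_results.compare_lm_test(restricted_results)
    # A small pval is evidence against the restricted specification.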
def compare_f_test(self, restricted):
"""use F test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
f_value : float
test statistic, F distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two models.
This is not a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results under the assumption of homoscedasticity
and no autocorrelation (sphericity).
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
likelihood ratio, chisquare distributed with df_diff degrees of
freedom
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
The exact likelihood ratio is valid for homoskedastic data, and is
defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\mathcal{L}` is the likelihood of the model. With :math:`D`
distributed as chisquare with df equal to difference in number of
parameters or equivalently difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the residuals from
null model and the regressors from the alternative model and :math:`S`
is the covariance of the scores, :math:`s_{i}`. The covariance of the
scores is estimated using the same estimator as in the alternative model.
This test compares the loglikelihood of the two models.
This may not be a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results without taking unspecified
heteroscedasticity or correlation into account.
TODO: put into separate function, needs tests
"""
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('Likelihood Ratio test is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : string
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`true` if the cov_type is nonrobust, and `false` in all other cases.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' and optional keyword argument 'scale' which uses
a predefined scale estimate with default equal to one.
- 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
heteroscedasticity robust covariance
- 'HAC' and keywords
            - `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` bool (optional) : If true, use small sample
correction
- 'cluster' and required keyword `groups`, integer group indicator
- `groups` array_like, integer (required) :
index of clusters or groups
- `use_correction` bool (optional) :
                If True the sandwich covariance is calculated with a small
                sample correction.
                If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is adjusted.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and
autocorrelation robust standard errors in panel data
keywords
- `time` array_like (required) : index of time periods
            - `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
                If False the sandwich covariance is calculated without
small sample correction.
If `use_correction = 'cluster'` (default), then the same
small sample correction as in the case of 'covtype='cluster''
is used.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
- 'hac-panel' heteroscedasticity and autocorrelation robust standard
errors in panel data.
The data needs to be sorted in this case, the time series for
each panel unit or cluster need to be stacked.
keywords
- `time` array_like (required) : index of time periods
            - `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
                If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
Reminder:
`use_correction` in "nw-groupsum" and "nw-panel" is not bool,
needs to be in [False, 'hac', 'cluster']
TODO: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
"""
import statsmodels.stats.sandwich_covariance as sw
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t':use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'nw-panel', 'nw-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
            # user didn't explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = ('Standard Errors are based on ' +
'fixed scale')
res.cov_kwds['scale'] = scale = kwds.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
                raise ValueError('heteroscedasticity robust covariance ' +
'does not use keywords')
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'robust ' + '(' + cov_type + ')')
# TODO cannot access cov without calling se first
getattr(self, cov_type.upper() + '_se')
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type == 'HAC':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'and autocorrelation robust (HAC) using %d lags and %s small ' +
'sample correction') % (maxlags, ['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
use_correction=use_correction)
elif cov_type == 'cluster':
#cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(self, groups,
use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:,0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(self, groups,
use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
            res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-panel':
#cluster robust standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(np.diff(time) < 0)[0] + 1).tolist()
groupidx = lzip([0] + tt, tt + [len(time)])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
            res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(np.diff(time) < 0)[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = (
'Driscoll and Kraay Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
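    # Editorial usage sketch (assumed typical call, not taken from the source):
    #   robust = results.get_robustcov_results(cov_type='HAC', maxlags=4)
    #   robust.bse  # HAC standard errors now back bse, t-stats and p-values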
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, **kwds):
return pred.get_prediction(self, exog=exog, transform=transform,
weights=weights, row_labels=row_labels, **kwds)
get_prediction.__doc__ = pred.get_prediction.__doc__
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
-----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## in p the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
#TODO: import where we need it (for now), add as cached attributes
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest, durbin_watson)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
#TODO not used yet
#diagn_left_header = ['Models stats']
#diagn_right_header = ['Residual stats']
#TODO: requiring list/iterable is a bit annoying
#need more control over formatting
#TODO: default don't work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
('Df Model:', None), #[self.df_model])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue] ),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
#add warnings/notes, added to text format only
etext =[]
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: #TODO: what is recommended
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = ["[{0}] {1}".format(i + 1, text) for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
smry.add_extra_txt(etext)
return smry
#top = summary_top(self, gleft=topleft, gright=diagn_left, #[],
# yname=yname, xname=xname,
# title=self.model.__class__.__name__ + ' ' +
# "Regression Results")
#par = summary_params(self, yname=yname, xname=xname, alpha=.05,
# use_t=False)
#
#diagn = summary_top(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="Linear Model")
#
#return summary_return([top, par, diagn], return_fmt=return_fmt)
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function to summarize the regression results
Parameters
-----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Diagnostics
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest,
durbin_watson)
from statsmodels.compat.collections import OrderedDict
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
eigvals = np.sort(eigvals) #in increasing order
diagnostic = OrderedDict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
# Warnings
if eigvals[-1] < 1e-10:
warn = "The smallest eigenvalue is %6.3g. This might indicate that\
there are strong multicollinearity problems or that the design\
matrix is singular." % eigvals[-1]
smry.add_text(warn)
if condno > 1000:
warn = "* The condition number is large (%.g). This might indicate \
strong multicollinearity or other numerical problems." % condno
smry.add_text(warn)
return smry
class OLSResults(RegressionResults):
"""
Results class for for an OLS model.
Most of the methods and attributes are inherited from RegressionResults.
The special methods that are only available for OLS are:
- get_influence
- outlier_test
- el_test
- conf_int_el
See Also
--------
RegressionResults
"""
def get_influence(self):
"""
get an instance of Influence with influence and outlier measures
Returns
-------
infl : Influence instance
the instance has methods to calculate the main influence and
outlier measures for the OLS regression
See also
--------
:class:`statsmodels.stats.outliers_influence.OLSInfluence`
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self)
def outlier_test(self, method='bonf', alpha=.05):
"""
Test observations for outliers according to method
Parameters
----------
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha)
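    # Editorial usage sketch (assumption): rows of
    #   results.outlier_test(method='bonf', alpha=0.05)
    # whose Bonferroni-adjusted p-value falls below alpha are candidate outliers.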
def el_test(self, b0_vals, param_nums, return_weights=0,
ret_params=0, method='nm',
stochastic_exog=1, return_params=0):
"""
Tests single or joint hypotheses of the regression parameters using
Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested
param_nums : 1darray
The parameter number to be tested
        return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. Default is False
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. Default is False
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
stochastic_exog : bool
When TRUE, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. Default = TRUE
Returns
-------
res : tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
>>> array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])
>>> fitted.rsquared
>>> 0.91357690446068196
>>> # Test that the slope on the first variable is 0
        >>> fitted.el_test([0], [1])
>>> (1.7894660442330235e-07, 27.248146353709153)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress([],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000,
maxiter=10000, full_output=1, disp=0,
args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None,
method='nm', stochastic_exog=1):
"""
Computes the confidence interval for the parameter given by param_num
using Empirical Likelihood
Parameters
----------
param_num : float
The parameter for which the confidence interval is desired
sig : float
The significance level. Default is .05
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
Returns
-------
ci : tuple
The confidence interval
See Also
--------
el_test
Notes
-----
This function uses brentq to find the value of beta where
test_beta([beta], param_num)[1] is equal to the critical
value.
The function returns the results of each iteration of brentq at
each value of beta.
The current function value of the last printed optimization
should be the critical value at the desired significance level.
For alpha=.05, the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to
do el_test([lower_limit], [param_num])
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
"""
r0 = stats.chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0]-r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl)
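    # Editorial usage sketch (assumption): an empirical-likelihood 95% interval
    # for the parameter numbered 1 would be
    #   lower, upper = fitted.conf_int_el(1)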
class RegressionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'chisq' : 'columns',
'sresid' : 'rows',
'weights' : 'rows',
'wresid' : 'rows',
'bcov_unscaled' : 'cov',
'bcov_scaled' : 'cov',
'HC0_se' : 'columns',
'HC1_se' : 'columns',
'HC2_se' : 'columns',
'HC3_se' : 'columns',
'norm_resid' : 'rows',
}
_wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
base.LikelihoodResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RegressionResultsWrapper,
RegressionResults)
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_results = OLS(data.endog, data.exog).fit() #results
gls_results = GLS(data.endog, data.exog).fit() #results
print(ols_results.summary())
tables = ols_results.summary(returns='tables')
csv = ols_results.summary(returns='csv')
"""
Summary of Regression Results
=======================================
| Dependent Variable: ['y']|
| Model: OLS|
| Method: Least Squares|
| Date: Tue, 29 Jun 2010|
| Time: 22:32:21|
| # obs: 16.0|
| Df residuals: 9.0|
| Df model: 6.0|
===========================================================================
| coefficient std. error t-statistic prob.|
---------------------------------------------------------------------------
| x1 15.0619 84.9149 0.1774 0.8631|
| x2 -0.0358 0.0335 -1.0695 0.3127|
| x3 -2.0202 0.4884 -4.1364 0.002535|
| x4 -1.0332 0.2143 -4.8220 0.0009444|
| x5 -0.0511 0.2261 -0.2261 0.8262|
| x6 1829.1515 455.4785 4.0159 0.003037|
| const -3482258.6346 890420.3836 -3.9108 0.003560|
===========================================================================
| Models stats Residual stats |
---------------------------------------------------------------------------
| R-squared: 0.995479 Durbin-Watson: 2.55949 |
| Adjusted R-squared: 0.992465 Omnibus: 0.748615 |
| F-statistic: 330.285 Prob(Omnibus): 0.687765 |
| Prob (F-statistic): 4.98403e-10 JB: 0.352773 |
| Log likelihood: -109.617 Prob(JB): 0.838294 |
| AIC criterion: 233.235 Skew: 0.419984 |
| BIC criterion: 238.643 Kurtosis: 2.43373 |
---------------------------------------------------------------------------
"""
| bsd-3-clause | 6,871,677,632,340,146,000 | 36.156691 | 232 | 0.560105 | false |
openai/cleverhans | cleverhans/model_zoo/madry_lab_challenges/cifar10_model.py | 2 | 10334 | """cleverhans.model.Model implementation of cifar10_challenge.model.Model
This re-implementation factors variable creation apart from forward
propagation so it is possible to run forward propagation more than once
in the same model.
based on https://github.com/tensorflow/models/tree/master/resnet
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from cleverhans.serial import NoRefModel
class Layer(object):
def get_output_shape(self):
return self.output_shape
class ResNet(NoRefModel):
"""ResNet model."""
def __init__(self, layers, input_shape, scope=None):
"""ResNet constructor.
:param layers: a list of layers in CleverHans format
each with set_input_shape() and fprop() methods.
    :param input_shape: 4-tuple describing input shape (e.g. (None, 32, 32, 3))
:param scope: string name of scope for Variables
This works in two ways.
If scope is None, the variables are not put in a scope, and the
model is compatible with Saver.restore from the public downloads
for the CIFAR10 Challenge.
If the scope is a string, then Saver.restore won't work, but the
    model functions as a picklable NoRefModel that finds its variables
based on the scope.
"""
super(ResNet, self).__init__(scope, 10, {}, scope is not None)
if scope is None:
before = list(tf.trainable_variables())
before_vars = list(tf.global_variables())
self.build(layers, input_shape)
after = list(tf.trainable_variables())
after_vars = list(tf.global_variables())
self.params = [param for param in after if param not in before]
self.vars = [var for var in after_vars if var not in before_vars]
else:
with tf.variable_scope(self.scope):
self.build(layers, input_shape)
def get_vars(self):
if hasattr(self, "vars"):
return self.vars
return super(ResNet, self).get_vars()
def build(self, layers, input_shape):
self.layer_names = []
self.layers = layers
self.input_shape = input_shape
if isinstance(layers[-1], Softmax):
layers[-1].name = 'probs'
layers[-2].name = 'logits'
else:
layers[-1].name = 'logits'
for i, layer in enumerate(self.layers):
if hasattr(layer, 'name'):
name = layer.name
else:
name = layer.__class__.__name__ + str(i)
layer.name = name
self.layer_names.append(name)
layer.set_input_shape(input_shape)
input_shape = layer.get_output_shape()
def make_input_placeholder(self):
return tf.placeholder(tf.float32, (None, 32, 32, 3))
def make_label_placeholder(self):
return tf.placeholder(tf.float32, (None, 10))
def fprop(self, x, set_ref=False):
if self.scope is not None:
with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
return self._fprop(x, set_ref)
    return self._fprop(x, set_ref)
def _fprop(self, x, set_ref=False):
states = []
for layer in self.layers:
if set_ref:
layer.ref = x
x = layer.fprop(x)
assert x is not None
states.append(x)
states = dict(zip(self.layer_names, states))
return states
def add_internal_summaries(self):
pass
def _stride_arr(stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
class Input(Layer):
def __init__(self):
pass
def set_input_shape(self, input_shape):
batch_size, rows, cols, input_channels = input_shape
# assert self.mode == 'train' or self.mode == 'eval'
"""Build the core model within the graph."""
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = batch_size
self.output_shape = tuple(output_shape)
def fprop(self, x):
with tf.variable_scope('input', reuse=tf.AUTO_REUSE):
input_standardized = tf.map_fn(
lambda img: tf.image.per_image_standardization(img), x)
return _conv('init_conv', input_standardized,
3, 3, 16, _stride_arr(1))
class Conv2D(Layer):
def __init__(self):
pass
def set_input_shape(self, input_shape):
batch_size, rows, cols, input_channels = input_shape
# Uncomment the following codes to use w28-10 wide residual network.
# It is more memory efficient than very deep residual network and has
# comparably good performance.
# https://arxiv.org/pdf/1605.07146v1.pdf
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = batch_size
self.output_shape = tuple(output_shape)
def fprop(self, x):
# Update hps.num_residual_units to 9
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
filters = [16, 160, 320, 640]
res_func = _residual
with tf.variable_scope('unit_1_0', reuse=tf.AUTO_REUSE):
x = res_func(x, filters[0], filters[1], _stride_arr(strides[0]),
activate_before_residual[0])
for i in range(1, 5):
with tf.variable_scope(('unit_1_%d' % i), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[1], filters[1],
_stride_arr(1), False)
with tf.variable_scope(('unit_2_0'), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[1], filters[2], _stride_arr(strides[1]),
activate_before_residual[1])
for i in range(1, 5):
with tf.variable_scope(('unit_2_%d' % i), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[2], filters[2],
_stride_arr(1), False)
with tf.variable_scope(('unit_3_0'), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[2], filters[3], _stride_arr(strides[2]),
activate_before_residual[2])
for i in range(1, 5):
with tf.variable_scope(('unit_3_%d' % i), reuse=tf.AUTO_REUSE):
x = res_func(x, filters[3], filters[3],
_stride_arr(1), False)
with tf.variable_scope(('unit_last'), reuse=tf.AUTO_REUSE):
x = _batch_norm('final_bn', x)
x = _relu(x, 0.1)
x = _global_avg_pool(x)
return x
class Linear(Layer):
def __init__(self, num_hid):
self.num_hid = num_hid
def set_input_shape(self, input_shape):
batch_size, dim = input_shape
self.input_shape = [batch_size, dim]
self.dim = dim
self.output_shape = [batch_size, self.num_hid]
self.make_vars()
def make_vars(self):
with tf.variable_scope('logit', reuse=tf.AUTO_REUSE):
w = tf.get_variable(
'DW', [self.dim, self.num_hid],
initializer=tf.initializers.variance_scaling(
distribution='uniform'))
b = tf.get_variable('biases', [self.num_hid],
initializer=tf.initializers.constant())
return w, b
def fprop(self, x):
w, b = self.make_vars()
return tf.nn.xw_plus_b(x, w, b)
def _batch_norm(name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(
inputs=x,
decay=.9,
center=True,
scale=True,
activation_fn=None,
updates_collections=None,
is_training=False)
def _residual(x, in_filter, out_filter, stride,
activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = _batch_norm('init_bn', x)
x = _relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = _batch_norm('init_bn', x)
x = _relu(x, 0.1)
with tf.variable_scope('sub1'):
x = _conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = _batch_norm('bn2', x)
x = _relu(x, 0.1)
x = _conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
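    # Shortcut branch of the residual unit: when the channel count grows, the
    # unmodified input is average-pooled to the new spatial size and zero-padded
    # along the channel axis so it can be added to the transformed branch
    # without introducing extra parameters.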
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0],
[0, 0], [(out_filter - in_filter) // 2,
(out_filter - in_filter) // 2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay():
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _global_avg_pool(x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class Softmax(Layer):
def __init__(self):
pass
def set_input_shape(self, shape):
self.input_shape = shape
self.output_shape = shape
def fprop(self, x):
return tf.nn.softmax(x)
class Flatten(Layer):
def __init__(self):
pass
def set_input_shape(self, shape):
self.input_shape = shape
output_width = 1
for factor in shape[1:]:
output_width *= factor
self.output_width = output_width
self.output_shape = [None, output_width]
def fprop(self, x):
return tf.reshape(x, [-1, self.output_width])
def make_wresnet(nb_classes=10, input_shape=(None, 32, 32, 3), scope=None):
layers = [Input(),
Conv2D(), # the whole ResNet is basically created in this layer
Flatten(),
Linear(nb_classes),
Softmax()]
model = ResNet(layers, input_shape, scope)
return model
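# Illustrative usage sketch only (not part of the original module); the session
# handling below assumes a typical TF1-style graph setup.
#
#   model = make_wresnet(nb_classes=10)
#   x = model.make_input_placeholder()           # (None, 32, 32, 3) float32
#   outputs = model.fprop(x)                     # dict of per-layer activations
#   probs = outputs[model.layer_names[-1]]       # output of the final Softmax layer
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       preds = sess.run(probs, feed_dict={x: images})  # `images` is hypothetical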
| mit | 6,172,888,378,718,482,000 | 29.394118 | 76 | 0.613993 | false |
taigaio/taiga-back | taiga/hooks/api.py | 1 | 2892 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from taiga.base import exceptions as exc
from taiga.base import response
from taiga.base.api.viewsets import GenericViewSet
from taiga.base.utils import json
from taiga.projects.models import Project
from .exceptions import ActionSyntaxException
class BaseWebhookApiViewSet(GenericViewSet):
    # We don't want rest framework to parse the request body and turn it into
    # a dict in request.DATA; we need it raw
parser_classes = ()
# This dict associates the event names we are listening for
# with their responsible classes (extending event_hooks.BaseEventHook)
event_hook_classes = {}
def _validate_signature(self, project, request):
        raise NotImplementedError
def _get_project(self, request):
project_id = request.GET.get("project", None)
try:
project = Project.objects.get(id=project_id)
return project
except (ValueError, Project.DoesNotExist):
return None
def _get_payload(self, request):
try:
payload = json.loads(request.body.decode("utf-8"))
except ValueError:
raise exc.BadRequest(_("The payload is not valid json"))
return payload
def _get_event_name(self, request):
        raise NotImplementedError
def create(self, request, *args, **kwargs):
project = self._get_project(request)
if not project:
raise exc.BadRequest(_("The project doesn't exist"))
if not self._validate_signature(project, request):
raise exc.BadRequest(_("Bad signature"))
if project.blocked_code is not None:
raise exc.Blocked(_("Blocked element"))
event_name = self._get_event_name(request)
payload = self._get_payload(request)
event_hook_class = self.event_hook_classes.get(event_name, None)
if event_hook_class is not None:
event_hook = event_hook_class(project, payload)
try:
event_hook.process_event()
except ActionSyntaxException as e:
raise exc.BadRequest(e)
return response.NoContent()
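# Illustrative sketch only (hypothetical names, not part of this module): a
# concrete service wires its event names to hook classes and fills in the two
# abstract methods above.
#
#   class ExampleWebhookApiViewSet(BaseWebhookApiViewSet):
#       event_hook_classes = {
#           "push": ExamplePushEventHook,        # assumed hook implementation
#       }
#
#       def _validate_signature(self, project, request):
#           # e.g. compare a shared secret from the query string against the
#           # secret stored for this project's integration
#           return request.GET.get("key") == get_stored_secret(project)
#
#       def _get_event_name(self, request):
#           return request.META.get("HTTP_X_EXAMPLE_EVENT")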
| agpl-3.0 | 7,278,701,815,280,412,000 | 34.703704 | 80 | 0.677732 | false |
saeki-masaki/cinder | cinder/tests/unit/test_ibm_flashsystem.py | 1 | 47040 | # Copyright 2014 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Tests for the IBM FlashSystem volume driver.
"""
import mock
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
import random
import re
from cinder import context
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import flashsystem
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class FlashSystemManagementSimulator(object):
def __init__(self):
# Default protocol is FC
self._protocol = 'FC'
self._volumes_list = {}
self._hosts_list = {}
self._mappings_list = {}
self._next_cmd_error = {
'lsnode': '',
'lssystem': '',
'lsmdiskgrp': ''
}
self._errors = {
# CMMVC50000 is a fake error which indicates that command has not
# got expected results. This error represents kinds of CLI errors.
'CMMVC50000': ('', 'CMMVC50000 The command can not be executed '
'successfully.')
}
@staticmethod
def _find_unused_id(d):
ids = []
for v in d.values():
ids.append(int(v['id']))
ids.sort()
for index, n in enumerate(ids):
if n > index:
return six.text_type(index)
return six.text_type(len(ids))
@staticmethod
def _is_invalid_name(name):
if re.match(r'^[a-zA-Z_][\w ._-]*$', name):
return False
return True
@staticmethod
def _cmd_to_dict(arg_list):
no_param_args = [
'bytes',
'force'
]
one_param_args = [
'delim',
'hbawwpn',
'host',
'iogrp',
'iscsiname',
'mdiskgrp',
'name',
'scsi',
'size',
'unit'
]
# All commands should begin with svcinfo or svctask
if arg_list[0] not in ('svcinfo', 'svctask') or len(arg_list) < 2:
raise exception.InvalidInput(reason=six.text_type(arg_list))
ret = {'cmd': arg_list[1]}
arg_list.pop(0)
skip = False
for i in range(1, len(arg_list)):
if skip:
skip = False
continue
if arg_list[i][0] == '-':
param = arg_list[i][1:]
if param in no_param_args:
ret[param] = True
elif param in one_param_args:
ret[param] = arg_list[i + 1]
skip = True
else:
raise exception.InvalidInput(
reason=('unrecognized argument %s') % arg_list[i])
else:
ret['obj'] = arg_list[i]
return ret
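    # For example, a call such as
    #   _cmd_to_dict(['svctask', 'mkvdisk', '-name', 'v1', '-size', '10', '-unit', 'gb'])
    # returns {'cmd': 'mkvdisk', 'name': 'v1', 'size': '10', 'unit': 'gb'}.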
@staticmethod
def _print_cmd_info(rows, delim=' ', nohdr=False, **kwargs):
"""Generic function for printing information."""
if nohdr:
del rows[0]
for index in range(len(rows)):
rows[index] = delim.join(rows[index])
return ('%s' % '\n'.join(rows), '')
@staticmethod
def _convert_units_bytes(num, unit):
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
while unit.lower() != unit_array[unit_index].lower():
num = num * 1024
unit_index += 1
return six.text_type(num)
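    # For example, _convert_units_bytes(10, 'GB') multiplies by 1024 three times
    # and returns '10737418240' (10 * 1024 ** 3) as a string.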
def _cmd_lshost(self, **kwargs):
"""svcinfo lshost -delim !
svcinfo lshost -delim ! <host>
"""
if 'obj' not in kwargs:
rows = []
rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
for host in self._hosts_list.values():
rows.append([host['id'], host['host_name'], '1', '1',
'degraded'])
if len(rows) > 1:
return self._print_cmd_info(rows=rows, **kwargs)
else:
return ('', '')
else:
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
host = self._hosts_list[host_name]
rows = []
rows.append(['id', host['id']])
rows.append(['name', host['host_name']])
rows.append(['port_count', '1'])
rows.append(['type', 'generic'])
rows.append(['mask', '1111'])
rows.append(['iogrp_count', '1'])
rows.append(['status', 'degraded'])
for port in host['iscsi_names']:
rows.append(['iscsi_name', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'offline'])
for port in host['wwpns']:
rows.append(['WWPN', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'active'])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
def _cmd_lshostvdiskmap(self, **kwargs):
"""svcinfo lshostvdiskmap -delim ! <host_name>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
rows = []
rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
'vdisk_UID'])
for mapping in self._mappings_list.values():
if (host_name == '') or (mapping['host'] == host_name):
volume = self._volumes_list[mapping['vol']]
rows.append([mapping['id'], mapping['host'],
mapping['lun'], volume['id'],
volume['name'], volume['vdisk_UID']])
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsmdiskgrp(self, **kwargs):
"""svcinfo lsmdiskgrp -gui -bytes -delim ! <pool>"""
status = 'online'
if self._next_cmd_error['lsmdiskgrp'] == 'error':
self._next_cmd_error['lsmdiskgrp'] = ''
return self._errors['CMMVC50000']
if self._next_cmd_error['lsmdiskgrp'] == 'status=offline':
self._next_cmd_error['lsmdiskgrp'] = ''
status = 'offline'
rows = [None] * 2
rows[0] = ['id', 'status', 'mdisk_count', 'vdisk_count', 'capacity',
'free_capacity', 'virtual_capacity', 'used_capacity',
'real_capacity', 'encrypted', 'type', 'encrypt']
rows[1] = ['0', status, '1', '0', '3573412790272',
'3529432325160', '1693247906775', '277841182',
'38203734097', 'no', 'parent', 'no']
if kwargs['obj'] == 'mdiskgrp0':
row = rows[1]
else:
return self._errors['CMMVC50000']
objrows = []
for idx, val in enumerate(rows[0]):
objrows.append([val, row[idx]])
if 'delim' in kwargs:
for index in range(len(objrows)):
objrows[index] = kwargs['delim'].join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
def _cmd_lsnode(self, **kwargs):
"""svcinfo lsnode -delim !
svcinfo lsnode -delim ! <node>
"""
if self._protocol == 'FC' or self._protocol == 'both':
port_status = 'active'
else:
port_status = 'unconfigured'
rows1 = [None] * 7
rows1[0] = ['name', 'node1']
rows1[1] = ['port_id', '000000000000001']
rows1[2] = ['port_status', port_status]
rows1[3] = ['port_speed', '8Gb']
rows1[4] = ['port_id', '000000000000001']
rows1[5] = ['port_status', port_status]
rows1[6] = ['port_speed', '8Gb']
rows2 = [None] * 7
rows2[0] = ['name', 'node2']
rows2[1] = ['port_id', '000000000000002']
rows2[2] = ['port_status', port_status]
rows2[3] = ['port_speed', '8Gb']
rows2[4] = ['port_id', '000000000000002']
rows2[5] = ['port_status', port_status]
rows2[6] = ['port_speed', 'N/A']
rows3 = [None] * 3
rows3[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status',
'IO_group_id', 'IO_group_name', 'config_node',
'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias',
'panel_name', 'enclosure_id', 'canister_id',
'enclosure_serial_number']
rows3[1] = ['1', 'node1', '', '0123456789ABCDEF', 'online', '0',
'io_grp0', 'yes', '', 'TR1', 'naa.0123456789ABCDEF', '',
'01-1', '1', '1', 'H441028']
rows3[2] = ['2', 'node2', '', '0123456789ABCDEF', 'online', '0',
'io_grp0', 'no', '', 'TR1', 'naa.0123456789ABCDEF', '',
'01-2', '1', '2', 'H441028']
if self._next_cmd_error['lsnode'] == 'error':
self._next_cmd_error['lsnode'] = ''
return self._errors['CMMVC50000']
rows = None
if 'obj' not in kwargs:
rows = rows3
elif kwargs['obj'] == '1':
rows = rows1
elif kwargs['obj'] == '2':
rows = rows2
else:
return self._errors['CMMVC50000']
if self._next_cmd_error['lsnode'] == 'header_mismatch':
rows[0].pop(2)
self._next_cmd_error['lsnode'] = ''
return self._print_cmd_info(rows=rows, delim=kwargs.get('delim', None))
def _cmd_lssystem(self, **kwargs):
"""svcinfo lssystem -delim !"""
open_access_enabled = 'off'
if self._next_cmd_error['lssystem'] == 'error':
self._next_cmd_error['lssystem'] = ''
return self._errors['CMMVC50000']
if self._next_cmd_error['lssystem'] == 'open_access_enabled=on':
self._next_cmd_error['lssystem'] = ''
open_access_enabled = 'on'
rows = [None] * 3
rows[0] = ['id', '0123456789ABCDEF']
rows[1] = ['name', 'flashsystem_1.2.3.4']
rows[2] = ['open_access_enabled', open_access_enabled]
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsportfc(self, **kwargs):
"""svcinfo lsportfc"""
if self._protocol == 'FC' or self._protocol == 'both':
status = 'active'
else:
status = 'unconfigured'
rows = [None] * 3
rows[0] = ['id', 'canister_id', 'adapter_id', 'port_id', 'type',
'port_speed', 'node_id', 'node_name', 'WWPN',
'nportid', 'status', 'attachment', 'topology']
rows[1] = ['0', '1', '1', '1', 'fc',
'8Gb', '1', 'node_1', 'AABBCCDDEEFF0011',
'000000', status, 'host', 'al']
rows[2] = ['1', '1', '1', '1', 'fc',
'8Gb', '1', 'node_1', 'AABBCCDDEEFF0010',
'000000', status, 'host', 'al']
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsportip(self, **kwargs):
"""svcinfo lsportip"""
if self._protocol == 'iSCSI' or self._protocol == 'both':
IP_address1 = '192.168.1.10'
IP_address2 = '192.168.1.11'
state = 'online'
speed = '8G'
else:
IP_address1 = ''
IP_address2 = ''
state = ''
speed = ''
rows = [None] * 3
rows[0] = ['id', 'node_id', 'node_name', 'canister_id', 'adapter_id',
'port_id', 'IP_address', 'mask', 'gateway', 'IP_address_6',
'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed',
'failover', 'link_state', 'host', 'host_6', 'vlan',
'vlan_6', 'adapter_location', 'adapter_port_id']
rows[1] = ['1', '1', 'node1', '0', '0',
'0', IP_address1, '', '', '',
'0', '', '11:22:33:44:55:AA', '', state, speed,
'no', 'active', '', '', '', '', '0', '0']
rows[2] = ['2', '2', 'node2', '0', '0',
'0', IP_address2, '', '', '',
'0', '', '11:22:33:44:55:BB', '', state, speed,
'no', 'active', '', '', '', '', '0', '0']
return self._print_cmd_info(rows=rows, **kwargs)
def _cmd_lsvdisk(self, **kwargs):
"""cmd: svcinfo lsvdisk -gui -bytes -delim ! <vdisk_name>"""
if 'obj' not in kwargs or (
'delim' not in kwargs) or (
'bytes' not in kwargs):
return self._errors['CMMVC50000']
if kwargs['obj'] not in self._volumes_list:
return self._errors['CMMVC50000']
vol = self._volumes_list[kwargs['obj']]
rows = []
rows.append(['id', vol['id']])
rows.append(['name', vol['name']])
rows.append(['status', vol['status']])
rows.append(['capacity', vol['capacity']])
rows.append(['vdisk_UID', vol['vdisk_UID']])
rows.append(['udid', ''])
rows.append(['open_access_scsi_id', '1'])
rows.append(['parent_mdisk_grp_id', '0'])
rows.append(['parent_mdisk_grp_name', 'mdiskgrp0'])
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
def _cmd_lsvdiskhostmap(self, **kwargs):
"""svcinfo lsvdiskhostmap -delim ! <vdisk_name>"""
if 'obj' not in kwargs or (
'delim' not in kwargs):
return self._errors['CMMVC50000']
vdisk_name = kwargs['obj']
if vdisk_name not in self._volumes_list:
return self._errors['CMMVC50000']
rows = []
rows.append(['id', 'name', 'SCSI_id', 'host_id', 'host_name',
'vdisk_UID', 'IO_group_id', 'IO_group_name'])
mappings_found = 0
for mapping in self._mappings_list.values():
if (mapping['vol'] == vdisk_name):
mappings_found += 1
volume = self._volumes_list[mapping['vol']]
host = self._hosts_list[mapping['host']]
rows.append([volume['id'], volume['name'], '1', host['id'],
host['host_name'], volume['vdisk_UID'],
'0', 'mdiskgrp0'])
if mappings_found:
return self._print_cmd_info(rows=rows, **kwargs)
else:
return ('', '')
def _cmd_expandvdisksize(self, **kwargs):
"""svctask expandvdisksize -size <size> -unit gb <vdisk_name>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
vol_name = kwargs['obj'].strip('\'\"')
if 'size' not in kwargs:
return self._errors['CMMVC50000']
size = int(kwargs['size'])
if vol_name not in self._volumes_list:
return self._errors['CMMVC50000']
curr_size = int(self._volumes_list[vol_name]['capacity'])
addition = size * units.Gi
self._volumes_list[vol_name]['capacity'] = six.text_type(
curr_size + addition)
return ('', '')
def _cmd_mkvdisk(self, **kwargs):
"""svctask mkvdisk -name <name> -mdiskgrp <mdiskgrp> -iogrp <iogrp>
-size <size> -unit <unit>
"""
if 'name' not in kwargs or (
'size' not in kwargs) or (
'unit' not in kwargs):
return self._errors['CMMVC50000']
vdisk_info = {}
vdisk_info['id'] = self._find_unused_id(self._volumes_list)
vdisk_info['name'] = kwargs['name'].strip('\'\"')
vdisk_info['status'] = 'online'
vdisk_info['capacity'] = self._convert_units_bytes(
int(kwargs['size']), kwargs['unit'])
vdisk_info['vdisk_UID'] = ('60050760') + ('0' * 14) + vdisk_info['id']
if vdisk_info['name'] in self._volumes_list:
return self._errors['CMMVC50000']
else:
self._volumes_list[vdisk_info['name']] = vdisk_info
return ('Virtual Disk, id [%s], successfully created' %
(vdisk_info['id']), '')
def _cmd_rmvdisk(self, **kwargs):
"""svctask rmvdisk -force <vdisk_name>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
vdisk_name = kwargs['obj'].strip('\'\"')
if vdisk_name not in self._volumes_list:
return self._errors['CMMVC50000']
del self._volumes_list[vdisk_name]
return ('', '')
def _add_port_to_host(self, host_info, **kwargs):
if 'iscsiname' in kwargs:
added_key = 'iscsi_names'
added_val = kwargs['iscsiname'].strip('\'\"')
elif 'hbawwpn' in kwargs:
added_key = 'wwpns'
added_val = kwargs['hbawwpn'].strip('\'\"')
else:
return self._errors['CMMVC50000']
host_info[added_key].append(added_val)
for v in self._hosts_list.values():
if v['id'] == host_info['id']:
continue
for port in v[added_key]:
if port == added_val:
return self._errors['CMMVC50000']
return ('', '')
def _cmd_mkhost(self, **kwargs):
"""svctask mkhost -force -hbawwpn <wwpn> -name <host_name>
svctask mkhost -force -iscsiname <initiator> -name <host_name>
"""
if 'name' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['name'].strip('\'\"')
if self._is_invalid_name(host_name):
return self._errors['CMMVC50000']
if host_name in self._hosts_list:
return self._errors['CMMVC50000']
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
host_info['host_name'] = host_name
host_info['iscsi_names'] = []
host_info['wwpns'] = []
out, err = self._add_port_to_host(host_info, **kwargs)
if not len(err):
self._hosts_list[host_name] = host_info
return ('Host, id [%s], successfully created' %
(host_info['id']), '')
else:
return (out, err)
def _cmd_addhostport(self, **kwargs):
"""svctask addhostport -force -hbawwpn <wwpn> <host>
svctask addhostport -force -iscsiname <initiator> <host>
"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
host_info = self._hosts_list[host_name]
return self._add_port_to_host(host_info, **kwargs)
def _cmd_rmhost(self, **kwargs):
"""svctask rmhost <host>"""
if 'obj' not in kwargs:
return self._errors['CMMVC50000']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC50000']
for v in self._mappings_list.values():
if (v['host'] == host_name):
return self._errors['CMMVC50000']
del self._hosts_list[host_name]
return ('', '')
def _cmd_mkvdiskhostmap(self, **kwargs):
"""svctask mkvdiskhostmap -host <host> -scsi <lun> <vdisk_name>"""
mapping_info = {}
mapping_info['id'] = self._find_unused_id(self._mappings_list)
if 'host' not in kwargs or (
'scsi' not in kwargs) or (
'obj' not in kwargs):
return self._errors['CMMVC50000']
mapping_info['host'] = kwargs['host'].strip('\'\"')
mapping_info['lun'] = kwargs['scsi'].strip('\'\"')
mapping_info['vol'] = kwargs['obj'].strip('\'\"')
if mapping_info['vol'] not in self._volumes_list:
return self._errors['CMMVC50000']
if mapping_info['host'] not in self._hosts_list:
return self._errors['CMMVC50000']
if mapping_info['vol'] in self._mappings_list:
return self._errors['CMMVC50000']
for v in self._mappings_list.values():
if ((v['host'] == mapping_info['host']) and
(v['lun'] == mapping_info['lun'])):
return self._errors['CMMVC50000']
for v in self._mappings_list.values():
if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
return self._errors['CMMVC50000']
self._mappings_list[mapping_info['id']] = mapping_info
return ('Virtual Disk to Host map, id [%s], successfully created'
% (mapping_info['id']), '')
def _cmd_rmvdiskhostmap(self, **kwargs):
"""svctask rmvdiskhostmap -host <host> <vdisk_name>"""
if 'host' not in kwargs or 'obj' not in kwargs:
return self._errors['CMMVC50000']
host = kwargs['host'].strip('\'\"')
vdisk = kwargs['obj'].strip('\'\"')
mapping_ids = []
for v in self._mappings_list.values():
if v['vol'] == vdisk:
mapping_ids.append(v['id'])
if not mapping_ids:
return self._errors['CMMVC50000']
this_mapping = None
for mapping_id in mapping_ids:
if self._mappings_list[mapping_id]['host'] == host:
this_mapping = mapping_id
if this_mapping is None:
return self._errors['CMMVC50000']
del self._mappings_list[this_mapping]
return ('', '')
def set_protocol(self, protocol):
self._protocol = protocol
def execute_command(self, cmd, check_exit_code=True):
try:
kwargs = self._cmd_to_dict(cmd)
except exception.InvalidInput:
return self._errors['CMMVC50000']
command = kwargs['cmd']
del kwargs['cmd']
func = getattr(self, '_cmd_' + command)
out, err = func(**kwargs)
if (check_exit_code) and (len(err) != 0):
raise processutils.ProcessExecutionError(exit_code=1,
stdout=out,
stderr=err,
cmd=command)
return (out, err)
def error_injection(self, cmd, error):
self._next_cmd_error[cmd] = error
class FlashSystemFakeDriver(flashsystem.FlashSystemDriver):
def __init__(self, *args, **kwargs):
super(FlashSystemFakeDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
def _ssh(self, cmd, check_exit_code=True):
try:
LOG.debug('Run CLI command: %s' % cmd)
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
(stdout, stderr) = ret
LOG.debug('CLI output:\n stdout: %(stdout)s\n stderr: '
'%(stderr)s' % {'stdout': stdout, 'stderr': stderr})
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.debug('CLI Exception output:\n stdout: %(out)s\n '
'stderr: %(err)s' % {'out': e.stdout,
'err': e.stderr})
return ret
class FlashSystemDriverTestCase(test.TestCase):
def _set_flag(self, flag, value):
group = self.driver.configuration.config_group
self.driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,
vol_name,
vol_size=10,
vol_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
if not vol_name:
vol_name = 'test_volume%s' % rand_id
return {'name': vol_name,
'size': vol_size,
'id': '%s' % rand_id,
'volume_type_id': None,
'status': vol_status,
'mdisk_grp_name': 'mdiskgrp0'}
def _generate_snap_info(self,
vol_name,
vol_id,
vol_size,
vol_status,
snap_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
return {'name': 'test_snap_%s' % rand_id,
'id': rand_id,
'volume': {'name': vol_name,
'id': vol_id,
'size': vol_size,
'status': vol_status},
'volume_size': vol_size,
'status': snap_status,
'mdisk_grp_name': 'mdiskgrp0'}
def setUp(self):
super(FlashSystemDriverTestCase, self).setUp()
self._def_flags = {'san_ip': 'hostname',
'san_login': 'username',
'san_password': 'password',
'flashsystem_connection_protocol': 'FC',
'flashsystem_multipath_enabled': False,
'flashsystem_multihostmap_enabled': True}
self.connector = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
self.sim = FlashSystemManagementSimulator()
self.driver = FlashSystemFakeDriver(
configuration=conf.Configuration(None))
self.driver.set_fake_storage(self.sim)
self._reset_flags()
self.ctxt = context.get_admin_context()
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.sleeppatch = mock.patch('eventlet.greenthread.sleep')
self.sleeppatch.start()
def tearDown(self):
self.sleeppatch.stop()
super(FlashSystemDriverTestCase, self).tearDown()
def test_flashsystem_do_setup(self):
# case 1: cmd lssystem encounters error
self.sim.error_injection('lssystem', 'error')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# case 2: open_access_enabled is not off
self.sim.error_injection('lssystem', 'open_access_enabled=on')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# case 3: cmd lsmdiskgrp encounters error
self.sim.error_injection('lsmdiskgrp', 'error')
self.assertRaises(exception.InvalidInput,
self.driver.do_setup, None)
# case 4: status is not online
self.sim.error_injection('lsmdiskgrp', 'status=offline')
self.assertRaises(exception.InvalidInput,
self.driver.do_setup, None)
# case 5: cmd lsnode encounters error
self.sim.error_injection('lsnode', 'error')
self.assertRaises(processutils.ProcessExecutionError,
self.driver.do_setup, None)
# case 6: cmd lsnode header does not match
self.sim.error_injection('lsnode', 'header_mismatch')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# case 7: set as FC
self.sim.set_protocol('FC')
self.driver.do_setup(None)
self.assertEqual('FC', self.driver._protocol)
# case 8: no configured nodes available
self.sim.set_protocol('unknown')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# clear environment
self.sim.set_protocol('FC')
self.driver.do_setup(None)
def test_flashsystem_check_for_setup_error(self):
self._set_flag('san_ip', '')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('san_ssh_port', '')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('san_login', '')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('san_password', None)
self._set_flag('san_private_key', None)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('flashsystem_connection_protocol', 'foo')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
# clear environment
self.driver.do_setup(None)
def test_flashsystem_validate_connector(self):
conn_neither = {'host': 'host'}
conn_iscsi = {'host': 'host', 'initiator': 'foo'}
conn_fc = {'host': 'host', 'wwpns': 'bar'}
conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
protocol = self.driver._protocol
# case 1: when protocol is FC
self.driver._protocol = 'FC'
self.driver.validate_connector(conn_fc)
self.driver.validate_connector(conn_both)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_iscsi)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_neither)
# clear environment
self.driver._protocol = protocol
def test_flashsystem_volumes(self):
# case 1: create volume
vol = self._generate_vol_info(None)
self.driver.create_volume(vol)
# Check whether volume is created successfully
attributes = self.driver._get_vdisk_attributes(vol['name'])
attr_size = float(attributes['capacity']) / units.Gi
self.assertEqual(float(vol['size']), attr_size)
# case 2: delete volume
self.driver.delete_volume(vol)
# case 3: delete volume that doesn't exist (expected not fail)
vol_no_exist = self._generate_vol_info(None)
self.driver.delete_volume(vol_no_exist)
def test_flashsystem_extend_volume(self):
vol = self._generate_vol_info(None)
self.driver.create_volume(vol)
self.driver.extend_volume(vol, '200')
attrs = self.driver._get_vdisk_attributes(vol['name'])
vol_size = int(attrs['capacity']) / units.Gi
self.assertAlmostEqual(vol_size, 200)
# clear environment
self.driver.delete_volume(vol)
def test_flashsystem_connection(self):
# case 1: initialize_connection/terminate_connection for good path
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.driver.initialize_connection(vol1, self.connector)
self.driver.terminate_connection(vol1, self.connector)
# case 2: when volume is not existed
vol2 = self._generate_vol_info(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
vol2, self.connector)
# case 3: _get_vdisk_map_properties raises exception
with mock.patch.object(flashsystem.FlashSystemDriver,
'_get_vdisk_map_properties') as get_properties:
get_properties.side_effect = exception.VolumeBackendAPIException
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
vol1, self.connector)
# clear environment
self.driver.delete_volume(vol1)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_create_and_copy_vdisk_data')
def test_flashsystem_create_snapshot(self, _create_and_copy_vdisk_data):
# case 1: good path
vol1 = self._generate_vol_info(None)
snap1 = self._generate_snap_info(vol1['name'],
vol1['id'],
vol1['size'],
vol1['status'])
self.driver.create_snapshot(snap1)
# case 2: when volume status is error
vol2 = self._generate_vol_info(None, vol_status='error')
snap2 = self._generate_snap_info(vol2['name'],
vol2['id'],
vol2['size'],
vol2['status'])
self.assertRaises(exception.InvalidVolume,
self.driver.create_snapshot, snap2)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_delete_vdisk')
def test_flashsystem_delete_snapshot(self, _delete_vdisk):
vol1 = self._generate_vol_info(None)
snap1 = self._generate_snap_info(vol1['name'],
vol1['id'],
vol1['size'],
vol1['status'])
self.driver.delete_snapshot(snap1)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_create_and_copy_vdisk_data')
def test_flashsystem_create_volume_from_snapshot(
self, _create_and_copy_vdisk_data):
# case 1: good path
vol = self._generate_vol_info(None)
snap = self._generate_snap_info(vol['name'],
vol['id'],
vol['size'],
vol['status'])
self.driver.create_volume_from_snapshot(vol, snap)
# case 2: when size does not match
vol = self._generate_vol_info(None, vol_size=100)
snap = self._generate_snap_info(vol['name'],
vol['id'],
200,
vol['status'])
self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume_from_snapshot,
vol, snap)
# case 3: when snapshot status is not available
vol = self._generate_vol_info(None)
snap = self._generate_snap_info(vol['name'],
vol['id'],
vol['size'],
vol['status'],
snap_status='error')
self.assertRaises(exception.InvalidSnapshot,
self.driver.create_volume_from_snapshot,
vol, snap)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_create_and_copy_vdisk_data')
def test_flashsystem_create_cloned_volume(
self, _create_and_copy_vdisk_data):
# case 1: good path
vol1 = self._generate_vol_info(None)
vol2 = self._generate_vol_info(None)
self.driver.create_cloned_volume(vol2, vol1)
# case 2: when size does not match
vol1 = self._generate_vol_info(None, vol_size=10)
vol2 = self._generate_vol_info(None, vol_size=20)
self.assertRaises(exception.VolumeDriverException,
self.driver.create_cloned_volume,
vol2, vol1)
def test_flashsystem_get_volume_stats(self):
# case 1: good path
self._set_flag('reserved_percentage', 25)
pool = 'mdiskgrp0'
backend_name = 'flashsystem_1.2.3.4' + '_' + pool
stats = self.driver.get_volume_stats()
self.assertEqual(25, stats['reserved_percentage'])
self.assertEqual('IBM', stats['vendor_name'])
self.assertEqual('FC', stats['storage_protocol'])
self.assertEqual(backend_name, stats['volume_backend_name'])
self._reset_flags()
# case 2: when lsmdiskgrp returns error
self.sim.error_injection('lsmdiskgrp', 'error')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.get_volume_stats, refresh=True)
@mock.patch.object(flashsystem.FlashSystemDriver,
'_copy_vdisk_data')
def test_flashsystem_create_and_copy_vdisk_data(self, _copy_vdisk_data):
# case 1: when volume does not exist
vol1 = self._generate_vol_info(None)
vol2 = self._generate_vol_info(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._create_and_copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
# case 2: good path
self.driver.create_volume(vol1)
self.driver._create_and_copy_vdisk_data(
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
# case 3: _copy_vdisk_data raises exception
self.driver.create_volume(vol1)
_copy_vdisk_data.side_effect = exception.VolumeBackendAPIException
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._create_and_copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
self.assertEqual(set(), self.driver._vdisk_copy_in_progress)
# clear environment
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
@mock.patch.object(volume_utils, 'copy_volume')
@mock.patch.object(flashsystem.FlashSystemDriver, '_scan_device')
@mock.patch.object(flashsystem.FlashSystemDriver, '_remove_device')
@mock.patch.object(utils, 'brick_get_connector_properties')
def test_flashsystem_copy_vdisk_data(self,
_connector,
_remove_device,
_scan_device,
copy_volume):
connector = _connector.return_value = self.connector
vol1 = self._generate_vol_info(None)
vol2 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.driver.create_volume(vol2)
# case 1: no mapped before copy
self.driver._copy_vdisk_data(
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertFalse(v1_mapped)
self.assertFalse(v2_mapped)
# case 2: mapped before copy
self.driver.initialize_connection(vol1, connector)
self.driver.initialize_connection(vol2, connector)
self.driver._copy_vdisk_data(
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertTrue(v1_mapped)
self.assertTrue(v2_mapped)
self.driver.terminate_connection(vol1, connector)
self.driver.terminate_connection(vol2, connector)
# case 3: no mapped before copy, raise exception when scan
_scan_device.side_effect = exception.VolumeBackendAPIException
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertFalse(v1_mapped)
self.assertFalse(v2_mapped)
# case 4: no mapped before copy, raise exception when copy
copy_volume.side_effect = exception.VolumeBackendAPIException
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver._copy_vdisk_data,
vol1['name'], vol1['id'], vol2['name'], vol2['id'])
(v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector)
(v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector)
self.assertFalse(v1_mapped)
self.assertFalse(v2_mapped)
# clear environment
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
def test_flashsystem_connector_to_hostname_prefix(self):
# Invalid characters will be translated to '-'
# case 1: host name is unicode with invalid characters
conn = {'host': u'unicode.test}.abc{.abc'}
self.assertEqual(u'unicode.test-.abc-.abc',
self.driver._connector_to_hostname_prefix(conn))
# case 2: host name is string with invalid characters
conn = {'host': 'string.test}.abc{.abc'}
self.assertEqual('string.test-.abc-.abc',
self.driver._connector_to_hostname_prefix(conn))
# case 3: host name is neither unicode nor string
conn = {'host': 12345}
self.assertRaises(exception.NoValidHost,
self.driver._connector_to_hostname_prefix,
conn)
# case 4: host name started with number will be translated
conn = {'host': '192.168.1.1'}
self.assertEqual('_192.168.1.1',
self.driver._connector_to_hostname_prefix(conn))
def test_flashsystem_create_host(self):
# case 1: create host
conn = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
host = self.driver._create_host(conn)
# case 2: create host that already exists
self.assertRaises(processutils.ProcessExecutionError,
self.driver._create_host,
conn)
# case 3: delete host
self.driver._delete_host(host)
# case 4: create host with empty ports
conn = {'host': 'flashsystem', 'wwpns': []}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._create_host,
conn)
def test_flashsystem_find_host_exhaustive(self):
# case 1: create host and find it
conn1 = {
'host': 'flashsystem-01',
'wwnns': ['1111111111abcdef', '1111111111abcdeg'],
'wwpns': ['1111111111000001', '1111111111000002'],
'initiator': 'iqn.111111'}
conn2 = {
'host': 'flashsystem-02',
'wwnns': ['2222222222abcdef', '2222222222abcdeg'],
'wwpns': ['2222222222000001', '2222222222000002'],
'initiator': 'iqn.222222'}
conn3 = {
'host': 'flashsystem-03',
'wwnns': ['3333333333abcdef', '3333333333abcdeg'],
'wwpns': ['3333333333000001', '3333333333000002'],
'initiator': 'iqn.333333'}
host1 = self.driver._create_host(conn1)
host2 = self.driver._create_host(conn2)
self.assertEqual(
host2,
self.driver._find_host_exhaustive(conn2, [host1, host2]))
self.assertEqual(
None,
self.driver._find_host_exhaustive(conn3, [host1, host2]))
# clear environment
self.driver._delete_host(host1)
self.driver._delete_host(host2)
def test_flashsystem_get_vdisk_params(self):
# case 1: use default params
self.driver._get_vdisk_params(None)
# case 2: use extra params from type
opts1 = {'storage_protocol': 'FC'}
opts2 = {'capabilities:storage_protocol': 'FC'}
opts3 = {'storage_protocol': 'iSCSI'}
type1 = volume_types.create(self.ctxt, 'opts1', opts1)
type2 = volume_types.create(self.ctxt, 'opts2', opts2)
type3 = volume_types.create(self.ctxt, 'opts3', opts3)
self.assertEqual(
'FC',
self.driver._get_vdisk_params(type1['id'])['protocol'])
self.assertEqual(
'FC',
self.driver._get_vdisk_params(type2['id'])['protocol'])
self.assertRaises(exception.InvalidInput,
self.driver._get_vdisk_params,
type3['id'])
# clear environment
volume_types.destroy(self.ctxt, type1['id'])
volume_types.destroy(self.ctxt, type2['id'])
def test_flashsystem_map_vdisk_to_host(self):
# case 1: no host found
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.assertEqual(
            # lun id should begin with 1
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# case 2: host already exists
vol2 = self._generate_vol_info(None)
self.driver.create_volume(vol2)
self.assertEqual(
            # lun id should be sequential
2,
self.driver._map_vdisk_to_host(vol2['name'], self.connector))
# case 3: test if already mapped
self.assertEqual(
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# clean environment
self.driver._unmap_vdisk_from_host(vol1['name'], self.connector)
self.driver._unmap_vdisk_from_host(vol2['name'], self.connector)
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
# case 4: If there is no vdisk mapped to host, host should be removed
self.assertEqual(
None,
self.driver._get_host_from_connector(self.connector))
| apache-2.0 | -6,044,078,275,345,714,000 | 37.243902 | 79 | 0.531803 | false |
RDXT/django-userena | userena/views.py | 1 | 38355 | from django.urls import reverse
from django.shortcuts import redirect, get_object_or_404,render
from django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.views import logout as Signout
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseRedirect,HttpResponse
from userena.forms import (SignupForm, SignupFormOnlyEmail, AuthenticationForm,
ChangeEmailForm, EditProfileForm,InviteForm)
from userena.models import UserenaSignup
from userena.decorators import secure_required
from userena.utils import signin_redirect, get_profile_model, get_user_profile
from userena import signals as userena_signals
from userena import settings as userena_settings
from guardian.decorators import permission_required_or_403
from django.contrib.auth.decorators import login_required
import warnings
class ExtraContextTemplateView(TemplateView):
""" Add extra context to a simple template view """
extra_context = None
def get_context_data(self, *args, **kwargs):
context = super(ExtraContextTemplateView, self).get_context_data(*args, **kwargs)
if self.extra_context:
context.update(self.extra_context)
return context
# this view is used in POST requests, e.g. signup when the form is not valid
post = TemplateView.get
class InvitedUsersListView(ListView):
""" Lists all profiles """
    context_object_name = 'invited_user_list'
    page = 1
    paginate_by = 50
    template_name = 'userena/list_invited_users.html'
    extra_context = None
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(InvitedUsersListView, self).get_context_data(**kwargs)
try:
page = int(self.request.GET.get('page', None))
except (TypeError, ValueError):
page = self.page
if not self.extra_context: self.extra_context = dict()
context['page'] = page
context['paginate_by'] = self.paginate_by
context['extra_context'] = self.extra_context
profile_model= get_profile_model()
currentProfile=profile_model.objects.get(user=self.request.user)
context['numOfRemainingInvitationTicket']= currentProfile.get_remaining_invite_tickets_number()
return context
def get_queryset(self):
profile_model= get_profile_model()
currentProfile=profile_model.objects.get(user=self.request.user)
queryset = currentProfile.invited_users.all()
return queryset
class ProfileListView(ListView):
""" Lists all profiles """
    context_object_name = 'profile_list'
    page = 1
    paginate_by = 50
    template_name = userena_settings.USERENA_PROFILE_LIST_TEMPLATE
    extra_context = None
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(ProfileListView, self).get_context_data(**kwargs)
try:
page = int(self.request.GET.get('page', None))
except (TypeError, ValueError):
page = self.page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not self.request.user.is_staff:
raise Http404
if not self.extra_context: self.extra_context = dict()
context['page'] = page
context['paginate_by'] = self.paginate_by
context['extra_context'] = self.extra_context
return context
def get_queryset(self):
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(self.request.user).select_related()
return queryset
@secure_required
@login_required
def invite_new_user(request, invite_form=InviteForm,
                    template_name='userena/invite_new_user.html',
                    success_url='userena_list_invited_users',
                    extra_context=None):
    if request.user.has_perm('invite_user'):
if not extra_context:
extra_context = dict()
if request.method == 'POST':
form = invite_form(request.user,request.POST, request.FILES)
if form.is_valid():
result=form.save()
                if result:  # if result is True everything was ok
return redirect(success_url)
else:
return HttpResponse(status=500)
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
form=invite_form(request.user)
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
raise PermissionDenied
@secure_required
@login_required
def list_invited_users(request, template_name='userena/list_invited_users.html'):
return InvitedUsersListView.as_view(template_name=template_name)(request)
@secure_required
def signup(request, signup_form=SignupForm,
template_name='userena/signup_form.html', success_url=None,
extra_context=None):
"""
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
"""
# If signup is disabled, return 403
if userena_settings.USERENA_DISABLE_SIGNUP:
raise PermissionDenied
    # If no usernames are wanted and the default form is used, fall back to the
    # form that doesn't ask the user to enter a username.
if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
signup_form = SignupFormOnlyEmail
form = signup_form()
if request.method == 'POST':
form = signup_form(request.POST, request.FILES)
if form.is_valid():
user = form.save()
# Send the signup complete signal
userena_signals.signup_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_signup_complete',
kwargs={'username': user.username})
# A new signed user should logout the old one.
if request.user.is_authenticated():
logout(request)
if (userena_settings.USERENA_SIGNIN_AFTER_SIGNUP and
not userena_settings.USERENA_ACTIVATION_REQUIRED):
user = authenticate(identification=user.email, check_password=False)
login(request, user)
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
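# Illustrative sketch only (assumed project urls.py, not part of this module):
# the signup view can be wired up with a custom form before including
# userena's default URL patterns.
#
#   from userena import views as userena_views
#
#   urlpatterns = [
#       url(r'^accounts/signup/$', userena_views.signup,
#           {'signup_form': MySignupForm},       # MySignupForm is hypothetical
#           name='userena_signup'),
#       url(r'^accounts/', include('userena.urls')),
#   ]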
@secure_required
def activate(request, activation_key,
template_name='userena/activate_fail.html',
retry_template_name='userena/activate_retry.html',
success_url=None, extra_context=None):
"""
Activate a user with an activation key.
The key is a SHA1 string. When the SHA1 is found with an
:class:`UserenaSignup`, the :class:`User` of that account will be
activated. After a successful activation the view will redirect to
``success_url``. If the SHA1 is not found, the user will be shown the
``template_name`` template displaying a fail message.
If the SHA1 is found but expired, ``retry_template_name`` is used instead,
so the user can proceed to :func:`activate_retry` to get a new activation key.
:param activation_key:
        String containing a SHA1 hash of 40 characters. A SHA1 hash is always
        160 bits long; at 4 bits per character that makes 160/4 = 40 characters.
:param template_name:
String containing the template name that is used when the
``activation_key`` is invalid and the activation fails. Defaults to
``userena/activate_fail.html``.
:param retry_template_name:
String containing the template name that is used when the
``activation_key`` is expired. Defaults to
``userena/activate_retry.html``.
:param success_url:
String containing the URL where the user should be redirected to after
a successful activation. Will replace ``%(username)s`` with string
formatting if supplied. If ``success_url`` is left empty, will direct
to ``userena_profile_detail`` view.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
try:
if (not UserenaSignup.objects.check_expired_activation(activation_key)
or not userena_settings.USERENA_ACTIVATION_RETRY):
user = UserenaSignup.objects.activate_user(activation_key)
if user:
# Sign the user in.
auth_user = authenticate(identification=user.email,
check_password=False)
login(request, auth_user)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your account has been activated and you have been signed in.'),
fail_silently=True)
if success_url: redirect_to = success_url % {'username': user.username }
else: redirect_to = reverse('userena_profile_detail',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(
request)
else:
if not extra_context: extra_context = dict()
extra_context['activation_key'] = activation_key
return ExtraContextTemplateView.as_view(template_name=retry_template_name,
extra_context=extra_context)(request)
except UserenaSignup.DoesNotExist:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def activate_retry(request, activation_key,
template_name='userena/activate_retry_success.html',
extra_context=None):
"""
Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
If ``activation_key`` does not exists, or ``USERENA_ACTIVATION_RETRY`` is
set to False and for any other error condition user is redirected to
:func:`activate` for error message display.
:param activation_key:
        String containing a SHA1 hash of 40 characters. A SHA1 hash is always
        160 bits long; at 4 bits per character that makes 160/4 = 40 characters.
:param template_name:
String containing the template name that is used when new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
if not userena_settings.USERENA_ACTIVATION_RETRY:
return redirect(reverse('userena_activate', args=(activation_key,)))
try:
if UserenaSignup.objects.check_expired_activation(activation_key):
new_key = UserenaSignup.objects.reissue_activation(activation_key)
if new_key:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
except UserenaSignup.DoesNotExist:
return redirect(reverse('userena_activate',args=(activation_key,)))
@secure_required
def activate_invited_user(request, invitation_key,
template_name='userena/invite_fail.html',
retry_template_name='userena/invite_retry.html',
success_url=None, extra_context=None):
"""
Activate an invited user with an invitation key.
The key is a SHA1 string. When the SHA1 is found with an
:class:`UserenaSignup`, the :class:`User` of that account will be
activated. After a successful activation the view will redirect to
``success_url``. If the SHA1 is not found, the user will be shown the
``template_name`` template displaying a fail message.
If the SHA1 is found but expired, ``retry_template_name`` is used instead,
so the user can proceed to :func:`activate_retry` to get a new activation key.
:param invitation_key:
        String containing a SHA1 hash of 40 characters. A SHA1 hash is always
        160 bits long; at 4 bits per character that makes 160/4 = 40 characters.
:param template_name:
String containing the template name that is used when the
``activation_key`` is invalid and the activation fails. Defaults to
``userena/activate_fail.html``.
:param retry_template_name:
String containing the template name that is used when the
``activation_key`` is expired. Defaults to
``userena/activate_retry.html``.
:param success_url:
String containing the URL where the user should be redirected to after
a successful activation. Will replace ``%(username)s`` with string
formatting if supplied. If ``success_url`` is left empty, will direct
to ``userena_profile_detail`` view.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
try:
if (not UserenaSignup.objects.check_expired_invitation(invitation_key)
or not userena_settings.USERENA_ACTIVATION_RETRY):
user = UserenaSignup.objects.activate_invited_user(invitation_key)
if user:
# Sign the user in.
auth_user = authenticate(identification=user.email,
check_password=False)
login(request, auth_user)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your account has been activated and you have been signed in.'),
fail_silently=True)
if success_url: redirect_to = success_url % {'username': user.username }
else: redirect_to = reverse('userena_profile_detail',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(
request)
else:
if not extra_context: extra_context = dict()
extra_context['invitation_key'] = invitation_key
return ExtraContextTemplateView.as_view(template_name=retry_template_name,
extra_context=extra_context)(request)
except UserenaSignup.DoesNotExist:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def email_confirm(request, confirmation_key,
template_name='userena/email_confirm_fail.html',
success_url=None, extra_context=None):
"""
Confirms an email address with a confirmation key.
    Confirms a new email address by running the
    :func:`User.objects.confirm_email` method. If the method returns a
    :class:`User` the user will have their new e-mail address set and be
    redirected to ``success_url``. If no ``User`` is returned the user will be
    presented with a fail message from ``template_name``.
:param confirmation_key:
String with a SHA1 representing the confirmation key used to verify a
new email address.
:param template_name:
String containing the template name which should be rendered when
confirmation fails. When confirmation is successful, no template is
needed because the user will be redirected to ``success_url``.
:param success_url:
String containing the URL which is redirected to after a successful
confirmation. Supplied argument must be able to be rendered by
``reverse`` function.
:param extra_context:
Dictionary of variables that are passed on to the template supplied by
``template_name``.
"""
user = UserenaSignup.objects.confirm_email(confirmation_key)
if user:
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your email address has been changed.'),
fail_silently=True)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_email_confirm_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
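# A minimal URLconf sketch for wiring up ``email_confirm`` (illustrative only;
# the URL pattern and the 40-character key regex are assumptions based on the
# docstring above, not part of this module):
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^confirm-email/(?P<confirmation_key>\w{40})/$',
#           email_confirm, name='userena_email_confirm'),
#   ]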
def direct_to_user_template(request, username, template_name,
extra_context=None):
"""
Simple wrapper for Django's :func:`direct_to_template` view.
This view is used when you want to show a template to a specific user. A
wrapper for :func:`direct_to_template` where the template also has access to
    the user that is found with ``username``. For example, used after signup,
activation and confirmation of a new e-mail.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
        The :class:`User` that is currently being viewed.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def disabled_account(request, username, template_name, extra_context=None):
"""
    Checks if the account is disabled; if so, returns the disabled account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
        The :class:`User` that is currently being viewed.
``profile``
Profile of the viewed user.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if user.is_active:
raise Http404
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def signin(request, auth_form=AuthenticationForm,
template_name='userena/signin_form.html',
redirect_field_name=REDIRECT_FIELD_NAME,
redirect_signin_function=signin_redirect, extra_context=None):
"""
Signin using email or username with password.
Signs a user in by combining email/username with password. If the
combination is correct and the user :func:`is_active` the
:func:`redirect_signin_function` is called with the arguments
    ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
    trying to log in. The returned value of the function will be the URL that
is redirected to.
A user can also select to be remembered for ``USERENA_REMEMBER_DAYS``.
:param auth_form:
Form to use for signing the user in. Defaults to the
:class:`AuthenticationForm` supplied by userena.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signin_form.html``.
:param redirect_field_name:
Form field name which contains the value for a redirect to the
succeeding page. Defaults to ``next`` and is set in
``REDIRECT_FIELD_NAME`` setting.
:param redirect_signin_function:
        Function which handles the redirect. This function gets the value of
``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
must return a string which specifies the URI to redirect to.
:param extra_context:
A dictionary containing extra variables that should be passed to the
rendered template. The ``form`` key is always the ``auth_form``.
**Context**
``form``
Form used for authentication supplied by ``auth_form``.
"""
form = auth_form()
if request.method == 'POST':
form = auth_form(request.POST, request.FILES)
if form.is_valid():
identification, password, remember_me = (form.cleaned_data['identification'],
form.cleaned_data['password'],
form.cleaned_data['remember_me'])
user = authenticate(identification=identification,
password=password)
if user.is_active:
login(request, user)
if remember_me:
request.session.set_expiry(userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400)
else: request.session.set_expiry(0)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('You have been signed in.'),
fail_silently=True)
#send a signal that a user has signed in
userena_signals.account_signin.send(sender=None, user=user)
# Whereto now?
redirect_to = redirect_signin_function(
request.GET.get(redirect_field_name,
request.POST.get(redirect_field_name)), user)
return redirect(redirect_to)
else:
return redirect(reverse('userena_disabled',
kwargs={'username': user.username}))
if not extra_context: extra_context = dict()
extra_context.update({
'form': form,
'next': request.GET.get(redirect_field_name,
request.POST.get(redirect_field_name)),
})
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
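# A minimal sketch of a custom ``redirect_signin_function`` (illustrative only):
# it receives the value of ``REDIRECT_FIELD_NAME`` and the signed-in ``User``
# and must return the URI to redirect to, as described in the docstring above.
#
#   def redirect_to_dashboard(redirect_to, user):
#       return redirect_to or '/dashboard/%s/' % user.username
#
# It can then be passed as ``redirect_signin_function=redirect_to_dashboard``
# when hooking ``signin`` into a URLconf.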
@secure_required
def signout(request, next_page=userena_settings.USERENA_REDIRECT_ON_SIGNOUT,
template_name='userena/signout.html', *args, **kwargs):
"""
Signs out the user and adds a success message ``You have been signed
out.`` If next_page is defined you will be redirected to the URI. If
    not, the template in template_name is used.
:param next_page:
A string which specifies the URI to redirect to.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signout.html``.
"""
if request.user.is_authenticated() and userena_settings.USERENA_USE_MESSAGES: # pragma: no cover
messages.success(request, _('You have been signed out.'), fail_silently=True)
userena_signals.account_signout.send(sender=None, user=request.user)
return Signout(request, next_page=next_page, template_name=template_name, *args, **kwargs)
@secure_required
@permission_required_or_403('change_user', (get_user_model(), 'username', 'username'))
def email_change(request, username, email_form=ChangeEmailForm,
template_name='userena/email_form.html', success_url=None,
extra_context=None):
"""
Change email address
:param username:
String of the username which specifies the current account.
:param email_form:
Form that will be used to change the email address. Defaults to
:class:`ChangeEmailForm` supplied by userena.
:param template_name:
String containing the template to be used to display the email form.
Defaults to ``userena/email_form.html``.
:param success_url:
Named URL where the user will get redirected to when successfully
changing their email address. When not supplied will redirect to
``userena_email_complete`` URL.
:param extra_context:
Dictionary containing extra variables that can be used to render the
template. The ``form`` key is always the form supplied by the keyword
argument ``form`` and the ``user`` key by the user whose email address
is being changed.
**Context**
``form``
Form that is used to change the email address supplied by ``form``.
``account``
Instance of the ``Account`` whose email address is about to be changed.
**Todo**
Need to have per-object permissions, which enables users with the correct
permissions to alter the email address of others.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
prev_email = user.email
form = email_form(user)
if request.method == 'POST':
form = email_form(user, request.POST, request.FILES)
if form.is_valid():
form.save()
if success_url:
# Send a signal that the email has changed
userena_signals.email_change.send(sender=None,
user=user,
prev_email=prev_email,
new_email=user.email)
redirect_to = success_url
else: redirect_to = reverse('userena_email_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_user', (get_user_model(), 'username', 'username'))
def password_change(request, username, template_name='userena/password_form.html',
pass_form=PasswordChangeForm, success_url=None, extra_context=None):
""" Change password of user.
This view is almost a mirror of the view supplied in
:func:`contrib.auth.views.password_change`, with the minor change that in
this view we also use the username to change the password. This was needed
    to keep our URLs logical (and RESTful) across the entire application, and
    so that at a later stage administrators can also change a user's password
    through the web application itself.
:param username:
        String supplying the username of the user whose password is about to be
changed.
:param template_name:
String of the name of the template that is used to display the password
change form. Defaults to ``userena/password_form.html``.
:param pass_form:
Form used to change password. Default is the form supplied by Django
itself named ``PasswordChangeForm``.
:param success_url:
Named URL that is passed onto a :func:`reverse` function with
``username`` of the active user. Defaults to the
``userena_password_complete`` URL.
:param extra_context:
Dictionary of extra variables that are passed on to the template. The
``form`` key is always used by the form supplied by ``pass_form``.
**Context**
``form``
Form used to change the password.
"""
user = get_object_or_404(get_user_model(),
username__iexact=username)
form = pass_form(user=user)
if request.method == "POST":
form = pass_form(user=user, data=request.POST)
if form.is_valid():
form.save()
# Send a signal that the password has changed
userena_signals.password_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_password_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_profile', (get_profile_model(), 'user__username', 'username'))
def profile_edit(request, username, edit_profile_form=EditProfileForm,
template_name='userena/profile_form.html', success_url=None,
extra_context=None, **kwargs):
"""
Edit profile.
    Edits a profile selected by the supplied username. First checks whether
    the user is allowed to edit this profile; if not, a 404 is shown. When the
    profile is successfully edited, this view redirects to
    ``success_url``.
:param username:
Username of the user which profile should be edited.
:param edit_profile_form:
Form that is used to edit the profile. The :func:`EditProfileForm.save`
method of this form will be called when the form
:func:`EditProfileForm.is_valid`. Defaults to :class:`EditProfileForm`
from userena.
:param template_name:
String of the template that is used to render this view. Defaults to
``userena/edit_profile_form.html``.
:param success_url:
Named URL which will be passed on to a django ``reverse`` function after
the form is successfully saved. Defaults to the ``userena_detail`` url.
:param extra_context:
Dictionary containing variables that are passed on to the
``template_name`` template. ``form`` key will always be the form used
to edit the profile, and the ``profile`` key is always the edited
profile.
**Context**
``form``
Form that is used to alter the profile.
``profile``
Instance of the ``Profile`` that is edited.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
profile = get_user_profile(user=user)
user_initial = {'first_name': user.first_name,
'last_name': user.last_name}
form = edit_profile_form(instance=profile, initial=user_initial)
if request.method == 'POST':
form = edit_profile_form(request.POST, request.FILES, instance=profile,
initial=user_initial)
if form.is_valid():
profile = form.save()
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your profile has been updated.'),
fail_silently=True)
if success_url:
# Send a signal that the profile has changed
userena_signals.profile_change.send(sender=None,
user=user)
redirect_to = success_url
else: redirect_to = reverse('userena_profile_detail', kwargs={'username': username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = profile
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def profile_detail(request, username,
template_name=userena_settings.USERENA_PROFILE_DETAIL_TEMPLATE,
extra_context=None, **kwargs):
"""
    Detailed view of a user.
    :param username:
        String of the username whose profile should be viewed.
:param template_name:
String representing the template name that should be used to display
the profile.
:param extra_context:
Dictionary of variables which should be supplied to the template. The
``profile`` key is always the current profile.
**Context**
``profile``
Instance of the currently viewed ``Profile``.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
profile = get_user_profile(user=user)
if not profile.can_view_profile(request.user):
raise PermissionDenied
if not extra_context: extra_context = dict()
extra_context['profile'] = profile
extra_context['hide_email'] = userena_settings.USERENA_HIDE_EMAIL
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
    If the result is paginated, it will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (TypeError, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return ProfileListView.as_view(queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
**kwargs)(request)
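# The deprecation warning above points to the class-based replacement; a
# minimal URLconf sketch (illustrative only, the URL pattern is an assumption):
#
#   from userena.views import ProfileListView
#   url(r'^accounts/$', ProfileListView.as_view(), name='userena_profile_list')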
| bsd-3-clause | -7,902,587,174,113,698,000 | 39.673383 | 157 | 0.63087 | false |
django-extensions/django-extensions | django_extensions/management/commands/validate_templates.py | 1 | 3627 | # -*- coding: utf-8 -*-
import os
import fnmatch
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import color_style
from django.template.loader import get_template
from django_extensions.compat import get_template_setting
from django_extensions.management.utils import signalcommand
#
# TODO: Render the template with fake request object ?
#
class Command(BaseCommand):
args = ''
help = "Validate templates on syntax and compile errors"
ignores = set([
".DS_Store",
"*.swp",
"*~",
])
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
'--no-apps', action='store_true', dest='no_apps',
default=False, help="Do not automatically include apps.")
parser.add_argument(
'--break', '-b', action='store_true', dest='break',
default=False, help="Break on first error.")
parser.add_argument(
'--include', '-i', action='append', dest='includes',
default=[], help="Append these paths to TEMPLATE DIRS")
parser.add_argument(
'--ignore-app', action='append', dest='ignore_apps',
default=[], help="Ignore these apps")
def ignore_filename(self, filename):
filename = os.path.basename(filename)
for ignore_pattern in self.ignores:
if fnmatch.fnmatch(filename, ignore_pattern):
return True
return False
@signalcommand
def handle(self, *args, **options):
if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):
self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')
style = color_style()
template_dirs = set(get_template_setting('DIRS', []))
template_dirs |= set(options['includes'])
template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))
if not options['no_apps']:
ignore_apps = options['ignore_apps']
if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):
ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')
for app in apps.get_app_configs():
if app.name in ignore_apps:
continue
app_template_dir = os.path.join(app.path, 'templates')
if os.path.isdir(app_template_dir):
template_dirs.add(app_template_dir)
settings.TEMPLATES[0]['DIRS'] = list(template_dirs)
settings.TEMPLATE_DEBUG = True
verbosity = options["verbosity"]
errors = 0
for template_dir in template_dirs:
for root, dirs, filenames in os.walk(template_dir):
for filename in filenames:
if self.ignore_filename(filename):
continue
filepath = os.path.join(root, filename)
if verbosity > 1:
self.stdout.write(filepath)
try:
get_template(filepath)
except Exception as e:
errors += 1
self.stdout.write("%s: %s" % (filepath, style.ERROR("%s %s" % (e.__class__.__name__, str(e)))))
if errors and options['break']:
raise CommandError("Errors found")
if errors:
raise CommandError("%s errors found" % errors)
self.stdout.write("%s errors found" % errors)
| mit | 256,871,737,937,782,240 | 36.78125 | 119 | 0.576234 | false |
agbs2k8/WebLearning | web-starter/blueyellow_pycharm_app/setup.py | 1 | 1137 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'pyramid_handlers',
'waitress',
]
setup(name='blueyellow_pycharm_app',
version='0.0',
description='blueyellow_pycharm_app',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="blueyellow_pycharm_app",
entry_points="""\
[paste.app_factory]
main = blueyellow_pycharm_app:main
""",
)
| mit | 6,758,758,797,266,973,000 | 25.44186 | 63 | 0.592788 | false |
firmadyne/scraper | firmware/spiders/openwrt.py | 1 | 2425 | from scrapy import Spider
from scrapy.http import Request
from firmware.items import FirmwareImage
from firmware.loader import FirmwareLoader
import urllib.request, urllib.parse, urllib.error
class OpenWRTSpider(Spider):
name = "openwrt"
allowed_domains = ["downloads.openwrt.org"]
start_urls = ["http://downloads.openwrt.org/"]
def parse(self, response):
for link in response.xpath("//a"):
text = link.xpath("text()").extract_first()
href = link.xpath("@href").extract_first()
if text is None and href == "/":
# <a href="/"><em>(root)</em></a>
continue
yield Request(
url=urllib.parse.urljoin(response.url, href),
headers={"Referer": response.url},
meta={"version": FirmwareLoader.find_version_period(text)},
callback=self.parse_url)
def parse_url(self, response):
for link in response.xpath("//a"):
text = link.xpath("text()").extract_first()
href = link.xpath("@href").extract_first()
if text is None and href == "/":
# <a href="/"><em>(root)</em></a>
continue
if ".." in href:
continue
elif href.endswith('/'):
if "package/" not in text:
product = "%s-%s" % (response.meta["product"], text[0: -1]) if "product" in response.meta else text[0: -1]
yield Request(
url=urllib.parse.urljoin(response.url, href),
headers={"Referer": response.url},
meta={"version": response.meta[
"version"], "product": product},
callback=self.parse_url)
elif any(href.endswith(x) for x in [".bin", ".elf", ".fdt", ".imx", ".chk", ".trx"]):
item = FirmwareLoader(
item=FirmwareImage(), response=response, date_fmt=["%d-%b-%Y"])
item.add_value("version", response.meta["version"])
item.add_value("url", href)
item.add_value("date", item.find_date(
link.xpath("following::text()").extract()))
item.add_value("product", response.meta["product"])
item.add_value("vendor", self.name)
yield item.load_item()
| mit | 7,624,054,713,847,366,000 | 39.416667 | 126 | 0.51299 | false |
kozistr/Awesome-GANs | awesome_gans/discogan/discogan_train.py | 1 | 4150 | import sys
import time
import tensorflow as tf
import awesome_gans.discogan.discogan_model as discogan
import awesome_gans.image_utils as iu
from awesome_gans.datasets import Pix2PixDataSet as DataSets
# import numpy as np
sys.path.insert(0, '../')
results = {'sample_output': './gen_img/', 'model': './model/DiscoGAN-model.ckpt'}
paras = {'epoch': 200, 'batch_size': 64, 'logging_interval': 5}
def main():
start_time = time.time() # clocking start
# Dataset
dataset = DataSets(height=64, width=64, channel=3, ds_path='D:/DataSets/pix2pix/', ds_name="vangogh2photo")
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# DiscoGAN model
model = discogan.DiscoGAN(s)
# load model & graph & weight
global_step = 0
ckpt = tf.train.get_checkpoint_state('./model/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
print("[+] global step : %s" % global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
# initializing variables
tf.global_variables_initializer().run()
d_overpowered = False # G loss > D loss * 2
for epoch in range(paras['epoch']):
for step in range(1000):
offset_a = (step * paras['batch_size']) % (dataset.images_a.shape[0] - paras['batch_size'])
offset_b = (step * paras['batch_size']) % (dataset.images_b.shape[0] - paras['batch_size'])
# batch data set
batch_a = dataset.images_a[offset_a : (offset_a + paras['batch_size']), :]
batch_b = dataset.images_b[offset_b : (offset_b + paras['batch_size']), :]
# update D network
if not d_overpowered:
s.run(model.d_op, feed_dict={model.A: batch_a})
# update G network
s.run(model.g_op, feed_dict={model.B: batch_b})
if epoch % paras['logging_interval'] == 0:
d_loss, g_loss, summary = s.run(
[model.d_loss, model.g_loss, model.merged], feed_dict={model.A: batch_a, model.B: batch_b}
)
# print loss
print(
"[+] Epoch %03d Step %04d => " % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss),
)
# update overpowered
d_overpowered = d_loss < g_loss / 2.0
# training G model with sample image and noise
ab_samples = s.run(model.G_s2b, feed_dict={model.A: batch_a})
ba_samples = s.run(model.G_b2s, feed_dict={model.B: batch_b})
# summary saver
model.writer.add_summary(summary, global_step=global_step)
# export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_ab_dir = results['sample_output'] + 'train_A_{0}_{1}.png'.format(epoch, global_step)
sample_ba_dir = results['sample_output'] + 'train_B_{0}_{1}.png'.format(epoch, global_step)
# Generated image save
iu.save_images(ab_samples, size=[sample_image_height, sample_image_width], image_path=sample_ab_dir)
iu.save_images(ba_samples, size=[sample_image_height, sample_image_width], image_path=sample_ba_dir)
# model save
model.saver.save(s, results['model'], global_step=global_step)
end_time = time.time() - start_time
# elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# close tf.Session
s.close()
if __name__ == '__main__':
main()
| mit | -2,482,674,350,346,778,000 | 37.425926 | 120 | 0.536145 | false |
speksofdust/BeyondDreams | beyonddreams/char/attribs.py | 1 | 3234 | # ---------------------------------------------------------------------------- #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ---------------------------------------------------------------------------- #
from pdict import ChildPdict
from .. import bd
class CharAttrib:
__slots__ = ("_parent",)
"""Base class for all character attributes."""
@property
def char(self):
"""The char this attribute belongs to."""
try: return self._parent.char
except: return self._parent # parent is char
@property
def base(self):
"""Defaults for the char which this attribute belongs to."""
return self._parent.base
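# A minimal sketch of a concrete attribute built on CharAttrib (illustrative
# only; the attribute name is invented for the example):
#
#   class Nickname(CharAttrib):
#       __slots__ = CharAttrib.__slots__ + ("_name",)
#       def __init__(self, char, name=""):
#           self._parent = char
#           self._name = name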
class CharAttrDict(ChildPdict, CharAttrib):
    """CharAttrib dict type for use with Char attributes and sub attributes."""
    __slots__ = ChildPdict.__slots__
class Equip(CharAttrib):
__slots__ = CharAttrib.__slots__ + "_slots"
def __init__(self, char, slots):
self._char = char
self._slots = {}
def __bool__(self): return len(self._slots) > 0
def __len__(self): return len(self._slots)
def __iter__(self): return iter(self._slots)
def __contains__(self, i): return i in self._slots
def __getitem__(self, i): return self._slots[i]
class Body(CharAttrib):
__slots__ = CharAttrib.__slots__ + "_subparts", "_attribs", "_mesh"
def __init__(self, char):
self._char = char
self._subparts = {}
self._attribs = {}
self._mesh = None
#bd.datapath() TODO
@property
def subparts(self):
return self._subparts
@property
def attribs(self):
return self._attribs
class Stats(CharAttrib):
__slots__ = CharAttrib.__slots__
    def __init__(self, char):
        self._parent = char
def base_stats(self):
"""The base stats."""
        return self._parent._base._stats
class StatusEffects(CharAttrib):
__slots__ = CharAttrib.__slots__
    def __init__(self, char):
        self._parent = char
def base_statuseffects(self):
"""The base status effects."""
        return self._parent._base._statuseffects
| gpl-3.0 | 6,986,478,758,046,898,000 | 33.774194 | 80 | 0.497217 | false |
machinecoin-project/machinecoin | contrib/devtools/security-check.py | 2 | 8226 | #!/usr/bin/env python
# Copyright (c) 2015-2017 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
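# Usage sketch (binary paths are examples only):
#
#   READELF=/usr/bin/readelf OBJDUMP=/usr/bin/objdump \
#       contrib/devtools/security-check.py src/machinecoind src/machinecoin-cli
#
# The exit status is 0 when every given binary passes all fatal checks.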
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse elfread -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == b'GNU_STACK':
have_gnu_stack = True
if b'W' in flags and b'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
if b'__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
# ('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval)
| mit | 3,368,837,701,868,259,300 | 37.083333 | 163 | 0.617554 | false |
simartin/servo | tests/wpt/web-platform-tests/tools/ci/tc/tests/test_valid.py | 1 | 11726 | import json
import os
from io import open
import sys
import jsone
import mock
import pytest
import requests
import yaml
from jsonschema import validate
here = os.path.dirname(__file__)
root = os.path.abspath(os.path.join(here, "..", "..", "..", ".."))
sys.path.insert(0, root)
from tools.ci.tc import decision
def data_path(filename):
return os.path.join(here, "..", "testdata", filename)
def test_verify_taskcluster_yml():
"""Verify that the json-e in the .taskcluster.yml is valid"""
with open(os.path.join(root, ".taskcluster.yml"), encoding="utf8") as f:
template = yaml.safe_load(f)
events = [("pr_event.json", "github-pull-request", "Pull Request"),
("master_push_event.json", "github-push", "Push to master")]
for filename, tasks_for, title in events:
with open(data_path(filename), encoding="utf8") as f:
event = json.load(f)
context = {"tasks_for": tasks_for,
"event": event,
"as_slugid": lambda x: x}
jsone.render(template, context)
def test_verify_payload():
"""Verify that the decision task produces tasks with a valid payload"""
from tools.ci.tc.decision import decide
r = requests.get("https://community-tc.services.mozilla.com/schemas/queue/v1/create-task-request.json")
r.raise_for_status()
create_task_schema = r.json()
r = requests.get("https://community-tc.services.mozilla.com/references/schemas/docker-worker/v1/payload.json")
r.raise_for_status()
payload_schema = r.json()
jobs = ["lint",
"manifest_upload",
"resources_unittest",
"tools_unittest",
"wpt_integration",
"wptrunner_infrastructure",
"wptrunner_unittest"]
for filename in ["pr_event.json", "master_push_event.json"]:
with open(data_path(filename), encoding="utf8") as f:
event = json.load(f)
with mock.patch("tools.ci.tc.decision.get_fetch_rev", return_value=(None, event["after"], None)):
with mock.patch("tools.ci.tc.decision.get_run_jobs", return_value=set(jobs)):
task_id_map = decide(event)
for name, (task_id, task_data) in task_id_map.items():
try:
validate(instance=task_data, schema=create_task_schema)
validate(instance=task_data["payload"], schema=payload_schema)
except Exception as e:
print("Validation failed for task '%s':\n%s" % (name, json.dumps(task_data, indent=2)))
raise e
@pytest.mark.parametrize("event_path,is_pr,files_changed,expected", [
("master_push_event.json", False, None,
['download-firefox-nightly',
'wpt-firefox-nightly-testharness-1',
'wpt-firefox-nightly-testharness-2',
'wpt-firefox-nightly-testharness-3',
'wpt-firefox-nightly-testharness-4',
'wpt-firefox-nightly-testharness-5',
'wpt-firefox-nightly-testharness-6',
'wpt-firefox-nightly-testharness-7',
'wpt-firefox-nightly-testharness-8',
'wpt-firefox-nightly-testharness-9',
'wpt-firefox-nightly-testharness-10',
'wpt-firefox-nightly-testharness-11',
'wpt-firefox-nightly-testharness-12',
'wpt-firefox-nightly-testharness-13',
'wpt-firefox-nightly-testharness-14',
'wpt-firefox-nightly-testharness-15',
'wpt-firefox-nightly-testharness-16',
'wpt-chrome-dev-testharness-1',
'wpt-chrome-dev-testharness-2',
'wpt-chrome-dev-testharness-3',
'wpt-chrome-dev-testharness-4',
'wpt-chrome-dev-testharness-5',
'wpt-chrome-dev-testharness-6',
'wpt-chrome-dev-testharness-7',
'wpt-chrome-dev-testharness-8',
'wpt-chrome-dev-testharness-9',
'wpt-chrome-dev-testharness-10',
'wpt-chrome-dev-testharness-11',
'wpt-chrome-dev-testharness-12',
'wpt-chrome-dev-testharness-13',
'wpt-chrome-dev-testharness-14',
'wpt-chrome-dev-testharness-15',
'wpt-chrome-dev-testharness-16',
'wpt-firefox-nightly-reftest-1',
'wpt-firefox-nightly-reftest-2',
'wpt-firefox-nightly-reftest-3',
'wpt-firefox-nightly-reftest-4',
'wpt-firefox-nightly-reftest-5',
'wpt-chrome-dev-reftest-1',
'wpt-chrome-dev-reftest-2',
'wpt-chrome-dev-reftest-3',
'wpt-chrome-dev-reftest-4',
'wpt-chrome-dev-reftest-5',
'wpt-firefox-nightly-wdspec-1',
'wpt-firefox-nightly-wdspec-2',
'wpt-chrome-dev-wdspec-1',
'wpt-chrome-dev-wdspec-2',
'wpt-firefox-nightly-crashtest-1',
'wpt-chrome-dev-crashtest-1',
'wpt-firefox-nightly-print-reftest-1',
'wpt-chrome-dev-print-reftest-1',
'lint']),
("pr_event.json", True, {".taskcluster.yml", ".travis.yml", "tools/ci/start.sh"},
['lint',
'tools/ unittests (Python 3.6)',
'tools/ unittests (Python 3.8)',
'tools/ integration tests (Python 3.6)',
'tools/ integration tests (Python 3.8)',
'resources/ tests (Python 3.6)',
'resources/ tests (Python 3.8)',
'download-firefox-nightly',
'infrastructure/ tests (Python 3)',
'sink-task']),
# More tests are affected in the actual PR but it shouldn't affect the scheduled tasks
("pr_event_tests_affected.json", True, {"layout-instability/clip-negative-bottom-margin.html",
"layout-instability/composited-element-movement.html"},
['download-firefox-nightly',
'wpt-firefox-nightly-stability',
'wpt-firefox-nightly-results',
'wpt-firefox-nightly-results-without-changes',
'wpt-chrome-dev-stability',
'wpt-chrome-dev-results',
'wpt-chrome-dev-results-without-changes',
'lint',
'sink-task']),
("epochs_daily_push_event.json", False, None,
['download-firefox-stable',
'wpt-firefox-stable-testharness-1',
'wpt-firefox-stable-testharness-2',
'wpt-firefox-stable-testharness-3',
'wpt-firefox-stable-testharness-4',
'wpt-firefox-stable-testharness-5',
'wpt-firefox-stable-testharness-6',
'wpt-firefox-stable-testharness-7',
'wpt-firefox-stable-testharness-8',
'wpt-firefox-stable-testharness-9',
'wpt-firefox-stable-testharness-10',
'wpt-firefox-stable-testharness-11',
'wpt-firefox-stable-testharness-12',
'wpt-firefox-stable-testharness-13',
'wpt-firefox-stable-testharness-14',
'wpt-firefox-stable-testharness-15',
'wpt-firefox-stable-testharness-16',
'wpt-chrome-nightly-testharness-1',
'wpt-chrome-nightly-testharness-2',
'wpt-chrome-nightly-testharness-3',
'wpt-chrome-nightly-testharness-4',
'wpt-chrome-nightly-testharness-5',
'wpt-chrome-nightly-testharness-6',
'wpt-chrome-nightly-testharness-7',
'wpt-chrome-nightly-testharness-8',
'wpt-chrome-nightly-testharness-9',
'wpt-chrome-nightly-testharness-10',
'wpt-chrome-nightly-testharness-11',
'wpt-chrome-nightly-testharness-12',
'wpt-chrome-nightly-testharness-13',
'wpt-chrome-nightly-testharness-14',
'wpt-chrome-nightly-testharness-15',
'wpt-chrome-nightly-testharness-16',
'wpt-chrome-stable-testharness-1',
'wpt-chrome-stable-testharness-2',
'wpt-chrome-stable-testharness-3',
'wpt-chrome-stable-testharness-4',
'wpt-chrome-stable-testharness-5',
'wpt-chrome-stable-testharness-6',
'wpt-chrome-stable-testharness-7',
'wpt-chrome-stable-testharness-8',
'wpt-chrome-stable-testharness-9',
'wpt-chrome-stable-testharness-10',
'wpt-chrome-stable-testharness-11',
'wpt-chrome-stable-testharness-12',
'wpt-chrome-stable-testharness-13',
'wpt-chrome-stable-testharness-14',
'wpt-chrome-stable-testharness-15',
'wpt-chrome-stable-testharness-16',
'wpt-webkitgtk_minibrowser-nightly-testharness-1',
'wpt-webkitgtk_minibrowser-nightly-testharness-2',
'wpt-webkitgtk_minibrowser-nightly-testharness-3',
'wpt-webkitgtk_minibrowser-nightly-testharness-4',
'wpt-webkitgtk_minibrowser-nightly-testharness-5',
'wpt-webkitgtk_minibrowser-nightly-testharness-6',
'wpt-webkitgtk_minibrowser-nightly-testharness-7',
'wpt-webkitgtk_minibrowser-nightly-testharness-8',
'wpt-webkitgtk_minibrowser-nightly-testharness-9',
'wpt-webkitgtk_minibrowser-nightly-testharness-10',
'wpt-webkitgtk_minibrowser-nightly-testharness-11',
'wpt-webkitgtk_minibrowser-nightly-testharness-12',
'wpt-webkitgtk_minibrowser-nightly-testharness-13',
'wpt-webkitgtk_minibrowser-nightly-testharness-14',
'wpt-webkitgtk_minibrowser-nightly-testharness-15',
'wpt-webkitgtk_minibrowser-nightly-testharness-16',
'wpt-servo-nightly-testharness-1',
'wpt-servo-nightly-testharness-2',
'wpt-servo-nightly-testharness-3',
'wpt-servo-nightly-testharness-4',
'wpt-servo-nightly-testharness-5',
'wpt-servo-nightly-testharness-6',
'wpt-servo-nightly-testharness-7',
'wpt-servo-nightly-testharness-8',
'wpt-servo-nightly-testharness-9',
'wpt-servo-nightly-testharness-10',
'wpt-servo-nightly-testharness-11',
'wpt-servo-nightly-testharness-12',
'wpt-servo-nightly-testharness-13',
'wpt-servo-nightly-testharness-14',
'wpt-servo-nightly-testharness-15',
'wpt-servo-nightly-testharness-16',
'wpt-firefox-stable-reftest-1',
'wpt-firefox-stable-reftest-2',
'wpt-firefox-stable-reftest-3',
'wpt-firefox-stable-reftest-4',
'wpt-firefox-stable-reftest-5',
'wpt-chrome-nightly-reftest-1',
'wpt-chrome-nightly-reftest-2',
'wpt-chrome-nightly-reftest-3',
'wpt-chrome-nightly-reftest-4',
'wpt-chrome-nightly-reftest-5',
'wpt-chrome-stable-reftest-1',
'wpt-chrome-stable-reftest-2',
'wpt-chrome-stable-reftest-3',
'wpt-chrome-stable-reftest-4',
'wpt-chrome-stable-reftest-5',
'wpt-webkitgtk_minibrowser-nightly-reftest-1',
'wpt-webkitgtk_minibrowser-nightly-reftest-2',
'wpt-webkitgtk_minibrowser-nightly-reftest-3',
'wpt-webkitgtk_minibrowser-nightly-reftest-4',
'wpt-webkitgtk_minibrowser-nightly-reftest-5',
'wpt-servo-nightly-reftest-1',
'wpt-servo-nightly-reftest-2',
'wpt-servo-nightly-reftest-3',
'wpt-servo-nightly-reftest-4',
'wpt-servo-nightly-reftest-5',
'wpt-firefox-stable-wdspec-1',
'wpt-firefox-stable-wdspec-2',
'wpt-chrome-nightly-wdspec-1',
'wpt-chrome-nightly-wdspec-2',
'wpt-chrome-stable-wdspec-1',
'wpt-chrome-stable-wdspec-2',
'wpt-webkitgtk_minibrowser-nightly-wdspec-1',
'wpt-webkitgtk_minibrowser-nightly-wdspec-2',
'wpt-servo-nightly-wdspec-1',
'wpt-servo-nightly-wdspec-2',
'wpt-firefox-stable-crashtest-1',
'wpt-chrome-nightly-crashtest-1',
'wpt-chrome-stable-crashtest-1',
'wpt-webkitgtk_minibrowser-nightly-crashtest-1',
'wpt-servo-nightly-crashtest-1',
'wpt-firefox-stable-print-reftest-1',
'wpt-chrome-nightly-print-reftest-1',
'wpt-chrome-stable-print-reftest-1'])
])
def test_schedule_tasks(event_path, is_pr, files_changed, expected):
with mock.patch("tools.ci.tc.decision.get_fetch_rev", return_value=(None, None, None)):
with mock.patch("tools.wpt.testfiles.repo_files_changed",
return_value=files_changed):
with open(data_path(event_path), encoding="utf8") as event_file:
event = json.load(event_file)
scheduled = decision.decide(event)
assert list(scheduled.keys()) == expected
| mpl-2.0 | -2,762,706,756,789,860,400 | 39.574394 | 114 | 0.647109 | false |
nebgnahz/CS268NetworkMeasurement | king/outputdb.py | 1 | 4444 | import MySQLdb, MySQLdb.cursors
import sqlalchemy.ext.serializer
import cPickle as pickle
from datetime import timedelta
from utilities import distance
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser(description='Output db')
parser.add_argument('--csv', action='store_true', default=False, help='Output CSV file of data')
parser.add_argument('--count', action='store_true', default=False, help='Only Print Count and Exit')
arguments = parser.parse_args()
connection = MySQLdb.connect(host = "data.cnobwey0khau.us-west-2.rds.amazonaws.com",
user = 'ucb_268_measure',
passwd = 'ucb_268_measure',
db = 'mydb',
ssl = {},
cursorclass = MySQLdb.cursors.SSCursor)
if arguments.count:
cur = connection.cursor()
cur.execute("""SELECT
SUM(IF(success, 1, 0)),
SUM(IF(not success, 1, 0))
FROM data;""")
num_success, num_fail = cur.fetchone()
percent = float(num_success)/float(num_fail+num_success)
print 'Success %i, Fail %i, Success Rate %s' % (num_success, num_fail, "{0:.0f}%".format(percent * 100))
cur.close()
exit(0)
cur = connection.cursor()
cur.execute("SELECT * from data where success;")
if arguments.csv:
seen = defaultdict(int)
results = []
print 't1, t2, distance, latency, test_point'
for r in cur:
id, timestamp, name1, name2, target1, target2, start, end, pings, address, test_point, success = r
target1 = pickle.loads(target1)
target2 = pickle.loads(target2)
dist = distance(target1[2], target2[2])
start = pickle.loads(start)
end = pickle.loads(end)
pings = pickle.loads(pings)
# the minimum makes more sense than average actually
latency = (end - start - min(pings)).total_seconds()
address = pickle.loads(address)
# speed-of-light limit violation. check them manually
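        # Worked example of the bound (assuming ``distance`` returns km):
        # dist/3.0/100000 == dist/300000, i.e. the one-way light-travel time in
        # seconds at ~300,000 km/s, so any measured latency below it is
        # physically impossible and worth inspecting by hand.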
if latency < (dist/3.0/100000):
pass
# print "++++++++++++++++++++++++++"
# print 'Target1:', name1, target1
# print 'Target2:', name2, target2
# print 'Test Point:', test_point
# print 'Response Address', address
# print 'Start:', start
# print 'End:', end
# print 'Pings:', pings
# print dist/3.0/100000, latency
# print "++++++++++++++++++++++++++"
#if latency > 0 and dist > 0 and target1[1] == address[0] and latency > (dist/3.0/100000) \
# and len(filter(lambda x: x.total_seconds()/latency > 1.2, pings)) != 0 and latency < 5:
seen[(name1, name2)] += 1
results.append((name1, name2, dist, latency, test_point))
cur.close()
for r in results:
name1, name2, dist, latency, test_point = r
if (name1, name2) in seen and seen[(name1, name2)] >= 2\
and (name2, name1) in seen and seen[(name2, name1)] >= 2:
print name1, name2, dist, latency, test_point
else:
output_file = open("results.pickle", "wb")
results = []
for r in cur:
results.append(r)
#id, timestamp, name1, name2, target1, target2, start, end, pings, address, test_point, success = r
# print 'Date of Measurement', pickle.loads(timestamp)
# print 'Target1:', name1, pickle.loads(target1)
# print 'Target2:', name2, pickle.loads(target2)
# print 'Test Point:', test_point
# print 'Response Address', pickle.loads(address)
# print 'Start:', pickle.loads(start)
# print 'End:', pickle.loads(end)
# print 'Pings:', pickle.loads(pings)
# print '---------------------------------'
cur.close()
cur = connection.cursor()
cur.execute("""SELECT
SUM(IF(success, 1, 0)),
SUM(IF(not success, 1, 0))
FROM data;""")
num_success, num_fail = cur.fetchone()
percent = float(num_success)/float(num_fail+num_success)
print 'Success %i, Fail %i, Success Rate %s' % (num_success, num_fail, "{0:.0f}%".format(percent * 100))
cur.close()
pickle.dump(results, output_file)
| bsd-2-clause | -1,943,888,100,517,738,500 | 41.730769 | 109 | 0.54973 | false |
menglewis/guildwars2api | tests/test_urls.py | 1 | 5637 | # -*- coding: utf-8 -*-
import unittest
from guildwars2api.v2 import GuildWars2API
class URLBuildTestCase(unittest.TestCase):
def setUp(self):
self.api = GuildWars2API()
def test_all_items_url(self):
self.assertEqual(self.api.items.build_url(), 'https://api.guildwars2.com/v2/items')
def test_specific_item_url(self):
self.assertEqual(self.api.items.build_url(id=1051), 'https://api.guildwars2.com/v2/items?id=1051')
def test_multiple_items_url(self):
self.assertEqual(self.api.items.build_url(ids="1051,1052"), 'https://api.guildwars2.com/v2/items?ids=1051%2C1052')
def test_list_items_url(self):
self.assertEqual(self.api.items.build_url(ids=[1051, 1052]), 'https://api.guildwars2.com/v2/items?ids=1051%2C1052')
def test_items_url_with_bad_param(self):
self.assertEqual(self.api.items.build_url(testattr="food"), 'https://api.guildwars2.com/v2/items?testattr=food')
def test_transactions(self):
self.assertEqual(self.api.transactions.build_url(), 'https://api.guildwars2.com/v2/commerce/transactions')
def test_transactions_current(self):
self.assertEqual(self.api.transactions.build_url('current'), 'https://api.guildwars2.com/v2/commerce/transactions/current')
def test_transactions_history(self):
self.assertEqual(self.api.transactions.build_url('history'), 'https://api.guildwars2.com/v2/commerce/transactions/history')
def test_transactions_current_buys(self):
self.assertEqual(self.api.transactions.build_url('current', 'buys'), 'https://api.guildwars2.com/v2/commerce/transactions/current/buys')
def test_transactions_current_sells(self):
self.assertEqual(self.api.transactions.build_url('current', 'sells'), 'https://api.guildwars2.com/v2/commerce/transactions/current/sells')
def test_transactions_history_buys(self):
self.assertEqual(self.api.transactions.build_url('history', 'buys'), 'https://api.guildwars2.com/v2/commerce/transactions/history/buys')
def test_transactions_history_sells(self):
self.assertEqual(self.api.transactions.build_url('history', 'sells'), 'https://api.guildwars2.com/v2/commerce/transactions/history/sells')
def test_materials_url(self):
self.assertEqual(self.api.materials.build_url(), 'https://api.guildwars2.com/v2/materials')
def test_bank_url(self):
self.assertEqual(self.api.bank.build_url(), 'https://api.guildwars2.com/v2/account/bank')
def test_bank_materials_url(self):
self.assertEqual(self.api.bank_materials.build_url(), 'https://api.guildwars2.com/v2/account/materials')
def test_characters_url(self):
self.assertEqual(self.api.characters.build_url(), 'https://api.guildwars2.com/v2/characters')
def test_inventory_url(self):
self.assertEqual(self.api.inventory.build_url('Test Character'), 'https://api.guildwars2.com/v2/characters/Test Character/inventory')
def test_equipment_url(self):
self.assertEqual(self.api.equipment.build_url('Test Character'), 'https://api.guildwars2.com/v2/characters/Test Character/equipment')
def test_account_url(self):
self.assertEqual(self.api.account.build_url(), 'https://api.guildwars2.com/v2/account')
def test_tokeninfo_url(self):
self.assertEqual(self.api.token_info.build_url(), 'https://api.guildwars2.com/v2/tokeninfo')
def test_currency_url(self):
self.assertEqual(self.api.currencies.build_url(), 'https://api.guildwars2.com/v2/currencies')
def test_account_wallet_url(self):
self.assertEqual(self.api.account_wallet.build_url(), 'https://api.guildwars2.com/v2/account/wallet')
def test_account_dyes_url(self):
self.assertEqual(self.api.account_dyes.build_url(), 'https://api.guildwars2.com/v2/account/dyes')
def test_account_skins_url(self):
self.assertEqual(self.api.account_skins.build_url(), 'https://api.guildwars2.com/v2/account/skins')
def test_pvp_stats_url(self):
self.assertEqual(self.api.pvp_stats.build_url(), 'https://api.guildwars2.com/v2/pvp/stats')
def test_pvp_games_url(self):
self.assertEqual(self.api.pvp_games.build_url(), 'https://api.guildwars2.com/v2/pvp/games')
def test_specializations_url(self):
self.assertEqual(self.api.specializations.build_url('Test Character'), 'https://api.guildwars2.com/v2/characters/Test Character/specializations')
def test_wvw_objectives_url(self):
self.assertEqual(self.api.wvw_objectives.build_url(), 'https://api.guildwars2.com/v2/wvw/objectives')
def test_wvw_objectives_url_id(self):
self.assertEqual(self.api.wvw_objectives.build_url(id='968-92'), 'https://api.guildwars2.com/v2/wvw/objectives?id=968-92')
def test_minis_url(self):
self.assertEqual(self.api.minis.build_url(), 'https://api.guildwars2.com/v2/minis')
def test_minis_url_id(self):
self.assertEqual(self.api.minis.build_url(id=378), 'https://api.guildwars2.com/v2/minis?id=378')
def test_account_minis_url(self):
self.assertEqual(self.api.account_minis.build_url(), 'https://api.guildwars2.com/v2/account/minis')
def test_achievements_url(self):
self.assertEqual(self.api.achievements.build_url(), 'https://api.guildwars2.com/v2/achievements')
def test_achievements_url_id(self):
self.assertEqual(self.api.achievements.build_url(id=1965), 'https://api.guildwars2.com/v2/achievements?id=1965')
def test_account_achievements(self):
self.assertEqual(self.api.account_achievements.build_url(), 'https://api.guildwars2.com/v2/account/achievements')
| bsd-3-clause | -6,078,500,527,989,944,000 | 48.447368 | 153 | 0.710484 | false |
romeric/Fastor | benchmark/external/benchmark_inverse/benchmark_plot.py | 1 | 1615 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino'],'size':14})
rc('text', usetex=True)
def read_results():
ms, ns, times_eigen, times_fastor = [], [], [], []
with open("benchmark_results.txt", "r") as f:
lines = f.readlines()
for line in lines:
sline = line.split(' ')
if len(sline) == 4:
times_eigen.append(float(sline[1]))
times_fastor.append(float(sline[2]))
elif len(sline) == 7 and "size" in sline[1]:
ms.append(int(sline[4]))
ns.append(int(sline[5]))
return np.array(ms), np.array(ns), np.array(times_eigen), np.array(times_fastor)
def main():
ms, ns, times_eigen, times_fastor = read_results()
fig, ax = plt.subplots()
index = np.arange(len(ms))
bar_width = 0.2
opacity = 0.8
rects1 = plt.bar(index, times_eigen/1e-6, bar_width,
alpha=opacity,
color='#C03B22',
label='Eigen')
rects3 = plt.bar(index + bar_width, times_fastor/1e-6, bar_width,
alpha=opacity,
color='#E98604',
label='Fastor')
xticks = [str(dim[0]) + 'x' + str(dim[1]) for dim in zip(ms,ns)]
plt.xlabel('(M,M)')
plt.ylabel('Time ($\mu$sec)')
plt.title("B = inv(A)")
plt.xticks(index, xticks, rotation=45)
plt.legend()
plt.tight_layout()
plt.grid(True)
# plt.savefig('benchmark_inverse_single.png', format='png', dpi=300)
# plt.savefig('benchmark_inverse_single.png', format='png', dpi=300)
plt.show()
if __name__ == "__main__":
main() | mit | -533,837,418,136,971,400 | 26.389831 | 84 | 0.573994 | false |